vdirsyncer-0.16.2/0000755000175000017500000000000013147536465016044 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/CODE_OF_CONDUCT.rst0000644000175000017500000000006313013735125021034 0ustar untitakeruntitaker00000000000000See `the pimutils CoC `_. vdirsyncer-0.16.2/config.example0000644000175000017500000000420313121521602020640 0ustar untitakeruntitaker00000000000000# An example configuration for vdirsyncer. # # Move it to ~/.vdirsyncer/config or ~/.config/vdirsyncer/config and edit it. # Run `vdirsyncer --help` for CLI usage. # # Optional parameters are commented out. # This file doesn't document all available parameters, see # http://vdirsyncer.pimutils.org/ for the rest of them. [general] # A folder where vdirsyncer can store some metadata about each pair. status_path = "~/.vdirsyncer/status/" # CARDDAV [pair bob_contacts] # A `[pair ]` block defines two storages `a` and `b` that should be # synchronized. The definition of these storages follows in `[storage ]` # blocks. This is similar to accounts in OfflineIMAP. a = "bob_contacts_local" b = "bob_contacts_remote" # Synchronize all collections that can be found. # You need to run `vdirsyncer discover` if new calendars/addressbooks are added # on the server. collections = ["from a", "from b"] # Synchronize the "display name" property into a local file (~/.contacts/displayname). metadata = ["displayname"] # To resolve a conflict the following values are possible: # `null` - abort when collisions occur (default) # `"a wins"` - assume a's items to be more up-to-date # `"b wins"` - assume b's items to be more up-to-date #conflict_resolution = null [storage bob_contacts_local] # A storage references actual data on a remote server or on the local disk. # Similar to repositories in OfflineIMAP. type = "filesystem" path = "~/.contacts/" fileext = ".vcf" [storage bob_contacts_remote] type = "carddav" url = "https://owncloud.example.com/remote.php/carddav/" #username = # The password can also be fetched from the system password storage, netrc or a # custom command. See http://vdirsyncer.pimutils.org/en/stable/keyring.html #password = # CALDAV [pair bob_calendar] a = "bob_calendar_local" b = "bob_calendar_remote" collections = ["from a", "from b"] # Calendars also have a color property metadata = ["displayname", "color"] [storage bob_calendar_local] type = "filesystem" path = "~/.calendars/" fileext = ".ics" [storage bob_calendar_remote] type = "caldav" url = "https://owncloud.example.com/remote.php/caldav/" #username = #password = vdirsyncer-0.16.2/MANIFEST.in0000644000175000017500000000045513121521602017561 0ustar untitakeruntitaker00000000000000# setuptools-scm includes everything tracked by git prune contrib prune scripts prune tests/storage/servers prune tests/storage/etesync recursive-include tests/storage/servers/radicale * recursive-include tests/storage/servers/skip * prune docs/_build global-exclude *.py[cdo] __pycache__ *.so *.pyd vdirsyncer-0.16.2/setup.py0000644000175000017500000000566113147534155017560 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- ''' Vdirsyncer synchronizes calendars and contacts. Please refer to https://vdirsyncer.pimutils.org/en/stable/packaging.html for how to package vdirsyncer. 
''' from setuptools import Command, find_packages, setup requirements = [ # https://github.com/mitsuhiko/click/issues/200 'click>=5.0', 'click-log>=0.2.0, <0.3.0', # https://github.com/pimutils/vdirsyncer/issues/478 'click-threading>=0.2', # !=2.9.0: https://github.com/kennethreitz/requests/issues/2930 # # >=2.4.1: https://github.com/shazow/urllib3/pull/444 # Without the above pull request, `verify=False` also disables fingerprint # validation. This is *not* what we want, and it's not possible to # replicate vdirsyncer's current behavior (verifying fingerprints without # verifying against CAs) with older versions of urllib3. 'requests >=2.4.1, !=2.9.0', # https://github.com/sigmavirus24/requests-toolbelt/pull/28 # And https://github.com/sigmavirus24/requests-toolbelt/issues/54 'requests_toolbelt >=0.4.0', # https://github.com/untitaker/python-atomicwrites/commit/4d12f23227b6a944ab1d99c507a69fdbc7c9ed6d # noqa 'atomicwrites>=0.1.7' ] class PrintRequirements(Command): description = 'Prints minimal requirements' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): for requirement in requirements: print(requirement.replace(">", "=").replace(" ", "")) with open('README.rst') as f: long_description = f.read() setup( # General metadata name='vdirsyncer', author='Markus Unterwaditzer', author_email='markus@unterwaditzer.net', url='https://github.com/pimutils/vdirsyncer', description='Synchronize calendars and contacts', license='BSD', long_description=long_description, # Runtime dependencies install_requires=requirements, # Optional dependencies extras_require={ 'google': ['requests-oauthlib'], 'etesync': ['etesync'] }, # Build dependencies setup_requires=['setuptools_scm != 1.12.0'], # Other packages=find_packages(exclude=['tests.*', 'tests']), include_package_data=True, cmdclass={ 'minimal_requirements': PrintRequirements }, use_scm_version={ 'write_to': 'vdirsyncer/version.py' }, entry_points={ 'console_scripts': ['vdirsyncer = vdirsyncer.cli:main'] }, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'License :: OSI Approved :: BSD License', 'Operating System :: POSIX', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: Internet', 'Topic :: Utilities', ], ) vdirsyncer-0.16.2/setup.cfg0000644000175000017500000000052713147536465017671 0ustar untitakeruntitaker00000000000000[wheel] universal = 1 [tool:pytest] norecursedirs = tests/storage/servers/* addopts = --tb=short [flake8] ignore = E731 select = C,E,F,W,B,B9 exclude = tests/storage/servers/owncloud/, tests/storage/servers/nextcloud/, tests/storage/servers/baikal/, build/ application-package-names = tests,vdirsyncer [egg_info] tag_build = tag_date = 0 vdirsyncer-0.16.2/.gitignore0000644000175000017500000000021113134636312020012 0ustar untitakeruntitaker00000000000000*.pyc __pycache__ htmlcov .coverage build env *.egg-info .cache .eggs .egg .xprocess dist docs/_build/ vdirsyncer/version.py .hypothesis vdirsyncer-0.16.2/.travis.yml0000644000175000017500000001102313144561565020146 0ustar untitakeruntitaker00000000000000{ "branches": { "only": [ "auto", "master" ] }, "cache": "pip", "dist": "trusty", "git": { "submodules": false }, "install": [ "\n. 
scripts/travis-install.sh;\npip install -U pip;\npip install wheel;\nmake -e install-dev;\nmake -e install-$BUILD;\n" ], "language": "python", "matrix": { "include": [ { "env": "BUILD=style BUILD_PRS=true", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.3" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.3" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=release BUILD_PRS=true ", "python": "3.3" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=release BUILD_PRS=true ", "python": "3.3" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.3" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.3" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.4" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.4" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=release BUILD_PRS=true ", "python": "3.4" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=release BUILD_PRS=true ", "python": "3.4" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.4" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.4" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.5" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.5" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=release BUILD_PRS=true ", "python": "3.5" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=release BUILD_PRS=true ", "python": "3.5" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.5" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.5" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=devel BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=release BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=release BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=owncloud REQUIREMENTS=release BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=nextcloud REQUIREMENTS=release BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=baikal REQUIREMENTS=release BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=davical REQUIREMENTS=release BUILD_PRS=false ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=icloud REQUIREMENTS=release BUILD_PRS=false ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=fastmail REQUIREMENTS=release BUILD_PRS=false ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=radicale REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test DAV_SERVER=xandikos REQUIREMENTS=minimal BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test ETESYNC_TESTS=true REQUIREMENTS=latest BUILD_PRS=true ", "python": "3.6" }, { "env": "BUILD=test BUILD_PRS=true", "language": "generic", "os": "osx" } ] }, "script": [ "\nif [ \"$TRAVIS_PULL_REQUEST\" = \"false\" ] || [ \"$BUILD_PRS\" != \"false\" ];\nthen make -e $BUILD;\nfi" ], "sudo": true 
}vdirsyncer-0.16.2/CONTRIBUTING.rst0000644000175000017500000000020513013735125020464 0ustar untitakeruntitaker00000000000000Please see `the documentation `_ for how to contribute to this project. vdirsyncer-0.16.2/PKG-INFO0000644000175000017500000000523013147536465017141 0ustar untitakeruntitaker00000000000000Metadata-Version: 1.1 Name: vdirsyncer Version: 0.16.2 Summary: Synchronize calendars and contacts Home-page: https://github.com/pimutils/vdirsyncer Author: Markus Unterwaditzer Author-email: markus@unterwaditzer.net License: BSD Description: ========== vdirsyncer ========== - `Documentation `_ - `Source code `_ Vdirsyncer synchronizes your calendars and addressbooks between two storages_. The most popular purpose is to synchronize a CalDAV/CardDAV server with a local folder or file. The local data can then be accessed via a variety of programs_, none of which have to know or worry about syncing to a server. .. _storages: https://vdirsyncer.pimutils.org/en/latest/config.html#storages .. _programs: https://vdirsyncer.pimutils.org/en/latest/tutorials/ It aims to be for CalDAV and CardDAV what `OfflineIMAP `_ is for IMAP. .. image:: https://travis-ci.org/pimutils/vdirsyncer.svg?branch=master :target: https://travis-ci.org/pimutils/vdirsyncer .. image:: https://codecov.io/github/pimutils/vdirsyncer/coverage.svg?branch=master :target: https://codecov.io/github/pimutils/vdirsyncer?branch=master .. image:: https://badge.waffle.io/pimutils/vdirsyncer.svg?label=ready&title=Ready :target: https://waffle.io/pimutils/vdirsyncer Links of interest ================= * Check out `the tutorial `_ for basic usage. * `Contact information `_ * `How to contribute to this project `_ * `Donations `_ License ======= Licensed under the 3-clause BSD license, see ``LICENSE``. Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: POSIX Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Topic :: Internet Classifier: Topic :: Utilities vdirsyncer-0.16.2/.codecov.yml0000644000175000017500000000025313121521602020242 0ustar untitakeruntitaker00000000000000comment: false coverage: status: patch: false project: unit: flags: unit system: flags: system storage: flags: storage vdirsyncer-0.16.2/ISSUE_TEMPLATE.md0000644000175000017500000000101113051102032020507 0ustar untitakeruntitaker00000000000000Before you submit bug reports: https://vdirsyncer.pimutils.org/en/stable/contributing.html Things to include in your bugreport: * Your vdirsyncer version * If applicable, which server software (and which version) you're using * Your Python version * Your operating system * Your config file * Use `vdirsyncer -vdebug` for debug output. The output is sensitive, but please attach at least the last few lines before the error (if applicable), censored as necessary. This is almost always the most useful information. vdirsyncer-0.16.2/LICENSE0000644000175000017500000000307213121521602017026 0ustar untitakeruntitaker00000000000000Copyright (c) 2014-2016 by Markus Unterwaditzer & contributors. See AUTHORS.rst for more details. Some rights reserved. 
Redistribution and use in source and binary forms of the software as well as documentation, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vdirsyncer-0.16.2/.gitmodules0000644000175000017500000000064413121521602020200 0ustar untitakeruntitaker00000000000000[submodule "tests/storage/servers/baikal"] path = tests/storage/servers/baikal url = https://github.com/vdirsyncer/baikal-testserver [submodule "tests/storage/servers/owncloud"] path = tests/storage/servers/owncloud url = https://github.com/vdirsyncer/owncloud-testserver [submodule "tests/storage/servers/nextcloud"] path = tests/storage/servers/nextcloud url = https://github.com/vdirsyncer/nextcloud-testserver vdirsyncer-0.16.2/Makefile0000644000175000017500000000751613134641433017501 0ustar untitakeruntitaker00000000000000# See the documentation on how to run the tests: # https://vdirsyncer.pimutils.org/en/stable/contributing.html # Which DAV server to run the tests against (radicale, xandikos, skip, owncloud, nextcloud, ...) export DAV_SERVER := skip # release (install release versions of dependencies) # development (install development versions of some of vdirsyncer's dependencies) # or minimal (install oldest version of each dependency that is supported by vdirsyncer) export REQUIREMENTS := release # Set this to true if you run vdirsyncer's test as part of e.g. packaging. export DETERMINISTIC_TESTS := false # Run the etesync testsuite. export ETESYNC_TESTS := false # Assume to run in Travis. Don't use this outside of a virtual machine. It will # heavily "pollute" your system. export CI := false # Whether to generate coverage data while running tests. export COVERAGE := $(CI) # Additional arguments that should be passed to py.test. PYTEST_ARGS = # Variables below this line are not very interesting for getting started. 
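# As a rough sketch of how the variables above are meant to be used (the
# values below are only examples; any DAV_SERVER or REQUIREMENTS value
# documented above works the same way), a local test run could look like:
#
#   make install-dev install-test DAV_SERVER=radicale
#   make test DAV_SERVER=radicale REQUIREMENTS=release PYTEST_ARGS="-x"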
TEST_EXTRA_PACKAGES = ifeq ($(COVERAGE), true) TEST_EXTRA_PACKAGES += pytest-cov PYTEST_ARGS += --cov-config .coveragerc --cov vdirsyncer endif ifeq ($(ETESYNC_TESTS), true) TEST_EXTRA_PACKAGES += git+https://github.com/etesync/journal-manager TEST_EXTRA_PACKAGES += django djangorestframework wsgi_intercept drf-nested-routers endif export TESTSERVER_BASE := ./tests/storage/servers/ CODECOV_PATH = /tmp/codecov.sh ifeq ($(CI), true) test: curl -s https://codecov.io/bash > $(CODECOV_PATH) py.test $(PYTEST_ARGS) tests/unit/ bash $(CODECOV_PATH) -c -F unit py.test $(PYTEST_ARGS) tests/system/ bash $(CODECOV_PATH) -c -F system py.test $(PYTEST_ARGS) tests/storage/ bash $(CODECOV_PATH) -c -F storage else test: py.test $(PYTEST_ARGS) tests/ endif all: $(error Take a look at https://vdirsyncer.pimutils.org/en/stable/tutorial.html#installation) install-servers: set -ex; \ for server in $(DAV_SERVER); do \ if [ ! "$$(ls $(TESTSERVER_BASE)$$server/)" ]; then \ git submodule update --init -- "$(TESTSERVER_BASE)$$server"; \ fi; \ (cd $(TESTSERVER_BASE)$$server && sh install.sh); \ done install-test: install-servers (python --version | grep -vq 'Python 3.3') || pip install enum34 pip install -r test-requirements.txt set -xe && if [ "$$REQUIREMENTS" = "devel" ]; then \ pip install -U --force-reinstall \ git+https://github.com/DRMacIver/hypothesis \ git+https://github.com/kennethreitz/requests \ git+https://github.com/pytest-dev/pytest; \ fi [ -z "$(TEST_EXTRA_PACKAGES)" ] || pip install $(TEST_EXTRA_PACKAGES) install-style: install-docs pip install flake8 flake8-import-order flake8-bugbear>=17.3.0 style: flake8 ! git grep -i syncroniz */* ! git grep -i 'text/icalendar' */* sphinx-build -W -b html ./docs/ ./docs/_build/html/ python3 scripts/make_travisconf.py | diff -b .travis.yml - travis-conf: python3 scripts/make_travisconf.py > .travis.yml install-docs: pip install -r docs-requirements.txt docs: cd docs && make html sh: # open subshell with default test config $$SHELL; linkcheck: sphinx-build -W -b linkcheck ./docs/ ./docs/_build/linkcheck/ release: python setup.py sdist bdist_wheel upload install-dev: pip install -e . [ "$(ETESYNC_TESTS)" = "false" ] || pip install -e .[etesync] set -xe && if [ "$(REQUIREMENTS)" = "devel" ]; then \ pip install -U --force-reinstall \ git+https://github.com/mitsuhiko/click \ git+https://github.com/kennethreitz/requests; \ elif [ "$(REQUIREMENTS)" = "minimal" ]; then \ pip install -U --force-reinstall $$(python setup.py --quiet minimal_requirements); \ fi ssh-submodule-urls: git submodule foreach "\ echo -n 'Old: '; \ git remote get-url origin; \ git remote set-url origin \$$(git remote get-url origin | sed -e 's/https:\/\/github\.com\//git@github.com:/g'); \ echo -n 'New URL: '; \ git remote get-url origin" .PHONY: docs vdirsyncer-0.16.2/CHANGELOG.rst0000644000175000017500000004507413147536432020071 0ustar untitakeruntitaker00000000000000========= Changelog ========= This changelog only contains information that might be useful to end users and package maintainers. For further info, see the git commit log. Package maintainers and users who have to manually update their installation may want to subscribe to `GitHub's tag feed `_. Version 0.16.2 ============== *released on 24 August 2017* - Fix crash when using daterange or item_type filters in :storage:`google_calendar`, see :gh:`657`. - **Packagers:** Fixes for new version ``0.2.0`` of ``click-log``. The version requirements for the dependency ``click-log`` changed. 
Version 0.16.1
==============

*released on 8 August 2017*

- Removed remoteStorage support, see :gh:`647`.
- Fixed test failures caused by latest requests version, see :gh:`660`.

Version 0.16.0
==============

*released on 2 June 2017*

- Strip ``METHOD:PUBLISH`` added by some calendar providers, see :gh:`502`.
- Fix crash of Google storages when saving token file.
- Make DAV discovery more RFC-conformant, see :ghpr:`585`.
- Vdirsyncer is now tested against Xandikos, see :ghpr:`601`.
- Subfolders with a leading dot are now ignored during discover for
  ``filesystem`` storage. This makes it easier to combine it with version
  control.
- Statuses are now stored in a sqlite database. Old data is automatically
  migrated. Users with really large datasets should encounter performance
  improvements. This means that **sqlite3 is now a dependency of
  vdirsyncer**.
- **Vdirsyncer is now licensed under the 3-clause BSD license**, see
  :gh:`610`.
- Vdirsyncer now includes experimental support for `EteSync `_, see
  :ghpr:`614`.
- Vdirsyncer now uses more filesystem metadata for determining whether an
  item changed. You will notice a **possibly heavy CPU/IO spike on the first
  sync after upgrading**.
- **Packagers:** Reference ``systemd.service`` and ``systemd.timer`` unit
  files are provided. It is recommended to install these as documentation if
  your distribution is systemd-based.

Version 0.15.0
==============

*released on 28 February 2017*

- Deprecated syntax for configuration values is now completely rejected. All
  values now have to be valid JSON.
- A few UX improvements for Google storages, see :gh:`549` and :gh:`552`.
- Fix collection discovery for :storage:`google_contacts`, see :gh:`564`.
- iCloud is now tested on Travis, see :gh:`567`.

Version 0.14.1
==============

*released on 05 January 2017*

- ``vdirsyncer repair`` no longer changes "unsafe" UIDs by default; an extra
  option has to be specified. See :gh:`527`.
- A lot of important documentation updates.

Version 0.14.0
==============

*released on 26 October 2016*

- ``vdirsyncer sync`` now continues other uploads if one upload failed. The
  exit code in such situations is still non-zero.
- Add ``partial_sync`` option to pair section. See :ref:`the config docs `.
- Vdirsyncer will now warn if there's a string without quotes in your
  config. Please file issues if you find documentation that uses unquoted
  strings.
- Fix an issue that would break khal's config setup wizard.

Version 0.13.1
==============

*released on 30 September 2016*

- Fix a bug that would completely break collection discovery.

Version 0.13.0
==============

*released on 29 September 2016*

- Python 2 is no longer supported at all. See :gh:`219`.
- Config sections are now checked for duplicate names. This also means that
  you cannot have a storage section ``[storage foo]`` and a pair
  ``[pair foo]`` in your config; they have to have different names. This is
  done so that console output is always unambiguous. See :gh:`459`.
- Custom commands can now be used for conflict resolution during sync. See
  :gh:`127`.
- :storage:`http` now completely ignores UIDs. This avoids a lot of
  unnecessary down- and uploads.

Version 0.12.1
==============

*released on 20 August 2016*

- Fix a crash for Google and DAV storages. See :ghpr:`492`.
- Fix a URL-encoding problem with DavMail. See :gh:`491`.

Version 0.12
============

*released on 19 August 2016*

- :storage:`singlefile` now supports collections. See :ghpr:`488`.
Version 0.11.3 ============== *released on 29 July 2016* - Default value of ``auth`` parameter was changed from ``guess`` to ``basic`` to resolve issues with the Apple Calendar Server (:gh:`457`) and improve performance. See :gh:`461`. - **Packagers:** The ``click-threading`` requirement is now ``>=0.2``. It was incorrect before. See :gh:`478`. - Fix a bug in the DAV XML parsing code that would make vdirsyncer crash on certain input. See :gh:`480`. - Redirect chains should now be properly handled when resolving ``well-known`` URLs. See :ghpr:`481`. Version 0.11.2 ============== *released on 15 June 2016* - Fix typo that would break tests. Version 0.11.1 ============== *released on 15 June 2016* - Fix a bug in collection validation. - Fix a cosmetic bug in debug output. - Various documentation improvements. Version 0.11.0 ============== *released on 19 May 2016* - Discovery is no longer automatically done when running ``vdirsyncer sync``. ``vdirsyncer discover`` now has to be explicitly called. - Add a ``.plist`` example for Mac OS X. - Usage under Python 2 now requires a special config parameter to be set. - Various deprecated configuration parameters do no longer have specialized errormessages. The generic error message for unknown parameters is shown. - Vdirsyncer no longer warns that the ``passwordeval`` parameter has been renamed to ``password_command``. - The ``keyring`` fetching strategy has been dropped some versions ago, but the specialized error message has been dropped. - An old status format from version 0.4 is no longer supported. If you're experiencing problems, just delete your status folder. Version 0.10.0 ============== *released on 23 April 2016* - New storage types :storage:`google_calendar` and :storage:`google_contacts` have been added. - New global command line option `--config`, to specify an alternative config file. See :gh:`409`. - The ``collections`` parameter can now be used to synchronize differently-named collections with each other. - **Packagers:** The ``lxml`` dependency has been dropped. - XML parsing is now a lot stricter. Malfunctioning servers that used to work with vdirsyncer may stop working. Version 0.9.3 ============= *released on 22 March 2016* - :storage:`singlefile` and :storage:`http` now handle recurring events properly. - Fix a typo in the packaging guidelines. - Moved to ``pimutils`` organization on GitHub. Old links *should* redirect, but be aware of client software that doesn't properly handle redirects. Version 0.9.2 ============= *released on 13 March 2016* - Fixed testsuite for environments that don't have any web browser installed. See :ghpr:`384`. Version 0.9.1 ============= *released on 13 March 2016* - Removed leftover debug print statement in ``vdirsyncer discover``, see commit ``3d856749f37639821b148238ef35f1acba82db36``. - ``metasync`` will now strip whitespace from the start and the end of the values. See :gh:`358`. - New ``Packaging Guidelines`` have been added to the documentation. Version 0.9.0 ============= *released on 15 February 2016* - The ``collections`` parameter is now required in pair configurations. Vdirsyncer will tell you what to do in its error message. See :gh:`328`. Version 0.8.1 ============= *released on 30 January 2016* - Fix error messages when invalid parameter fetching strategy is used. This is important because users would receive awkward errors for using deprecated ``keyring`` fetching. 
Version 0.8.0 ============= *released on 27 January 2016* - Keyring support has been removed, which means that ``password.fetch = ["keyring", "example.com", "myuser"]`` doesn't work anymore. For existing setups: Use ``password.fetch = ["command", "keyring", "get", "example.com", "myuser"]`` instead, which is more generic. See the documentation for details. - Now emitting a warning when running under Python 2. See :gh:`219`. Version 0.7.5 ============= *released on 23 December 2015* - Fixed a bug in :storage:`remotestorage` that would try to open a CLI browser for OAuth. - Fix a packaging bug that would prevent vdirsyncer from working with newer lxml versions. Version 0.7.4 ============= *released on 22 December 2015* - Improved error messages instead of faulty server behavior, see :gh:`290` and :gh:`300`. - Safer shutdown of threadpool, avoid exceptions, see :gh:`291`. - Fix a sync bug for read-only storages see commit ``ed22764921b2e5bf6a934cf14aa9c5fede804d8e``. - Etag changes are no longer sufficient to trigger sync operations. An actual content change is also necessary. See :gh:`257`. - :storage:`remotestorage` now automatically opens authentication dialogs in your configured GUI browser. - **Packagers:** ``lxml>=3.1`` is now required (newer lower-bound version). Version 0.7.3 ============= *released on 05 November 2015* - Make remotestorage-dependencies actually optional. Version 0.7.2 ============= *released on 05 November 2015* - Un-break testsuite. Version 0.7.1 ============= *released on 05 November 2015* - **Packagers:** The setuptools extras ``keyring`` and ``remotestorage`` have been added. They're basically optional dependencies. See ``setup.py`` for more details. - Highly experimental remoteStorage support has been added. It may be completely overhauled or even removed in any version. - Removed mentions of old ``password_command`` in documentation. Version 0.7.0 ============= *released on 27 October 2015* - **Packagers:** New dependencies are ``click_threading``, ``click_log`` and ``click>=5.0``. - ``password_command`` is gone. Keyring support got completely overhauled. See :doc:`keyring`. Version 0.6.0 ============= *released on 06 August 2015* - ``password_command`` invocations with non-zero exit code are now fatal (and will abort synchronization) instead of just producing a warning. - Vdirsyncer is now able to synchronize metadata of collections. Set ``metadata = ["displayname"]`` and run ``vdirsyncer metasync``. - **Packagers:** Don't use the GitHub tarballs, but the PyPI ones. - **Packagers:** ``build.sh`` is gone, and ``Makefile`` is included in tarballs. See the content of ``Makefile`` on how to run tests post-packaging. - ``verify_fingerprint`` doesn't automatically disable ``verify`` anymore. Version 0.5.2 ============= *released on 15 June 2015* - Vdirsyncer now checks and corrects the permissions of status files. - Vdirsyncer is now more robust towards changing UIDs inside items. - Vdirsyncer is now handling unicode hrefs and UIDs correctly. Software that produces non-ASCII UIDs is broken, but apparently it exists. Version 0.5.1 ============= *released on 29 May 2015* - **N.b.: The PyPI upload of 0.5.0 is completely broken.** - Raise version of required requests-toolbelt to ``0.4.0``. - Command line should be a lot faster when no work is done, e.g. for help output. - Fix compatibility with iCloud again. - Use only one worker if debug mode is activated. - ``verify=false`` is now disallowed in vdirsyncer, please use ``verify_fingerprint`` instead. 
- Fixed a bug where vdirsyncer's DAV storage was not using the configured useragent for collection discovery. Version 0.4.4 ============= *released on 12 March 2015* - Support for client certificates via the new ``auth_cert`` parameter, see :gh:`182` and :ghpr:`183`. - The ``icalendar`` package is no longer required. - Several bugfixes related to collection creation. Version 0.4.3 ============= *released on 20 February 2015* - More performance improvements to ``singlefile``-storage. - Add ``post_hook`` param to ``filesystem``-storage. - Collection creation now also works with SabreDAV-based servers, such as Baikal or ownCloud. - Removed some workarounds for Radicale. Upgrading to the latest Radicale will fix the issues. - Fixed issues with iCloud discovery. - Vdirsyncer now includes a simple ``repair`` command that seeks to fix some broken items. Version 0.4.2 ============= *released on 30 January 2015* - Vdirsyncer now respects redirects when uploading and updating items. This might fix issues with Zimbra. - Relative ``status_path`` values are now interpreted as relative to the configuration file's directory. - Fixed compatibility with custom SabreDAV servers. See :gh:`166`. - Catch harmless threading exceptions that occur when shutting down vdirsyncer. See :gh:`167`. - Vdirsyncer now depends on ``atomicwrites``. - Massive performance improvements to ``singlefile``-storage. - Items with extremely long UIDs should now be saved properly in ``filesystem``-storage. See :gh:`173`. Version 0.4.1 ============= *released on 05 January 2015* - All ``create`` arguments from all storages are gone. Vdirsyncer now asks if it should try to create collections. - The old config values ``True``, ``False``, ``on``, ``off`` and ``None`` are now invalid. - UID conflicts are now properly handled instead of ignoring one item. Card- and CalDAV servers are already supposed to take care of those though. - Official Baikal support added. Version 0.4.0 ============= *released on 31 December 2014* - The ``passwordeval`` parameter has been renamed to ``password_command``. - The old way of writing certain config values such as lists is now gone. - Collection discovery has been rewritten. Old configuration files should be compatible with it, but vdirsyncer now caches the results of the collection discovery. You have to run ``vdirsyncer discover`` if collections were added or removed on one side. - Pair and storage names are now restricted to certain characters. Vdirsyncer will issue a clear error message if your configuration file is invalid in that regard. - Vdirsyncer now supports the XDG-Basedir specification. If the ``VDIRSYNCER_CONFIG`` environment variable isn't set and the ``~/.vdirsyncer/config`` file doesn't exist, it will look for the configuration file at ``$XDG_CONFIG_HOME/vdirsyncer/config``. - Some improvements to CardDAV and CalDAV discovery, based on problems found with FastMail. Support for ``.well-known``-URIs has been added. Version 0.3.4 ============= *released on 8 December 2014* - Some more bugfixes to config handling. Version 0.3.3 ============= *released on 8 December 2014* - Vdirsyncer now also works with iCloud. Particularly collection discovery and etag handling were fixed. - Vdirsyncer now encodes Cal- and CardDAV requests differently. This hasn't been well-tested with servers like Zimbra or SoGo, but isn't expected to cause any problems. - Vdirsyncer is now more robust regarding invalid responses from CalDAV servers. This should help with future compatibility with Davmail/Outlook. 
- Fix a bug when specifying ``item_types`` of :storage:`caldav` in the deprecated config format. - Fix a bug where vdirsyncer would ignore all but one character specified in ``unsafe_href_chars`` of :storage:`caldav` and :storage:`carddav`. Version 0.3.2 ============= *released on 3 December 2014* - The current config format has been deprecated, and support for it will be removed in version 0.4.0. Vdirsyncer warns about this now. Version 0.3.1 ============= *released on 24 November 2014* - Fixed a bug where vdirsyncer would delete items if they're deleted on side A but modified on side B. Instead vdirsyncer will now upload the new items to side A. See :gh:`128`. - Synchronization continues with the remaining pairs if one pair crashes, see :gh:`121`. - The ``processes`` config key is gone. There is now a ``--max-workers`` option on the CLI which has a similar purpose. See :ghpr:`126`. - The Read The Docs-theme is no longer required for building the docs. If it is not installed, the default theme will be used. See :gh:`134`. Version 0.3.0 ============= *released on 20 September 2014* - Add ``verify_fingerprint`` parameter to :storage:`http`, :storage:`caldav` and :storage:`carddav`, see :gh:`99` and :ghpr:`106`. - Add ``passwordeval`` parameter to :ref:`general_config`, see :gh:`108` and :ghpr:`117`. - Emit warnings (instead of exceptions) about certain invalid responses from the server, see :gh:`113`. This is apparently required for compatibility with Davmail. Version 0.2.5 ============= *released on 27 August 2014* - Don't ask for the password of one server more than once and fix multiple concurrency issues, see :gh:`101`. - Better validation of DAV endpoints. Version 0.2.4 ============= *released on 18 August 2014* - Include workaround for collection discovery with latest version of Radicale. - Include metadata files such as the changelog or license in source distribution, see :gh:`97` and :gh:`98`. Version 0.2.3 ============= *released on 11 August 2014* - Vdirsyncer now has a ``--version`` flag, see :gh:`92`. - Fix a lot of bugs related to special characters in URLs, see :gh:`49`. Version 0.2.2 ============= *released on 04 August 2014* - Remove a security check that caused problems with special characters in DAV URLs and certain servers. On top of that, the security check was nonsensical. See :gh:`87` and :gh:`91`. - Change some errors to warnings, see :gh:`88`. - Improve collection autodiscovery for servers without full support. Version 0.2.1 ============= *released on 05 July 2014* - Fix bug where vdirsyncer shows empty addressbooks when using CardDAV with Zimbra. - Fix infinite loop when password doesn't exist in system keyring. - Colorized errors, warnings and debug messages. - vdirsyncer now depends on the ``click`` package instead of argvard. Version 0.2.0 ============= *released on 12 June 2014* - vdirsyncer now depends on the ``icalendar`` package from PyPI, to get rid of its own broken parser. - vdirsyncer now also depends on ``requests_toolbelt``. This makes it possible to guess the authentication type instead of blankly assuming ``basic``. - Fix a semi-bug in caldav and carddav storages where a tuple (href, etag) instead of the proper etag would have been returned from the upload method. vdirsyncer might do unnecessary copying when upgrading to this version. - Add the storage :storage:`singlefile`. See :gh:`48`. - The ``collections`` parameter for pair sections now accepts the special values ``from a`` and ``from b`` for automatically discovering collections. 
See :ref:`pair_config`. - The ``read_only`` parameter was added to storage sections. See :ref:`storage_config`. Version 0.1.5 ============= *released on 14 May 2014* - Introduced changelogs - Many bugfixes - Many doc fixes - vdirsyncer now doesn't necessarily need UIDs anymore for synchronization. - vdirsyncer now aborts if one collection got completely emptied between synchronizations. See :gh:`42`. vdirsyncer-0.16.2/.coveragerc0000644000175000017500000000067713057746063020174 0ustar untitakeruntitaker00000000000000[run] branch = True [paths] source = vdirsyncer/ [report] exclude_lines = # Have to re-enable the standard pragma pragma: no cover # Don't complain about missing debug-only code: def __repr__ if self\.debug # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError # Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: vdirsyncer-0.16.2/AUTHORS.rst0000644000175000017500000000055713121521602017705 0ustar untitakeruntitaker00000000000000Contributors ============ In alphabetical order: - Ben Boeckel - Christian Geier - Clément Mondon - Hugo Osvaldo Barrera - Julian Mehne - Malte Kiefer - Marek Marczykowski-Górecki - Markus Unterwaditzer - Michael Adler - Thomas Weißschuh Additionally `FastMail sponsored a paid account for testing `_. Thanks! vdirsyncer-0.16.2/docs/0000755000175000017500000000000013147536465016774 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/docs/partial-sync.rst0000644000175000017500000000450613051102032022106 0ustar untitakeruntitaker00000000000000.. _partial_sync_tutorial: =============================== Syncing with read-only storages =============================== If you want to subscribe to a public, read-only `WebCAL `_-calendar but neither your server nor your calendar apps support that (or support it insufficiently), vdirsyncer can be used to synchronize such a public calendar ``A`` with a new calendar ``B`` of your own and keep ``B`` updated. Step 1: Create the target calendar ================================== First you need to create the calendar you want to sync the WebCAL-calendar with. Most servers offer a web interface for this. You then need to note the CalDAV URL of your calendar. Note that this URL should directly point to the calendar you just created, which means you would have one such URL for each calendar you have. Step 2: Creating the config =========================== Paste this into your vdirsyncer config:: [pair holidays] a = "holidays_public" b = "holidays_private" collections = null [storage holidays_public] type = "http" # The URL to your iCalendar file. url = ... [storage holidays_private] type = "caldav" # The direct URL to your calendar. url = ... # The credentials to your CalDAV server username = ... password = ... Then run ``vdirsyncer discover holidays`` and ``vdirsyncer sync holidays``, and your previously created calendar should be filled with events. Step 3: The partial_sync parameter ================================== .. versionadded:: 0.14 You may get into a situation where you want to hide or modify some events from your ``holidays`` calendar. If you try to do that at this point, you'll notice that vdirsyncer will revert any changes you've made after a few times of running ``sync``. This is because vdirsyncer wants to keep everything in sync, and it can't synchronize changes to the public holidays-calendar because it doesn't have the rights to do so. 
For such purposes you can set the ``partial_sync`` parameter to ``ignore``:: [pair holidays] a = "holidays_public" b = "holidays_private" collections = null partial_sync = ignore See :ref:`the config docs ` for more information. .. _nextCloud: https://nextcloud.com/ .. _Baikal: http://sabre.io/baikal/ .. _DAViCal: http://www.davical.org/ vdirsyncer-0.16.2/docs/keyring.rst0000644000175000017500000000341413121521602021153 0ustar untitakeruntitaker00000000000000================= Storing passwords ================= .. versionchanged:: 0.7.0 Password configuration got completely overhauled. Vdirsyncer can fetch passwords from several sources other than the config file. Command ======= Say you have the following configuration:: [storage foo] type = "caldav" url = ... username = "foo" password = "bar" But it bugs you that the password is stored in cleartext in the config file. You can do this:: [storage foo] type = "caldav" url = ... username = "foo" password.fetch = ["command", "~/get-password.sh", "more", "args"] You can fetch the username as well:: [storage foo] type = "caldav" url = ... username.fetch = ["command", "~/get-username.sh"] password.fetch = ["command", "~/get-password.sh"] Or really any kind of parameter in a storage section. With pass_ for example, you might find yourself writing something like this in your configuration file:: password.fetch = ["command", "pass", "caldav"] .. _pass: https://www.passwordstore.org/ Accessing the system keyring ---------------------------- As shown above, you can use the ``command`` strategy to fetch your credentials from arbitrary sources. A very common usecase is to fetch your password from the system keyring. The keyring_ Python package contains a command-line utility for fetching passwords from the OS's password store. Installation:: pip install keyring Basic usage:: password.fetch = ["command", "keyring", "get", "example.com", "foouser"] .. _keyring: https://github.com/jaraco/keyring/ Password Prompt =============== You can also simply prompt for the password:: [storage foo] type = "caldav" username = "myusername" password.fetch = ["prompt", "Password for CalDAV"] vdirsyncer-0.16.2/docs/conf.py0000644000175000017500000001111113121521602020241 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import datetime import json import os from sphinx.ext import autodoc import vdirsyncer extensions = ['sphinx.ext.autodoc'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'vdirsyncer' copyright = (u'2014-{}, Markus Unterwaditzer & contributors' .format(datetime.date.today().strftime('%Y'))) release = vdirsyncer.__version__ version = '.'.join(release.split('.')[:2]) # The short X.Y version. rst_epilog = '.. 
|vdirsyncer_version| replace:: %s' % release exclude_patterns = ['_build'] pygments_style = 'sphinx' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' try: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] except ImportError: html_theme = 'default' if not on_rtd: print('-' * 74) print('Warning: sphinx-rtd-theme not installed, building with default ' 'theme.') print('-' * 74) html_static_path = ['_static'] htmlhelp_basename = 'vdirsyncerdoc' latex_elements = {} latex_documents = [ ('index', 'vdirsyncer.tex', u'vdirsyncer Documentation', u'Markus Unterwaditzer', 'manual'), ] man_pages = [ ('index', 'vdirsyncer', u'vdirsyncer Documentation', [u'Markus Unterwaditzer'], 1) ] texinfo_documents = [ ('index', 'vdirsyncer', u'vdirsyncer Documentation', u'Markus Unterwaditzer', 'vdirsyncer', 'Synchronize calendars and contacts.', 'Miscellaneous'), ] def github_issue_role(name, rawtext, text, lineno, inliner, options={}, content=()): # noqa: B006 try: issue_num = int(text) if issue_num <= 0: raise ValueError() except ValueError: msg = inliner.reporter.error('Invalid GitHub issue: {}'.format(text), line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] import vdirsyncer from docutils import nodes link = '{}/{}/{}'.format(vdirsyncer.PROJECT_HOME, 'issues' if name == 'gh' else 'pull', issue_num) linktext = ('issue #{}' if name == 'gh' else 'pull request #{}').format(issue_num) node = nodes.reference(rawtext, linktext, refuri=link, **options) return [node], [] def format_storage_config(cls, header=True): if header is True: yield '[storage example_for_{}]'.format(cls.storage_name) yield 'type = "{}"'.format(cls.storage_name) from vdirsyncer.storage.base import Storage from vdirsyncer.utils import get_storage_init_specs handled = set() for spec in get_storage_init_specs(cls, stop_at=Storage): defaults = spec.defaults or () defaults = dict(zip(spec.args[-len(defaults):], defaults)) for key in spec.args[1:]: if key in handled: continue handled.add(key) comment = '' if key not in defaults else '#' value = defaults.get(key, '...') yield '{}{} = {}'.format(comment, key, json.dumps(value)) class StorageDocumenter(autodoc.ClassDocumenter): '''Custom formatter for auto-documenting storage classes. It assumes that the first line of the class' docstring is its own paragraph. After that first paragraph, an example configuration will be inserted and Sphinx' __init__ signature removed.''' objtype = 'storage' domain = None directivetype = 'storage' option_spec = {} @classmethod def can_document_member(cls, member, membername, isattr, parent): from vdirsyncer.storage.base import Storage return isinstance(member, Storage) def format_signature(self): return '' def add_directive_header(self, sig): directive = getattr(self, 'directivetype', self.objtype) name = self.object.storage_name self.add_line(u'.. 
%s:: %s%s' % (directive, name, sig), '')

    def get_doc(self, encoding=None, ignore=1):
        rv = autodoc.ClassDocumenter.get_doc(self, encoding, ignore)
        config = [u'    ' + x for x in format_storage_config(self.object)]
        rv[0] = rv[0][:1] + [u'::', u''] + config + [u''] + rv[0][1:]
        return rv


def setup(app):
    from sphinx.domains.python import PyObject
    app.add_object_type('storage', 'storage', 'pair: %s; storage',
                        doc_field_types=PyObject.doc_field_types)
    app.add_role('gh', github_issue_role)
    app.add_role('ghpr', github_issue_role)
    app.add_autodocumenter(StorageDocumenter)
vdirsyncer-0.16.2/docs/changelog.rst0000644000175000017500000000003713013735125021442 0ustar untitakeruntitaker00000000000000.. include:: ../CHANGELOG.rst
vdirsyncer-0.16.2/docs/installation.rst0000644000175000017500000000765113134641433022224 0ustar untitakeruntitaker00000000000000.. _installation:

============
Installation
============

OS/distro packages
------------------

The following packages are user-contributed. They may or may not be
up-to-date:

- `ArchLinux (AUR) `_
- `Debian `_
- `GNU Guix `_
- `Ubuntu `_
- `OS X (homebrew) `_
- `BSD (pkgsrc) `_
- `OpenBSD `_

We only support the latest version of vdirsyncer, which at the time of this
writing is |vdirsyncer_version|. Please **do not file bugs if you use an
older version**.

Some distributions have multiple release channels. Debian and Fedora, for
example, have a "stable" release channel that ships an older version of
vdirsyncer. Those versions aren't supported either.

If there is no suitable package for your distribution, you'll need to
:ref:`install vdirsyncer manually `. There is an easy command to
copy-and-paste for this as well, but you should be aware of its consequences.

.. _manual-installation:

Manual installation
-------------------

If your distribution doesn't provide a package for vdirsyncer, you can still
use Python's package manager "pip". First, you'll have to check that the
following things are installed:

- Python 3.3+ and pip.
- ``libxml`` and ``libxslt``
- ``zlib``
- Linux or OS X. **Windows is not supported**, see :gh:`535`.

On Linux systems, using the distro's package manager is the best way to
install these; for example, on Ubuntu::

    sudo apt-get install libxml2 libxslt1.1 zlib1g python3

Then you have several options. Incidentally, the following applies to most
Python software.

The dirty, easy way
~~~~~~~~~~~~~~~~~~~

The easiest way to install vdirsyncer at this point would be to run::

    pip install --user --ignore-installed vdirsyncer

- ``--user`` is to install without root rights (into your home directory).
- ``--ignore-installed`` is to work around Debian's potentially broken
  packages (see :ref:`debian-urllib3`).

This method has a major flaw though: Pip doesn't keep track of the files it
installs. Vdirsyncer's files would be located somewhere in
``~/.local/lib/python*``, but you can't possibly know which packages were
installed as dependencies of vdirsyncer and which ones were not, should you
decide to uninstall it. In other words, using pip that way would pollute your
home directory.

The clean, hard way
~~~~~~~~~~~~~~~~~~~

There is a way to install Python software without scattering stuff across
your filesystem: virtualenv_. There are a lot of resources on how to use it;
the simplest possible way would look something like::

    virtualenv ~/vdirsyncer_env
    ~/vdirsyncer_env/bin/pip install vdirsyncer
    alias vdirsyncer="~/vdirsyncer_env/bin/vdirsyncer"

You'll have to put the last line into your ``.bashrc`` or ``.bash_profile``.
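To check that the alias points at a working installation, ask the freshly
installed command for its version (shown only as a sanity check; the exact
output depends on the version pip picked)::

    vdirsyncer --version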
This method has two advantages: - It separately installs all Python packages into ``~/vdirsyncer_env/``, without relying on the system packages. This works around OS- or distro-specific issues. - You can delete ``~/vdirsyncer_env/`` to uninstall vdirsyncer entirely. The clean, easy way ~~~~~~~~~~~~~~~~~~~ pipsi_ is a new package manager for Python-based software that automatically sets up a virtualenv for each program you install. Assuming you have it installed on your operating system, you can do:: pipsi install --python python3 vdirsyncer and ``.local/bin/vdirsyncer`` will be your new vdirsyncer installation. To update vdirsyncer to the latest version:: pipsi upgrade vdirsyncer If you're done with vdirsyncer, you can do:: pipsi uninstall vdirsyncer and vdirsyncer will be uninstalled, including its dependencies. .. _virtualenv: https://virtualenv.readthedocs.io/ .. _pipsi: https://github.com/mitsuhiko/pipsi vdirsyncer-0.16.2/docs/make.bat0000644000175000017500000001506513013735125020372 0ustar untitakeruntitaker00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 
goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\vdirsyncer.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\vdirsyncer.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. 
goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end vdirsyncer-0.16.2/docs/tutorial.rst0000644000175000017500000002477013122440344021362 0ustar untitakeruntitaker00000000000000======== Tutorial ======== Before starting, :doc:`consider if you actually need vdirsyncer `. There are better alternatives available for particular usecases. Installation ============ See :ref:`installation`. Configuration ============= .. note:: - The `config.example from the repository `_ contains a very terse version of this. - In this example we set up contacts synchronization, but calendar sync works almost the same. Just swap ``type = "carddav"`` for ``type = "caldav"`` and ``fileext = ".vcf"`` for ``fileext = ".ics"``. - Take a look at the :doc:`problems` page if anything doesn't work like planned. By default, vdirsyncer looks for its configuration file in the following locations: - The file pointed to by the ``VDIRSYNCER_CONFIG`` environment variable. - ``~/.vdirsyncer/config``. - ``$XDG_CONFIG_HOME/vdirsyncer/config``, which is normally ``~/.config/vdirsyncer/config``. See the XDG-Basedir_ specification. .. _XDG-Basedir: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables The config file should start with a :ref:`general section `, where the only required parameter is ``status_path``. The following is a minimal example:: [general] status_path = "~/.vdirsyncer/status/" After the general section, an arbitrary amount of *pair and storage sections* might come. In vdirsyncer, synchronization is always done between two storages. Such storages are defined in :ref:`storage sections `, and which pairs of storages should actually be synchronized is defined in :ref:`pair section `. This format is copied from OfflineIMAP, where storages are called repositories and pairs are called accounts. The following example synchronizes ownCloud's addressbooks to ``~/.contacts/``:: [pair my_contacts] a = "my_contacts_local" b = "my_contacts_remote" collections = ["from a", "from b"] [storage my_contacts_local] type = "filesystem" path = "~/.contacts/" fileext = ".vcf" [storage my_contacts_remote] type = "carddav" # We can simplify this URL here as well. In theory it shouldn't matter. url = "https://owncloud.example.com/remote.php/carddav/" username = "bob" password = "asdf" .. note:: Configuration for other servers can be found at :ref:`supported-servers`. After running ``vdirsyncer discover`` and ``vdirsyncer sync``, ``~/.contacts/`` will contain subfolders for each addressbook, which in turn will contain a bunch of ``.vcf`` files which all contain a contact in ``VCARD`` format each. You can modify their contents, add new ones and delete some [1]_, and your changes will be synchronized to the CalDAV server after you run ``vdirsyncer sync`` again. For further reference, it uses the storages :storage:`filesystem` and :storage:`carddav`. However, if new collections are created on the server, it will not automatically start synchronizing those [2]_. You need to run ``vdirsyncer discover`` again to re-fetch this list instead. .. [1] You'll want to :doc:`use a helper program for this `. .. [2] Because collections are added rarely, and checking for this case before every synchronization isn't worth the overhead. More Configuration ================== .. 
.. [1] You'll want to :doc:`use a helper program for this <tutorials/index>`.

.. [2] Because collections are added rarely, and checking for this case
   before every synchronization isn't worth the overhead.

More Configuration
==================

.. _conflict_resolution_tutorial:

Conflict resolution
-------------------

What if the same item is changed on both sides? What should vdirsyncer do?
Three options are currently provided:

1. vdirsyncer displays an error message (the default);
2. vdirsyncer chooses one alternative version over the other;
3. vdirsyncer starts a command of your choice that is supposed to merge the
   two alternative versions.

Options 2 and 3 require adding a ``"conflict_resolution"`` parameter to the
pair section. Option 2 requires giving either ``"a wins"`` or ``"b wins"`` as
value to the parameter::

    [pair my_contacts]
    ...
    conflict_resolution = "b wins"

Earlier we wrote that ``b = "my_contacts_remote"``, so when vdirsyncer
encounters the situation where an item changed on both sides, it will simply
overwrite the local item with the one from the server.

Option 3 requires specifying as value of ``"conflict_resolution"`` an array
starting with ``"command"`` and containing paths and arguments to a command.
For example::

    [pair my_contacts]
    ...
    conflict_resolution = ["command", "vimdiff"]

In this example, ``vimdiff <a> <b>`` will be called, with ``<a>`` and ``<b>``
being two temporary files containing the conflicting items. The files need to
be exactly the same when the command returns. More arguments can be passed to
the command by adding more elements to the array.

See :ref:`pair_config` for the reference documentation.

.. _metasync_tutorial:

Metadata synchronization
------------------------

Besides items, vdirsyncer can also synchronize metadata like the addressbook's
or calendar's "human-friendly" name (internally called "displayname") or the
color associated with a calendar. For the purpose of explaining this feature,
let's switch to a different base example. This time we'll synchronize
calendars::

    [pair my_calendars]
    a = "my_calendars_local"
    b = "my_calendars_remote"
    collections = ["from a", "from b"]
    metadata = ["color"]

    [storage my_calendars_local]
    type = "filesystem"
    path = "~/.calendars/"
    fileext = ".ics"

    [storage my_calendars_remote]
    type = "caldav"
    url = "https://owncloud.example.com/remote.php/caldav/"
    username = "bob"
    password = "asdf"

Run ``vdirsyncer discover`` for discovery. Then you can use ``vdirsyncer
metasync`` to synchronize the ``color`` property between your local calendars
in ``~/.calendars/`` and your ownCloud. Locally the color is just represented
as a file called ``color`` within the calendar folder.
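For example, you can inspect or change a calendar's color locally just by
looking at that file (the calendar name below is only an example)::

    $ cat ~/.calendars/mycalendar/color
    #FF0000

If you edit the file, the next ``vdirsyncer metasync`` run will push the new
color to the server, subject to the usual conflict resolution rules.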
.. _collections_tutorial:

More information about collections
----------------------------------

"Collection" is a collective term for addressbooks and calendars. Each
collection from a storage has a "collection name", a unique identifier for
each collection. In the case of :storage:`filesystem`-storage, this is the
name of the directory that represents the collection; in the case of the
DAV-storages it is the last segment of the URL. We use this identifier in the
``collections`` parameter in the ``pair``-section.

This identifier doesn't change even if you rename your calendar in whatever UI
you have, because that only changes the so-called "displayname" property
[3]_. On some servers (iCloud, Google) this identifier is randomly generated
and has no correlation with the displayname you chose.

.. [3] Which you can also synchronize with ``metasync`` using ``metadata =
   ["displayname"]``.

There are three collection names that have a special meaning:

- ``"from a"``, ``"from b"``: A placeholder for all collections that can be
  found on side A/B when running ``vdirsyncer discover``.
- ``null``: The parameters given to the storage are exact and require no
  discovery.

The last one requires a bit more explanation. Assume this config, which
synchronizes two directories of addressbooks::

    [pair foobar]
    a = "foo"
    b = "bar"
    collections = ["from a", "from b"]

    [storage foo]
    type = "filesystem"
    fileext = ".vcf"
    path = "./contacts_foo/"

    [storage bar]
    type = "filesystem"
    fileext = ".vcf"
    path = "./contacts_bar/"

As we saw previously, this will synchronize all collections in
``./contacts_foo/`` with each same-named collection in ``./contacts_bar/``. If
a collection exists on one side but not the other, vdirsyncer will ask whether
to create that folder on the other side.

If we set ``collections = null``, ``./contacts_foo/`` and ``./contacts_bar/``
are no longer treated as folders containing collections, but as collections
themselves. This means that ``./contacts_foo/`` and ``./contacts_bar/`` will
contain ``.vcf``-files directly, not subfolders that contain ``.vcf``-files.

This is useful in situations where listing all collections fails, for example
because your DAV-server doesn't support it. In this case, you can set the
``url`` of your :storage:`carddav`- or :storage:`caldav`-storage to a URL that
points to your CalDAV/CardDAV collection directly.

Note that not all storages support the ``null``-collection; for example,
:storage:`google_contacts` and :storage:`google_calendar` don't.

Advanced collection configuration (server-to-server sync)
----------------------------------------------------------

The examples above are good enough if you want to synchronize a remote server
to a previously empty disk. However, some more trickery is required when you
have two servers with *already existing* collections which you want to
synchronize.

The core problem in this situation is that vdirsyncer pairs collections by
collection name by default (see the definition in the previous section;
basically a foldername or a remote UUID). When you have two servers, those
collection names may not line up as nicely.

Suppose you created two calendars "Test", one on a NextCloud server and one on
iCloud, using their respective web interfaces. The URLs look something like
this::

    NextCloud: https://example.com/remote.php/dav/calendars/user/test/
    iCloud:    https://p-XX.caldav.icloud.com/YYY/calendars/3b4c9995-5c67-4021-9fa0-be4633623e1c

Those are two DAV calendar collections. Their collection names will be
``test`` and ``3b4c9995-5c67-4021-9fa0-be4633623e1c`` respectively, so you
don't have a single name you can address them both with. You will need to
manually "pair" (no pun intended) those collections up like this::

    [pair doublecloud]
    a = "my_nextcloud"
    b = "my_icloud"
    collections = [["mytest", "test", "3b4c9995-5c67-4021-9fa0-be4633623e1c"]]

``mytest`` gives that combination of calendars a nice name you can use when
talking about it, so you would use ``vdirsyncer sync doublecloud/mytest`` to
say: "Only synchronize these two storages, nothing else that may be
configured".

.. note::

    **Why not use displaynames?**

    You may wonder why vdirsyncer couldn't just figure this out by itself.
    After all, you did name both collections "Test" (which is called "the
    displayname"), so why not pair collections by that value?

    There are a few problems with this idea:

    - Two calendars may have the exact same displayname.
    - A calendar may not have a (non-empty) displayname.
    - The displayname might change. Either you rename the calendar, or the
      calendar renames itself because you change a language setting.
    In the end, that property was never designed to be parsed by machines.
vdirsyncer-0.16.2/docs/when.rst0000644000175000017500000000462013051102032020446 0ustar untitakeruntitaker00000000000000==========================
When do I need Vdirsyncer?
==========================

Why not Dropbox + todo.txt?
---------------------------

Projects like `todo.txt <http://todotxt.com/>`_ criticize the complexity of
modern productivity apps, and rightly so. They set out to create a new,
super-simple, human-readable format, such that vim suffices for viewing the
raw data. However, when faced with the question of how to synchronize that
data across multiple devices, they seem to have reached a dead end with their
novel idea: "Let's just use Dropbox".

What does file sync software do if both files have changed since the last
sync? The answer is to ignore the question, sync as often as possible, and
hope for the best. If it comes to a sync conflict, most sync services don't
dare to merge the files, and instead create two copies on each computer.
Merging the two task lists is left to the user.

A better idea would've been to use ``git`` to synchronize the ``todo.txt``
file; it is at least able to resolve some basic conflicts.

Why not file sync (Dropbox, git, ...) + vdir?
---------------------------------------------

Since :doc:`vdirs <vdir>` are just a bunch of files, it is tempting to try
*file synchronization* for synchronizing your data between multiple computers,
such as:

* `Syncthing <https://syncthing.net/>`_
* `Dropbox <https://www.dropbox.com/>`_ or one of the gajillion services like
  it
* `unison <https://www.cis.upenn.edu/~bcpierce/unison/>`_
* Just ``git`` with an ``sshd``.

The disadvantages of those solutions largely depend on the exact file sync
program chosen:

* Like with ``todo.txt``, Dropbox and friends are agnostic/unaware of the
  files' contents. If a file has changed on both sides, Dropbox just copies
  both versions to both sides. This is a good idea if the user is directly
  interfacing with the file system and is able to resolve conflicts
  themselves. Here it might lead to erroneous behavior with e.g. ``khal``,
  since there are now two events with the same UID.

  This point doesn't apply to git: It has very good merging capabilities,
  better than what vdirsyncer currently has.

* Such a setup doesn't work at all with smartphones. Vdirsyncer, on the other
  hand, synchronizes with CardDAV/CalDAV servers, which can be accessed with
  e.g. DAVDroid_ or the apps by dmfs_.

.. _DAVDroid: http://davdroid.bitfire.at/
.. _dmfs: https://dmfs.org/
vdirsyncer-0.16.2/docs/donations.rst0000644000175000017500000000136413126020502021501 0ustar untitakeruntitaker00000000000000=========
Donations
=========

If you found my work useful, please consider donating. Thank you!

- Bitcoin: ``16sSHxZm263WHR9P9PJjCxp64jp9ooXKVt``
- `PayPal.me `_
- `Bountysource `_ is useful for funding work on a specific GitHub issue.
- There's also `Bountysource Salt `_, for one-time and recurring donations.
- Donations via Bountysource are publicly listed. Use PayPal if you dislike
  that.
- `Flattr `_ or `Gratipay `_ can be used for recurring donations.
vdirsyncer-0.16.2/docs/contributing.rst0000644000175000017500000001061413134641433022223 0ustar untitakeruntitaker00000000000000============================
Contributing to this project
============================

.. note::

    - Please read :doc:`contact` for questions and support requests.
    - All participants must follow the `pimutils Code of Conduct `_.

The issue tracker
=================

We use `GitHub issues <https://github.com/pimutils/vdirsyncer/issues>`_ for
organizing bug reports and feature requests.
The following `labels <https://github.com/pimutils/vdirsyncer/labels>`_ are of
interest:

* "Planning" is for issues that are still undecided, but where at least some
  discussion exists.
* "Blocked" is for issues that can't be worked on at the moment because some
  other unsolved problem exists. This problem may be a bug in some software
  dependency, for instance.
* "Ready" contains issues that are ready to work on.

If you just want to get started with contributing, the "ready" issues are an
option. Issues that are still in "Planning" are also an option, but require
more upfront thinking and may turn out to be impossible to solve, or at least
harder than anticipated. On the flip side, those tend to be the more
interesting issues as well, depending on how one looks at it.

All of those labels are also available as a kanban board on `waffle.io
<https://waffle.io/pimutils/vdirsyncer>`_. It is really just an alternative
overview over all issues, but might be easier to comprehend.

Feel free to :doc:`contact <contact>` me or comment on the relevant issues for
further information.

Reporting bugs
--------------

* Make sure your problem isn't already listed in :doc:`problems`.
* Make sure you have the absolutely latest version of vdirsyncer. For users of
  some Linux distributions such as Debian or Fedora this may not be the
  version that your distro offers. In those cases please file a bug against
  the distro package, not against upstream vdirsyncer.
* Use ``--verbosity=DEBUG`` when including output from vdirsyncer.

Suggesting features
-------------------

If you're suggesting a feature, keep in mind that vdirsyncer tries not to be a
full calendar or contacts client, but rather just the piece of software that
synchronizes all the data. :doc:`Take a look at the documentation for software
working with vdirsyncer <tutorials/index>`.

Submitting patches, pull requests
=================================

* **Discuss everything in the issue tracker first** (or contact me some other
  way) before implementing it.
* Make sure the tests pass. See below for running them.

  * But not because you wrote too few tests.

* Add yourself to ``AUTHORS.rst``, and add a note to ``CHANGELOG.rst`` too.

Running tests, how to set up your development environment
----------------------------------------------------------

For many patches, it might suffice to just let Travis run the tests. However,
Travis is slow, so you might want to run them locally too. For this, set up a
virtualenv_ and run this inside of it::

    # install vdirsyncer from the repo into the virtualenv. Prerequisite for
    # most other tasks.
    make install-dev

    make install-test   # install test dependencies
    make install-style  # install dependencies for stylechecking
    make install-docs   # install dependencies for building documentation

Then you can run::

    make test   # The normal testsuite
    make style  # Stylechecker
    make docs   # Build the HTML docs, output is at docs/_build/html/

The ``Makefile`` has a lot of options that allow you to control which tests
are run, and which servers are tested. Take a look at its code, where they are
all initialized and documented. For example, to test xandikos, run::

    make DAV_SERVER=xandikos install-test
    make DAV_SERVER=xandikos test

If you have any questions, feel free to open issues about it.

Structure of the testsuite
--------------------------

Within ``tests/``, there are three main folders:

- ``system`` contains system tests and also integration tests. A rough rule
  is: if the test is using temporary files, put it here.
- ``unit``, where each testcase tests a single class or function.
- ``storage`` runs a generic storage testsuite against all storages.
The reason for this separation is: We are planning to generate separate
coverage reports for each of those testsuites. Ideally ``unit`` would generate
palatable coverage of the entire codebase *on its own*, and the *combination*
of ``system`` and ``storage`` would as well.

.. _virtualenv: http://virtualenv.readthedocs.io/
vdirsyncer-0.16.2/docs/config.rst0000644000175000017500000001716213126225101020755 0ustar untitakeruntitaker00000000000000=========================
Full configuration manual
=========================

Vdirsyncer uses an ini-like format for storing its configuration. All values
are JSON; invalid JSON will get interpreted as a string::

    x = "foo"           # String
    x = foo             # Shorthand for same string
    x = 42              # Integer
    x = ["a", "b", "c"] # List of strings
    x = true            # Boolean
    x = false
    x = null            # Also known as None

.. _general_config:

General Section
===============

::

    [general]
    status_path = ...

- ``status_path``: A directory where vdirsyncer will store some additional
  data for the next sync.

  The data is needed to determine whether a new item means it has been added
  on one side or deleted on the other. Relative paths will be interpreted as
  relative to the configuration file's directory.

  See `A simple synchronization algorithm
  <https://unterwaditzer.net/2016/sync-algorithm.html>`_ for what exactly is
  in there.

.. _pair_config:

Pair Section
============

::

    [pair pair_name]
    a = ...
    b = ...
    #collections = null
    #conflict_resolution = null

- Pair names can consist of any alphanumeric characters and the underscore.

- ``a`` and ``b`` reference the storages to sync by their names.

- ``collections``: A list of collections to synchronize when ``vdirsyncer
  sync`` is executed. See also :ref:`collections_tutorial`.

  The special values ``"from a"`` and ``"from b"`` tell vdirsyncer to try
  autodiscovery on a specific storage.

  If the collection you want to sync doesn't have the same name on each side,
  you may also use a value of the form ``["config_name", "name_a",
  "name_b"]``. This will synchronize the collection ``name_a`` on side A with
  the collection ``name_b`` on side B. The ``config_name`` will be used for
  representation in CLI arguments and logging.

  Examples:

  - ``collections = ["from b", "foo", "bar"]`` makes vdirsyncer synchronize
    the collections from side B, and also the collections named "foo" and
    "bar".
  - ``collections = ["from b", "from a"]`` makes vdirsyncer synchronize all
    existing collections on either side.
  - ``collections = [["bar", "bar_a", "bar_b"], "foo"]`` makes vdirsyncer
    synchronize ``bar_a`` from side A with ``bar_b`` from side B, and also
    synchronize ``foo`` on both sides with each other.

- ``conflict_resolution``: Optional, defines how conflicts should be handled.
  A conflict occurs when one item (event, task) changed on both sides since
  the last sync. See also :ref:`conflict_resolution_tutorial`.

  Valid values are:

  - ``null``, where an error is shown and no changes are done.
  - ``"a wins"`` and ``"b wins"``, where the whole item is taken from one
    side.
  - ``["command", "vimdiff"]``: ``vimdiff <a> <b>`` will be called, where
    ``<a>`` and ``<b>`` are temporary files that contain the item of each side
    respectively. The files need to be exactly the same when the command
    returns.

    - ``vimdiff`` can be replaced with any other command. For example, in
      POSIX ``["command", "cp"]`` is equivalent to ``"a wins"``.
    - Additional list items will be forwarded as arguments. For example,
      ``["command", "vimdiff", "--noplugin"]`` runs ``vimdiff --noplugin``.

  Vdirsyncer never attempts to "automatically merge" the two items.
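  As an illustration: any executable that edits the two files so that they
  end up identical can serve as such a command. The following sketch (both
  the script and its "keep the longer version" strategy are made up for this
  example and are not part of vdirsyncer) would resolve every conflict in
  favor of the larger item::

      #!/bin/sh
      # merge-keep-longer.sh -- hypothetical helper; vdirsyncer calls it
      # with the two conflicting files as arguments. Both files must be
      # identical when we exit, so copy the longer item over the shorter one.
      a="$1"
      b="$2"
      if [ "$(wc -c < "$a")" -ge "$(wc -c < "$b")" ]; then
          cp "$a" "$b"
      else
          cp "$b" "$a"
      fi

  It would be referenced as ``conflict_resolution = ["command",
  "/path/to/merge-keep-longer.sh"]``.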
.. _partial_sync_def:

- ``partial_sync``: Assume A is read-only, B is not. If you change items on
  B, vdirsyncer can't sync the changes to A. What should happen instead?

  - ``error``: An error is shown.
  - ``ignore``: The change is ignored. However: Events deleted in B still
    reappear if they're updated in A.
  - ``revert`` (default): The change is reverted on next sync.

  See also :ref:`partial_sync_tutorial`.

- ``metadata``: Metadata keys that should be synchronized when ``vdirsyncer
  metasync`` is executed. Example::

      metadata = ["color", "displayname"]

  This synchronizes the ``color`` and the ``displayname`` properties. The
  ``conflict_resolution`` parameter applies here as well.

.. _storage_config:

Storage Section
===============

::

    [storage storage_name]
    type = ...

- Storage names can consist of any alphanumeric characters and the
  underscore.
- ``type`` defines which kind of storage is being defined. See
  :ref:`storages`.
- ``read_only`` defines whether the storage should be regarded as a read-only
  storage. The value ``true`` means synchronization will discard any changes
  made to the other side. The value ``false`` implies normal two-way
  synchronization.
- Any further parameters are passed on to the storage class.

.. _storages:

Supported Storages
------------------

CalDAV and CardDAV
++++++++++++++++++

.. autostorage:: vdirsyncer.storage.dav.CalDAVStorage

.. autostorage:: vdirsyncer.storage.dav.CardDAVStorage

Google
++++++

Vdirsyncer supports synchronization with Google calendars, with the
restriction that ``VTODO`` files are rejected by the server. Synchronization
with Google contacts is less reliable due to the neglected state of Google's
CardDAV API. **Google's CardDAV implementation is allegedly a disaster in
terms of data safety**. See `this blog post `_ for the details. Always back up
your data.

On first run you will be asked to authorize the application for Google
account access.

To use this storage type, you need to install some additional dependencies::

    pip install vdirsyncer[google]

Furthermore you need to register vdirsyncer as an application yourself to
obtain ``client_id`` and ``client_secret``, as `it is against Google's Terms
of Service to hardcode those into opensource software `_:

1. Go to the `Google API Manager <https://console.developers.google.com>`_
   and create a new project under any name.
2. Within that project, enable the "CalDAV" and "CardDAV" APIs (**not** the
   Calendar and Contacts APIs, those are different and won't work). There
   should be a searchbox where you can just enter those terms.
3. In the sidebar, select "Credentials" and create a new "OAuth Client ID".
   The application type is "Other". You'll be prompted to create an OAuth
   consent screen first. Fill out that form however you like.
4. Finally you should have a Client ID and a Client secret. Provide these in
   your storage config.

The ``token_file`` parameter should be a filepath where vdirsyncer can later
store authentication-related data. You do not need to create the file itself
or write anything to it.

.. note::

    You need to configure which calendars Google should offer vdirsyncer
    using a rather hidden `settings page
    <https://calendar.google.com/calendar/syncselect>`_.

.. autostorage:: vdirsyncer.storage.google.GoogleCalendarStorage

.. autostorage:: vdirsyncer.storage.google.GoogleContactsStorage

EteSync
+++++++

`EteSync <https://www.etesync.com/>`_ is a new cloud provider for end-to-end
encrypted contacts and calendar storage. Vdirsyncer contains **experimental**
support for it.
To use it, you need to install some optional dependencies:: pip install vdirsyncer[etesync] On first usage you will be prompted for the service password and the encryption password. Neither are stored. .. autostorage:: vdirsyncer.storage.etesync.EtesyncContacts .. autostorage:: vdirsyncer.storage.etesync.EtesyncCalendars Local +++++ .. autostorage:: vdirsyncer.storage.filesystem.FilesystemStorage .. autostorage:: vdirsyncer.storage.singlefile.SingleFileStorage Read-only storages ++++++++++++++++++ These storages don't support writing of their items, consequently ``read_only`` is set to ``true`` by default. Changing ``read_only`` to ``false`` on them leads to an error. .. autostorage:: vdirsyncer.storage.http.HttpStorage vdirsyncer-0.16.2/docs/packaging.rst0000644000175000017500000000577613121521602021444 0ustar untitakeruntitaker00000000000000==================== Packaging guidelines ==================== Thank you very much for packaging vdirsyncer! The following guidelines should help you to avoid some common pitfalls. While they are called guidelines and therefore theoretically not mandatory, if you consider going a different direction, please first open an issue or contact me otherwise instead of just going ahead. These guidelines exist for my own convenience too. Obtaining the source code ========================= The main distribution channel is `PyPI `_, and source tarballs can be obtained there. Do not use the ones from GitHub: Their tarballs contain useless junk and are more of a distraction than anything else. I give each release a tag in the git repo. If you want to get notified of new releases, `GitHub's feed `_ is a good way. Dependency versions =================== As with most Python packages, ``setup.py`` denotes the dependencies of vdirsyncer. It also contains lower-bound versions of each dependency. Older versions will be rejected by the testsuite. Testing ======= Everything testing-related goes through the ``Makefile`` in the root of the repository or PyPI package. Trying to e.g. run ``py.test`` directly will require a lot of environment variables to be set (for configuration) and you probably don't want to deal with that. You can install the testing dependencies with:: make install-test You probably don't want this since it will use pip to download the dependencies. Alternatively you can find the testing dependencies in ``test-requirements.txt``, again with lower-bound version requirements. You also have to have vdirsyncer fully installed at this point. Merely ``cd``-ing into the tarball will not be sufficient. Running the tests happens with:: make test Hypothesis will randomly generate test input. If you care about deterministic tests, set the ``DETERMINISTIC_TESTS`` variable to ``"true"``:: make DETERMINISTIC_TESTS=true test There are a lot of additional variables that allow you to test vdirsyncer against a particular server. Those variables are not "stable" and may change drastically between minor versions. Just don't use them, you are unlikely to find bugs that vdirsyncer's CI hasn't found. Documentation ============= Using Sphinx_ you can generate the documentation you're reading right now in a variety of formats, such as HTML, PDF, or even as a manpage. That said, I only take care of the HTML docs' formatting. You can find a list of dependencies in ``docs-requirements.txt``. 
Again, you can install those using pip with:: make install-docs Then change into the ``docs/`` directory and build whatever format you want using the ``Makefile`` in there (run ``make`` for the formats you can build). .. _Sphinx: www.sphinx-doc.org/ Contrib files ============= Reference ``systemd.service`` and ``systemd.timer`` unit files are provided. It is recommended to install this if your distribution is systemd-based. vdirsyncer-0.16.2/docs/contact.rst0000644000175000017500000000113713074442004021143 0ustar untitakeruntitaker00000000000000=================== Support and Contact =================== * The ``#pimutils`` `IRC channel on Freenode `_ might be active, depending on your timezone. Use it for support and general (including off-topic) discussion. * Open `a GitHub issue `_ for concrete bug reports and feature requests. * Lastly, you can also `contact the author directly `_. Do this for security issues. If that doesn't work out (i.e. if I don't respond within one week), use ``contact@pimutils.org``. vdirsyncer-0.16.2/docs/license.rst0000644000175000017500000000020313013735125021125 0ustar untitakeruntitaker00000000000000=================== Credits and License =================== .. include:: ../AUTHORS.rst License ======= .. include:: ../LICENSE vdirsyncer-0.16.2/docs/ssl-tutorial.rst0000644000175000017500000000527513121521602022154 0ustar untitakeruntitaker00000000000000.. _ssl-tutorial: ============================== SSL and certificate validation ============================== All SSL configuration is done per-storage. Pinning by fingerprint ---------------------- To pin the certificate by fingerprint:: [storage foo] type = "caldav" ... verify_fingerprint = "94:FD:7A:CB:50:75:A4:69:82:0A:F8:23:DF:07:FC:69:3E:CD:90:CA" #verify = false # Optional: Disable CA validation, useful for self-signed certs SHA1-, SHA256- or MD5-Fingerprints can be used. They're detected by their length. You can use the following command for obtaining a SHA-1 fingerprint:: echo -n | openssl s_client -connect unterwaditzer.net:443 | openssl x509 -noout -fingerprint Note that ``verify_fingerprint`` doesn't suffice for vdirsyncer to work with self-signed certificates (or certificates that are not in your trust store). You most likely need to set ``verify = false`` as well. This disables verification of the SSL certificate's expiration time and the existence of it in your trust store, all that's verified now is the fingerprint. However, please consider using `Let's Encrypt `_ such that you can forget about all of that. It is easier to deploy a free certificate from them than configuring all of your clients to accept the self-signed certificate. .. _ssl-cas: Custom root CAs --------------- To point vdirsyncer to a custom set of root CAs:: [storage foo] type = "caldav" ... verify = "/path/to/cert.pem" Vdirsyncer uses the requests_ library, which, by default, `uses its own set of trusted CAs `_. However, the actual behavior depends on how you have installed it. Many Linux distributions patch their ``python-requests`` package to use the system certificate CAs. Normally these two stores are similar enough for you to not care. But there are cases where certificate validation fails even though you can access the server fine through e.g. your browser. This usually indicates that your installation of the ``requests`` library is somehow broken. In such cases, it makes sense to explicitly set ``verify`` or ``verify_fingerprint`` as shown above. .. _requests: http://www.python-requests.org/ .. 
_ssl-client-certs:

Client Certificates
-------------------

Client certificates may be specified with the ``auth_cert`` parameter. If the
key and certificate are stored in the same file, it may be a string::

    [storage foo]
    type = "caldav"
    ...
    auth_cert = "/path/to/certificate.pem"

If the key and certificate are separate, a list may be used::

    [storage foo]
    type = "caldav"
    ...
    auth_cert = ["/path/to/certificate.crt", "/path/to/key.key"]
vdirsyncer-0.16.2/docs/vdir.rst0000644000175000017500000000776113013735125020467 0ustar untitakeruntitaker00000000000000=======================
The Vdir Storage Format
=======================

This document describes a standard for storing calendars and contacts on a
filesystem, with the main goal of being easy to implement. Vdirsyncer
synchronizes to vdirs via :storage:`filesystem`. Each vdir (basically just a
directory with some files in it) represents a calendar or addressbook.

Basic Structure
===============

The main folder (root) contains an arbitrary number of subfolders
(collections), which contain only files (items). Synonyms for "collection"
may be "addressbook" or "calendar".

An item is:

- A vCard_ file, in which case the file extension *must* be ``.vcf``, *or*
- An iCalendar_ file, in which case the file extension *must* be ``.ics``.

An item *should* contain a ``UID`` property as described by the vCard and
iCalendar standards. If it contains more than one ``UID`` property, the
values of those *must* not differ.

The file *must* contain exactly one event, task or contact. In most cases
this also implies only one ``VEVENT``/``VTODO``/``VCARD`` component per file,
but e.g. recurrence exceptions would require multiple ``VEVENT`` components
per event.

The filename *should* consist of the ``ident``, followed by the file
extension. The ``ident`` is either the ``UID``, if the item has one, else a
string with similar properties as the ``UID``. However, several restrictions
of the underlying filesystem might make an implementation of this naming
scheme for items' filenames impossible. How to deal with such cases is left
to the client, which is free to choose a different scheme for filenames
instead.

.. _vCard: https://tools.ietf.org/html/rfc6350
.. _iCalendar: https://tools.ietf.org/html/rfc5545
.. _CardDAV: http://tools.ietf.org/html/rfc6352
.. _CalDAV: http://tools.ietf.org/search/rfc4791

Metadata
========

Any of the below metadata files may be absent. None of the files listed below
have any file extensions.

- A file called ``color`` inside the vdir indicates the vdir's color, a
  property that is only relevant in UI design.

  Its content is an ASCII-encoded hex-RGB value of the form ``#RRGGBB``. For
  example, a file content of ``#FF0000`` indicates that the vdir has a red
  (user-visible) color. No short forms or informal values such as ``red`` (as
  known from CSS, for example) are allowed. The prefixing ``#`` must be
  present.

- A file called ``displayname`` contains a UTF-8 encoded label that may be
  used to represent the vdir in UIs.

Writing to vdirs
================

Creating and modifying items or metadata files *should* happen atomically_.
Writing to a temporary file on the same physical device, and then moving it
to the appropriate location is usually a very effective solution. For this
purpose, files with the extension ``.tmp`` may be created inside collections.

When changing an item, the original filename *must* be used.
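As an illustration, a sketch of such an atomic update in shell (all paths
here are made up for the example)::

    # Write the new revision next to the item, then rename it over the
    # original. rename() is atomic on POSIX filesystems, so readers never
    # observe a half-written item.
    cat new-revision.ics > ~/.calendars/work/1234.ics.tmp
    mv ~/.calendars/work/1234.ics.tmp ~/.calendars/work/1234.ics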
.. _atomically: https://en.wikipedia.org/wiki/Atomicity_%28programming%29

Reading from vdirs
==================

- Any file ending with the ``.tmp`` extension or lacking a file extension
  *must not* be treated as an item.
- The ``ident`` part of the filename *should not* be parsed to improve the
  speed of item lookup.

Considerations
==============

The primary reason this format was chosen is its compatibility with the
CardDAV_ and CalDAV_ standards.

Performance
-----------

Currently, vdirs suffer from a rather major performance problem, one which
current implementations try to mitigate by building up indices of the
collections for faster search and lookup.

The reason items' filenames don't contain any extra information is simple:
The solutions presented induced duplication of data, where one duplicate
might become out of date because of bad implementations. As it stands right
now, an index format could be formalized separately though.

Vdirsyncer doesn't really have to bother about efficient item lookup, because
its synchronization algorithm needs to fetch the whole list of items anyway.
Detecting changes is easily implemented by checking the files' modification
time.
vdirsyncer-0.16.2/docs/Makefile0000644000175000017500000001517213013735125020424 0ustar untitakeruntitaker00000000000000# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/vdirsyncer.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/vdirsyncer.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/vdirsyncer" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/vdirsyncer" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." 
$(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." vdirsyncer-0.16.2/docs/_static/0000755000175000017500000000000013147536465020422 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/docs/_static/.gitkeep0000644000175000017500000000000013013735125022023 0ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/docs/problems.rst0000644000175000017500000000124513051102032021320 0ustar untitakeruntitaker00000000000000============== Known Problems ============== For any unanswered questions or problems, see :doc:`contact`. .. _debian-urllib3: Requests-related ImportErrors ----------------------------- ImportError: No module named packages.urllib3.poolmanager ImportError: cannot import name iter_field_objects Debian and nowadays even other distros make modifications to the ``requests`` package that don't play well with packages assuming a normal ``requests``. This is due to stubbornness on both sides. See :gh:`82` and :gh:`140` for past discussions. You have one option to work around this, that is, to install vdirsyncer in a virtualenv, see :ref:`manual-installation`. vdirsyncer-0.16.2/docs/tutorials/0000755000175000017500000000000013147536465021022 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/docs/tutorials/icloud.rst0000644000175000017500000000163113144561565023030 0ustar untitakeruntitaker00000000000000.. _icloud_setup: ====== iCloud ====== Vdirsyncer is regularly tested against iCloud_. :: [storage cal] type = "caldav" url = "https://caldav.icloud.com/" username = ... password = ... 
[storage card] type = "carddav" url = "https://contacts.icloud.com/" username = ... password = ... Problems: - Vdirsyncer can't do two-factor auth with iCloud (there doesn't seem to be a way to do two-factor auth over the DAV APIs) You'll need to use `app-specific passwords `_ instead. - iCloud has a few special requirements when creating collections. In principle vdirsyncer can do it, but it is recommended to create them from an Apple client (or the iCloud web interface). - iCloud requires a minimum length of collection names. - Calendars created by vdirsyncer cannot be used as tasklists. .. _iCloud: https://www.icloud.com/ vdirsyncer-0.16.2/docs/tutorials/xandikos.rst0000644000175000017500000000113213121521602023344 0ustar untitakeruntitaker00000000000000======== Xandikos ======== Xandikos_ is a lightweight, yet complete CalDAV and CardDAV server, backed by git. Vdirsyncer is continuously tested against its latest version. After running ``./bin/xandikos --defaults -d $HOME/dav``, you should be able to point vdirsyncer against the root of Xandikos like this:: [storage cal] type = "caldav" url = "https://xandikos.example.com/" username = ... password = ... [storage card] type = "carddav" url = "https://xandikos.example.com/" username = ... password = ... .. _Xandikos: https://github.com/jelmer/xandikos vdirsyncer-0.16.2/docs/tutorials/systemd-timer.rst0000644000175000017500000000251513121521602024340 0ustar untitakeruntitaker00000000000000.. _systemd_timer-tutorial: Running as a systemd.timer ========================== vdirsyncer includes unit files to run at an interval (by default every 15±5 minutes). .. note:: These are not installed when installing via pip, only via distribution packages. If you installed via pip, or your distribution doesn't ship systemd unit files, you'll need to download vdirsyncer.service_ and vdirsyncer.timer_ into either ``/etc/systemd/user/`` or ``~/.local/share/systemd/user``. .. _vdirsyncer.service: https://raw.githubusercontent.com/pimutils/vdirsyncer/master/contrib/vdirsyncer.service .. _vdirsyncer.timer: https://raw.githubusercontent.com/pimutils/vdirsyncer/master/contrib/vdirsyncer.timer Activation ---------- To activate the timer, just run ``systemctl --user enable vdirsyncer.timer``. To see logs of previous runs, use ``journalctl --user -u vdirsyncer``. Configuration ------------- It's quite possible that the default "every fifteen minutes" interval isn't to your liking. No default will suit everybody, but this is configurable by simply running:: systemctl --user edit vdirsyncer This will open a blank editor, where you can override the timer by including:: OnBootSec=5m # This is how long after boot the first run takes place. OnUnitActiveSec=15m # This is how often subsequent runs take place. vdirsyncer-0.16.2/docs/tutorials/radicale.rst0000644000175000017500000000212413121521602023272 0ustar untitakeruntitaker00000000000000======== Radicale ======== Radicale_ is a very lightweight server, however, it intentionally doesn't implement the CalDAV and CardDAV standards completely, which might lead to issues even with very well-written clients. Apart from its non-conformity with standards, there are multiple other problems with its code quality and the way it is maintained. Consider using e.g. :doc:`xandikos` instead. That said, vdirsyncer is continuously tested against the git version and the latest PyPI release of Radicale. - Vdirsyncer can't create collections on Radicale. 
- Radicale doesn't `support time ranges in the calendar-query of CalDAV `_, so setting ``start_date`` and ``end_date`` for :storage:`caldav` will have no or unpredicted consequences. - `Versions of Radicale older than 0.9b1 choke on RFC-conform queries for all items of a collection `_. You have to set ``item_types = ["VTODO", "VEVENT"]`` in :storage:`caldav` for vdirsyncer to work with those versions. .. _Radicale: http://radicale.org/ vdirsyncer-0.16.2/docs/tutorials/baikal.rst0000644000175000017500000000043713121521602022756 0ustar untitakeruntitaker00000000000000====== Baikal ====== Vdirsyncer is continuously tested against the latest version of Baikal_. - Baikal up to ``0.2.7`` also uses an old version of SabreDAV, with the same issue as ownCloud, see :gh:`160`. This issue is fixed in later versions. .. _Baikal: http://baikal-server.com/ vdirsyncer-0.16.2/docs/tutorials/owncloud.rst0000644000175000017500000000126413121521602023364 0ustar untitakeruntitaker00000000000000.. _owncloud_setup: ======== ownCloud ======== Vdirsyncer is continuously tested against the latest version of ownCloud_:: [storage cal] type = "caldav" url = "https://example.com/remote.php/dav/" username = ... password = ... [storage card] type = "carddav" url = "https://example.com/remote.php/dav/" username = ... password = ... - *Versions older than 7.0.0:* ownCloud uses SabreDAV, which had problems detecting collisions and race-conditions. The problems were reported and are fixed in SabreDAV's repo, and the corresponding fix is also in ownCloud since 7.0.0. See :gh:`16` for more information. .. _ownCloud: https://owncloud.org/ vdirsyncer-0.16.2/docs/tutorials/todoman.rst0000644000175000017500000000075413121521602023176 0ustar untitakeruntitaker00000000000000======= Todoman ======= The iCalendar format also supports saving tasks in form of ``VTODO``-entries, with the same file extension as normal events: ``.ics``. Many CalDAV servers support synchronizing tasks, vdirsyncer does too. todoman_ is a CLI task manager supporting :doc:`vdir `. Its interface is similar to the ones of Taskwarrior or the todo.txt CLI app. You can use :storage:`filesystem` with it. .. _todoman: https://hugo.barrera.io/journal/2015/03/30/introducing-todoman/ vdirsyncer-0.16.2/docs/tutorials/google.rst0000644000175000017500000000035513121521602023006 0ustar untitakeruntitaker00000000000000====== Google ====== Using vdirsyncer with Google Calendar is possible as of 0.10, but it is not tested frequently. You can use :storage:`google_contacts` and :storage:`google_calendar`. For more information see :gh:`202` and :gh:`8`. vdirsyncer-0.16.2/docs/tutorials/fastmail.rst0000644000175000017500000000112513121521602023326 0ustar untitakeruntitaker00000000000000======== FastMail ======== Vdirsyncer is continuously tested against FastMail_, thanks to them for providing a free account for this purpose. There are no known issues with it. `FastMail's support pages `_ provide the settings to use:: [storage cal] type = "caldav" url = "https://caldav.messagingengine.com/" username = ... password = ... [storage card] type = "carddav" url = "https://carddav.messagingengine.com/" username = ... password = ... .. _FastMail: https://www.fastmail.com/ vdirsyncer-0.16.2/docs/tutorials/davmail.rst0000644000175000017500000000351513121521602023150 0ustar untitakeruntitaker00000000000000.. 
_davmail_setup:

===========================
DavMail (Exchange, Outlook)
===========================

DavMail_ is a proxy program that allows you to use Card- and CalDAV clients
with Outlook. That allows you to use vdirsyncer with Outlook.

In practice your success with DavMail may vary wildly. Depending on your
Exchange server you might get confronted with weird errors of all sorts
(including data-loss). **Make absolutely sure you use the latest DavMail**::

    [storage outlook]
    type = "caldav"
    url = "http://localhost:1080/users/user@example.com/calendar/"
    username = "user@example.com"
    password = ...

- Older versions of DavMail handle URLs case-insensitively. See :gh:`144`.
- DavMail handles malformed data on the Exchange server very poorly. In such
  cases the `Calendar Checking Tool for Outlook `_ might help.
- In some cases, you may see errors about duplicate events. It may look
  something like this::

      error: my_calendar/calendar: Storage "my_calendar_remote/calendar" contains multiple items with the same UID or even content. Vdirsyncer will now abort the synchronization of this collection, because the fix for this is not clear; It could be the result of a badly behaving server. You can try running:
      error:
      error:     vdirsyncer repair my_calendar_remote/calendar
      error:
      error: But make sure to have a backup of your data in some form. The offending hrefs are:
      [...]

  In order to fix this, you can try the Remove-DuplicateAppointments.ps1_
  PowerShell script that Microsoft has come up with in order to remove
  duplicates.

.. _DavMail: http://davmail.sourceforge.net/
.. _Remove-DuplicateAppointments.ps1: https://blogs.msdn.microsoft.com/emeamsgdev/2015/02/12/powershell-remove-duplicate-calendar-appointments/
vdirsyncer-0.16.2/docs/tutorials/claws-mail.rst0000644000175000017500000000477213121521602023572 0ustar untitakeruntitaker00000000000000.. _claws-mail-tutorial:

Vdirsyncer with Claws Mail
==========================

First of all, Claws-Mail only supports **read-only** functions for vCards. It
can only read contacts, but there's no editor.

Preparation
-----------

We need to install vdirsyncer; for that, look :doc:`here <installation>`.
Then we need to create some folders::

    mkdir ~/.vdirsyncer
    mkdir ~/.contacts

Configuration
-------------

Now we create the configuration for vdirsyncer. Open ``~/.vdirsyncer/config``
with a text editor. The config should look like this:

.. code:: ini

    [general]
    status_path = "~/.vdirsyncer/status/"

    [storage local]
    type = "singlefile"
    path = "~/.contacts/%s.vcf"

    [storage online]
    type = "carddav"
    url = "CARDDAV_LINK"
    username = "USERNAME"
    password = "PASSWORD"
    read_only = true

    [pair contacts]
    a = "local"
    b = "online"
    collections = ["from a", "from b"]
    conflict_resolution = "b wins"

- In the general section, we define the status folder path, for discovered
  collections and generally stuff that needs to persist between syncs.
- In the local section, we define that each addressbook should be stored in a
  single file, and the path for the contacts.
- In the online section, you must change the url, username and password to
  match your setup. We also set the storage to read-only so that no changes
  get synchronized back. Claws-Mail should not be able to make any changes
  anyway, but this is one extra safety step in case files get corrupted or
  vdirsyncer behaves erratically. You can leave that part out if you want to
  be able to edit those files locally.
- In the last section, we configure that online contacts win in a conflict
  situation. Configure this part however you like. A correct value depends on
  which side is most likely to be up-to-date.
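Because of the ``%s`` placeholder in ``path`` above, each discovered
addressbook ends up in its own ``.vcf`` file under ``~/.contacts/``. After
the first sync you might therefore see something like this (the collection
names are only examples)::

    $ ls ~/.contacts/
    Default.vcf  Family.vcf  Work.vcf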
Sync
----

Now we discover and sync our contacts::

    vdirsyncer discover contacts
    vdirsyncer sync contacts

Claws Mail
----------

Open Claws-Mail and go to **Tools** => **Addressbook**. Click on
**Addressbook** => **New vCard** and choose a name for the book. Then search
for the vCard in the folder **~/.contacts/**. Click OK, and you will see your
contacts.

.. note:: Claws-Mail shows only contacts that have a mail address.

Crontab
-------

At the end we create a crontab, so that vdirsyncer automatically syncs our
contacts every 30 minutes::

    crontab -e

At the end of that file, enter this line::

    */30 * * * * /usr/local/bin/vdirsyncer sync > /dev/null

And you're done!
vdirsyncer-0.16.2/docs/tutorials/nextcloud.rst0000644000175000017500000000075213121521602023540 0ustar untitakeruntitaker00000000000000=========
nextCloud
=========

Vdirsyncer is continuously tested against the latest version of nextCloud_::

    [storage cal]
    type = "caldav"
    url = "https://nextcloud.example.com/"
    username = ...
    password = ...

    [storage card]
    type = "carddav"
    url = "https://nextcloud.example.com/"

- WebCAL-subscriptions can't be discovered by vdirsyncer. See `this relevant
  issue `_.

.. _nextCloud: https://nextcloud.com/
vdirsyncer-0.16.2/docs/tutorials/index.rst0000644000175000017500000000347013147533547022665 0ustar untitakeruntitaker00000000000000===============
Other tutorials
===============

The following section contains tutorials not explicitly about any particular
core function of vdirsyncer. They usually show how to integrate vdirsyncer
with third-party software. Because of that, it may be that the information
regarding that other software only applies to specific versions of it.

.. note:: Please :doc:`contribute <contributing>` your own tutorials too!
   Pages are often only stubs and are lacking full examples.

Client applications
===================

.. toctree::
   :maxdepth: 1

   claws-mail
   systemd-timer
   todoman

Further applications, with missing pages:

- khal_, a CLI calendar application supporting :doc:`vdir <vdir>`. You can
  use :storage:`filesystem` with it.
- Many graphical calendar apps such as dayplanner_, Orage_ or rainlendar_
  save a calendar in a single ``.ics`` file. You can use
  :storage:`singlefile` with those.
- khard_, a commandline addressbook supporting :doc:`vdir <vdir>`. You can
  use :storage:`filesystem` with it.
- contactquery.c_, a small program explicitly written for querying vdirs
  from mutt.
- mates_, a commandline addressbook supporting :doc:`vdir <vdir>`.
- vdirel_, access :doc:`vdir <vdir>` contacts from Emacs.

.. _khal: http://lostpackets.de/khal/
.. _dayplanner: http://www.day-planner.org/
.. _Orage: http://www.kolumbus.fi/~w408237/orage/
.. _rainlendar: http://www.rainlendar.net/
.. _khard: https://github.com/scheibler/khard/
.. _contactquery.c: https://github.com/t-8ch/snippets/blob/master/contactquery.c
.. _mates: https://github.com/pimutils/mates.rs
.. _vdirel: https://github.com/DamienCassou/vdirel

.. _supported-servers:

Servers
=======

.. toctree::
   :maxdepth: 1

   baikal
   davmail
   fastmail
   google
   icloud
   nextcloud
   owncloud
   radicale
   xandikos
vdirsyncer-0.16.2/docs/index.rst0000644000175000017500000000167713126020502020621 0ustar untitakeruntitaker00000000000000==========
vdirsyncer
==========

- `Documentation <https://vdirsyncer.pimutils.org/en/stable/>`_
- `Source code <https://github.com/pimutils/vdirsyncer>`_

Vdirsyncer synchronizes your calendars and addressbooks between two
:ref:`storages <storages>`. The most popular purpose is to synchronize a
CalDAV/CardDAV server with a local folder or file.
The local data can then be accessed via a variety of :doc:`programs `, none of which have to know or worry about syncing to a server. It aims to be for CalDAV and CardDAV what `OfflineIMAP `_ is for IMAP. .. toctree:: :caption: Users :maxdepth: 1 when installation tutorial ssl-tutorial keyring partial-sync config tutorials/index problems .. toctree:: :caption: Developers :maxdepth: 1 contributing vdir .. toctree:: :caption: General :maxdepth: 1 packaging contact changelog license donations vdirsyncer-0.16.2/docs-requirements.txt0000644000175000017500000000004113051102032022216 0ustar untitakeruntitaker00000000000000sphinx != 1.4.7 sphinx_rtd_theme vdirsyncer-0.16.2/tests/0000755000175000017500000000000013147536465017206 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/conftest.py0000644000175000017500000000161713134641433021376 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- ''' General-purpose fixtures for vdirsyncer's testsuite. ''' import logging import os import click_log from hypothesis import HealthCheck, Verbosity, settings import pytest @pytest.fixture(autouse=True) def setup_logging(): click_log.basic_config('vdirsyncer').setLevel(logging.DEBUG) try: import pytest_benchmark except ImportError: @pytest.fixture def benchmark(): return lambda x: x() else: del pytest_benchmark settings.suppress_health_check = [HealthCheck.too_slow] settings.register_profile("ci", settings( max_examples=1000, verbosity=Verbosity.verbose, )) settings.register_profile("deterministic", settings( derandomize=True, )) if os.environ.get('DETERMINISTIC_TESTS', 'false').lower() == 'true': settings.load_profile("deterministic") elif os.environ.get('CI', 'false').lower() == 'true': settings.load_profile("ci") vdirsyncer-0.16.2/tests/storage/0000755000175000017500000000000013147536465020652 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/storage/conftest.py0000644000175000017500000000145413121521602023030 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest import uuid @pytest.fixture def slow_create_collection(request): # We need to properly clean up because otherwise we might run into # storage limits. to_delete = [] def delete_collections(): for s in to_delete: s.session.request('DELETE', '') request.addfinalizer(delete_collections) def inner(cls, args, collection): assert collection.startswith('test') collection += '-vdirsyncer-ci-' + str(uuid.uuid4()) args = cls.create_collection(collection, **args) s = cls(**args) _clear_collection(s) assert not list(s.list()) to_delete.append(s) return args return inner def _clear_collection(s): for href, etag in s.list(): s.delete(href, etag) vdirsyncer-0.16.2/tests/storage/test_memory.py0000644000175000017500000000047113013735125023557 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest from vdirsyncer.storage.memory import MemoryStorage from . 
import StorageTests class TestMemoryStorage(StorageTests): storage_class = MemoryStorage supports_collections = False @pytest.fixture def get_storage_args(self): return lambda **kw: kw vdirsyncer-0.16.2/tests/storage/dav/0000755000175000017500000000000013147536465021424 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/storage/dav/test_main.py0000644000175000017500000000173113013735125023745 0ustar untitakeruntitaker00000000000000from vdirsyncer.storage.dav import _merge_xml, _parse_xml def test_xml_utilities(): x = _parse_xml(''' HTTP/1.1 404 Not Found ''') response = x.find('{DAV:}response') props = _merge_xml(response.findall('{DAV:}propstat/{DAV:}prop')) assert props.find('{DAV:}resourcetype/{DAV:}collection') is not None assert props.find('{DAV:}getcontenttype') is not None vdirsyncer-0.16.2/tests/storage/dav/__init__.py0000644000175000017500000000316613121521602023516 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import uuid import os import pytest import requests import requests.exceptions from tests import assert_item_equals from vdirsyncer import exceptions from vdirsyncer.vobject import Item from .. import StorageTests, get_server_mixin dav_server = os.environ['DAV_SERVER'] ServerMixin = get_server_mixin(dav_server) class DAVStorageTests(ServerMixin, StorageTests): dav_server = dav_server @pytest.mark.skipif(dav_server == 'radicale', reason='Radicale is very tolerant.') def test_dav_broken_item(self, s): item = Item(u'HAHA:YES') with pytest.raises((exceptions.Error, requests.exceptions.HTTPError)): s.upload(item) assert not list(s.list()) def test_dav_empty_get_multi_performance(self, s, monkeypatch): def breakdown(*a, **kw): raise AssertionError('Expected not to be called.') monkeypatch.setattr('requests.sessions.Session.request', breakdown) try: assert list(s.get_multi([])) == [] finally: # Make sure monkeypatch doesn't interfere with DAV server teardown monkeypatch.undo() def test_dav_unicode_href(self, s, get_item, monkeypatch): if self.dav_server == 'radicale': pytest.skip('Radicale is unable to deal with unicode hrefs') monkeypatch.setattr(s, '_get_href', lambda item: item.ident + s.fileext) item = get_item(uid=u'град сатану' + str(uuid.uuid4())) href, etag = s.upload(item) item2, etag2 = s.get(href) assert_item_equals(item, item2) vdirsyncer-0.16.2/tests/storage/dav/test_caldav.py0000644000175000017500000001124713134636312024260 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import datetime from textwrap import dedent import pytest import requests import requests.exceptions from tests import EVENT_TEMPLATE, TASK_TEMPLATE, VCARD_TEMPLATE from vdirsyncer import exceptions from vdirsyncer.storage.dav import CalDAVStorage from . import DAVStorageTests, dav_server from .. 
import format_item class TestCalDAVStorage(DAVStorageTests): storage_class = CalDAVStorage @pytest.fixture(params=['VTODO', 'VEVENT']) def item_type(self, request): return request.param def test_doesnt_accept_vcard(self, item_type, get_storage_args): s = self.storage_class(item_types=(item_type,), **get_storage_args()) try: s.upload(format_item(VCARD_TEMPLATE)) except (exceptions.Error, requests.exceptions.HTTPError): pass assert not list(s.list()) # The `arg` param is not named `item_types` because that would hit # https://bitbucket.org/pytest-dev/pytest/issue/745/ @pytest.mark.parametrize('arg,calls_num', [ (('VTODO',), 1), (('VEVENT',), 1), (('VTODO', 'VEVENT'), 2), (('VTODO', 'VEVENT', 'VJOURNAL'), 3), ((), 1) ]) def test_item_types_performance(self, get_storage_args, arg, calls_num, monkeypatch): s = self.storage_class(item_types=arg, **get_storage_args()) old_parse = s._parse_prop_responses calls = [] def new_parse(*a, **kw): calls.append(None) return old_parse(*a, **kw) monkeypatch.setattr(s, '_parse_prop_responses', new_parse) list(s.list()) assert len(calls) == calls_num @pytest.mark.xfail(dav_server == 'radicale', reason='Radicale doesn\'t support timeranges.') def test_timerange_correctness(self, get_storage_args): start_date = datetime.datetime(2013, 9, 10) end_date = datetime.datetime(2013, 9, 13) s = self.storage_class(start_date=start_date, end_date=end_date, **get_storage_args()) too_old_item = format_item(dedent(u''' BEGIN:VCALENDAR VERSION:2.0 PRODID:-//hacksw/handcal//NONSGML v1.0//EN BEGIN:VEVENT DTSTART:19970714T170000Z DTEND:19970715T035959Z SUMMARY:Bastille Day Party X-SOMETHING:{r} UID:{r} END:VEVENT END:VCALENDAR ''').strip()) too_new_item = format_item(dedent(u''' BEGIN:VCALENDAR VERSION:2.0 PRODID:-//hacksw/handcal//NONSGML v1.0//EN BEGIN:VEVENT DTSTART:20150714T170000Z DTEND:20150715T035959Z SUMMARY:Another Bastille Day Party X-SOMETHING:{r} UID:{r} END:VEVENT END:VCALENDAR ''').strip()) good_item = format_item(dedent(u''' BEGIN:VCALENDAR VERSION:2.0 PRODID:-//hacksw/handcal//NONSGML v1.0//EN BEGIN:VEVENT DTSTART:20130911T170000Z DTEND:20130912T035959Z SUMMARY:What's with all these Bastille Day Partys X-SOMETHING:{r} UID:{r} END:VEVENT END:VCALENDAR ''').strip()) s.upload(too_old_item) s.upload(too_new_item) expected_href, _ = s.upload(good_item) (actual_href, _), = s.list() assert actual_href == expected_href def test_invalid_resource(self, monkeypatch, get_storage_args): calls = [] args = get_storage_args(collection=None) def request(session, method, url, **kwargs): assert url == args['url'] calls.append(None) r = requests.Response() r.status_code = 200 r._content = 'Hello World.' return r monkeypatch.setattr('requests.sessions.Session.request', request) with pytest.raises(ValueError): s = self.storage_class(**args) list(s.list()) assert len(calls) == 1 @pytest.mark.skipif(dav_server == 'icloud', reason='iCloud only accepts VEVENT') def test_item_types_general(self, s): event = s.upload(format_item(EVENT_TEMPLATE))[0] task = s.upload(format_item(TASK_TEMPLATE))[0] s.item_types = ('VTODO', 'VEVENT') def l(): return set(href for href, etag in s.list()) assert l() == {event, task} s.item_types = ('VTODO',) assert l() == {task} s.item_types = ('VEVENT',) assert l() == {event} s.item_types = () assert l() == {event, task} vdirsyncer-0.16.2/tests/storage/dav/test_carddav.py0000644000175000017500000000045713051102032024414 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest from vdirsyncer.storage.dav import CardDAVStorage from . 
import DAVStorageTests class TestCardDAVStorage(DAVStorageTests): storage_class = CardDAVStorage @pytest.fixture(params=['VCARD']) def item_type(self, request): return request.param vdirsyncer-0.16.2/tests/storage/test_filesystem.py0000644000175000017500000000507313134636312024440 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import subprocess import pytest from vdirsyncer.storage.filesystem import FilesystemStorage from vdirsyncer.vobject import Item from . import StorageTests class TestFilesystemStorage(StorageTests): storage_class = FilesystemStorage @pytest.fixture def get_storage_args(self, tmpdir): def inner(collection='test'): rv = {'path': str(tmpdir), 'fileext': '.txt', 'collection': collection} if collection is not None: rv = self.storage_class.create_collection(**rv) return rv return inner def test_is_not_directory(self, tmpdir): with pytest.raises(IOError): f = tmpdir.join('hue') f.write('stub') self.storage_class(str(tmpdir) + '/hue', '.txt') def test_broken_data(self, tmpdir): s = self.storage_class(str(tmpdir), '.txt') class BrokenItem(object): raw = u'Ц, Ш, Л, ж, Д, З, Ю'.encode('utf-8') uid = 'jeezus' ident = uid with pytest.raises(TypeError): s.upload(BrokenItem) assert not tmpdir.listdir() def test_ident_with_slash(self, tmpdir): s = self.storage_class(str(tmpdir), '.txt') s.upload(Item(u'UID:a/b/c')) item_file, = tmpdir.listdir() assert '/' not in item_file.basename and item_file.isfile() def test_too_long_uid(self, tmpdir): s = self.storage_class(str(tmpdir), '.txt') item = Item(u'UID:' + u'hue' * 600) href, etag = s.upload(item) assert item.uid not in href def test_post_hook_inactive(self, tmpdir, monkeypatch): def check_call_mock(*args, **kwargs): assert False monkeypatch.setattr(subprocess, 'call', check_call_mock) s = self.storage_class(str(tmpdir), '.txt', post_hook=None) s.upload(Item(u'UID:a/b/c')) def test_post_hook_active(self, tmpdir, monkeypatch): calls = [] exe = 'foo' def check_call_mock(l, *args, **kwargs): calls.append(True) assert len(l) == 2 assert l[0] == exe monkeypatch.setattr(subprocess, 'call', check_call_mock) s = self.storage_class(str(tmpdir), '.txt', post_hook=exe) s.upload(Item(u'UID:a/b/c')) assert calls def test_ignore_git_dirs(self, tmpdir): tmpdir.mkdir('.git').mkdir('foo') tmpdir.mkdir('a') tmpdir.mkdir('b') assert set(c['collection'] for c in self.storage_class.discover(str(tmpdir))) == {'a', 'b'} vdirsyncer-0.16.2/tests/storage/test_http_with_singlefile.py0000644000175000017500000000457313121521602026462 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest from requests import Response import vdirsyncer.storage.http from vdirsyncer.storage.base import Storage from vdirsyncer.storage.singlefile import SingleFileStorage from . import StorageTests class CombinedStorage(Storage): '''A subclass of HttpStorage to make testing easier. 
It supports writes via SingleFileStorage.''' _repr_attributes = ('url', 'path') storage_name = 'http_and_singlefile' def __init__(self, url, path, **kwargs): if kwargs.get('collection', None) is not None: raise ValueError() super(CombinedStorage, self).__init__(**kwargs) self.url = url self.path = path self._reader = vdirsyncer.storage.http.HttpStorage(url=url) self._reader._ignore_uids = False self._writer = SingleFileStorage(path=path) def list(self, *a, **kw): return self._reader.list(*a, **kw) def get(self, *a, **kw): self.list() return self._reader.get(*a, **kw) def upload(self, *a, **kw): return self._writer.upload(*a, **kw) def update(self, *a, **kw): return self._writer.update(*a, **kw) def delete(self, *a, **kw): return self._writer.delete(*a, **kw) class TestHttpStorage(StorageTests): storage_class = CombinedStorage supports_collections = False supports_metadata = False @pytest.fixture(autouse=True) def setup_tmpdir(self, tmpdir, monkeypatch): self.tmpfile = str(tmpdir.ensure('collection.txt')) def _request(method, url, *args, **kwargs): assert method == 'GET' assert url == 'http://localhost:123/collection.txt' assert 'vdirsyncer' in kwargs['headers']['User-Agent'] r = Response() r.status_code = 200 try: with open(self.tmpfile, 'rb') as f: r._content = f.read() except IOError: r._content = b'' r.headers['Content-Type'] = 'text/calendar' r.encoding = 'utf-8' return r monkeypatch.setattr(vdirsyncer.storage.http, 'request', _request) @pytest.fixture def get_storage_args(self): def inner(collection=None): assert collection is None return {'url': 'http://localhost:123/collection.txt', 'path': self.tmpfile} return inner vdirsyncer-0.16.2/tests/storage/__init__.py0000644000175000017500000002710013134636312022747 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import random import uuid import textwrap from urllib.parse import quote as urlquote, unquote as urlunquote import hypothesis.strategies as st from hypothesis import given import pytest from vdirsyncer import exceptions from vdirsyncer.storage.base import normalize_meta_value from vdirsyncer.vobject import Item from .. import EVENT_TEMPLATE, TASK_TEMPLATE, VCARD_TEMPLATE, \ assert_item_equals, normalize_item, printable_characters_strategy def get_server_mixin(server_name): from . import __name__ as base x = __import__('{}.servers.{}'.format(base, server_name), fromlist=['']) return x.ServerMixin def format_item(item_template, uid=None): # assert that special chars are handled correctly. r = random.random() return Item(item_template.format(r=r, uid=uid or r)) class StorageTests(object): storage_class = None supports_collections = True supports_metadata = True @pytest.fixture(params=['VEVENT', 'VTODO', 'VCARD']) def item_type(self, request): '''Parametrize with all supported item types.''' return request.param @pytest.fixture def get_storage_args(self): ''' Return a function with the following properties: :param collection: The name of the collection to create and use. 
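
        The returned function should produce a dict of keyword arguments
        suitable for instantiating ``self.storage_class``.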
''' raise NotImplementedError() @pytest.fixture def s(self, get_storage_args): return self.storage_class(**get_storage_args()) @pytest.fixture def get_item(self, item_type): template = { 'VEVENT': EVENT_TEMPLATE, 'VTODO': TASK_TEMPLATE, 'VCARD': VCARD_TEMPLATE, }[item_type] return lambda **kw: format_item(template, **kw) @pytest.fixture def requires_collections(self): if not self.supports_collections: pytest.skip('This storage does not support collections.') @pytest.fixture def requires_metadata(self): if not self.supports_metadata: pytest.skip('This storage does not support metadata.') def test_generic(self, s, get_item): items = [get_item() for i in range(1, 10)] hrefs = [] for item in items: href, etag = s.upload(item) if etag is None: _, etag = s.get(href) hrefs.append((href, etag)) hrefs.sort() assert hrefs == sorted(s.list()) for href, etag in hrefs: assert isinstance(href, (str, bytes)) assert isinstance(etag, (str, bytes)) assert s.has(href) item, etag2 = s.get(href) assert etag == etag2 def test_empty_get_multi(self, s): assert list(s.get_multi([])) == [] def test_get_multi_duplicates(self, s, get_item): href, etag = s.upload(get_item()) if etag is None: _, etag = s.get(href) (href2, item, etag2), = s.get_multi([href] * 2) assert href2 == href assert etag2 == etag def test_upload_already_existing(self, s, get_item): item = get_item() s.upload(item) with pytest.raises(exceptions.PreconditionFailed): s.upload(item) def test_upload(self, s, get_item): item = get_item() href, etag = s.upload(item) assert_item_equals(s.get(href)[0], item) def test_update(self, s, get_item): item = get_item() href, etag = s.upload(item) if etag is None: _, etag = s.get(href) assert_item_equals(s.get(href)[0], item) new_item = get_item(uid=item.uid) new_etag = s.update(href, new_item, etag) if new_etag is None: _, new_etag = s.get(href) # See https://github.com/pimutils/vdirsyncer/issues/48 assert isinstance(new_etag, (bytes, str)) assert_item_equals(s.get(href)[0], new_item) def test_update_nonexisting(self, s, get_item): item = get_item() with pytest.raises(exceptions.PreconditionFailed): s.update('huehue', item, '"123"') def test_wrong_etag(self, s, get_item): item = get_item() href, etag = s.upload(item) with pytest.raises(exceptions.PreconditionFailed): s.update(href, item, '"lolnope"') with pytest.raises(exceptions.PreconditionFailed): s.delete(href, '"lolnope"') def test_delete(self, s, get_item): href, etag = s.upload(get_item()) s.delete(href, etag) assert not list(s.list()) def test_delete_nonexisting(self, s, get_item): with pytest.raises(exceptions.PreconditionFailed): s.delete('1', '"123"') def test_list(self, s, get_item): assert not list(s.list()) href, etag = s.upload(get_item()) if etag is None: _, etag = s.get(href) assert list(s.list()) == [(href, etag)] def test_has(self, s, get_item): assert not s.has('asd') href, etag = s.upload(get_item()) assert s.has(href) assert not s.has('asd') s.delete(href, etag) assert not s.has(href) def test_update_others_stay_the_same(self, s, get_item): info = {} for _ in range(4): href, etag = s.upload(get_item()) if etag is None: _, etag = s.get(href) info[href] = etag assert dict( (href, etag) for href, item, etag in s.get_multi(href for href, etag in info.items()) ) == info def test_repr(self, s, get_storage_args): assert self.storage_class.__name__ in repr(s) assert s.instance_name is None def test_discover(self, requires_collections, get_storage_args, get_item): collections = set() for i in range(1, 5): collection = 'test{}'.format(i) s = 
self.storage_class(**get_storage_args(collection=collection)) assert not list(s.list()) s.upload(get_item()) collections.add(s.collection) actual = set( c['collection'] for c in self.storage_class.discover(**get_storage_args(collection=None)) ) assert actual >= collections def test_create_collection(self, requires_collections, get_storage_args, get_item): if getattr(self, 'dav_server', '') in \ ('icloud', 'fastmail', 'davical'): pytest.skip('Manual cleanup would be necessary.') args = get_storage_args(collection=None) args['collection'] = 'test' s = self.storage_class( **self.storage_class.create_collection(**args) ) href = s.upload(get_item())[0] assert href in set(href for href, etag in s.list()) def test_discover_collection_arg(self, requires_collections, get_storage_args): args = get_storage_args(collection='test2') with pytest.raises(TypeError) as excinfo: list(self.storage_class.discover(**args)) assert 'collection argument must not be given' in str(excinfo.value) def test_collection_arg(self, get_storage_args): if self.storage_class.storage_name.startswith('etesync'): pytest.skip('etesync uses UUIDs.') if self.supports_collections: s = self.storage_class(**get_storage_args(collection='test2')) # Can't do stronger assertion because of radicale, which needs a # fileextension to guess the collection type. assert 'test2' in s.collection else: with pytest.raises(ValueError): self.storage_class(collection='ayy', **get_storage_args()) def test_case_sensitive_uids(self, s, get_item): if s.storage_name == 'filesystem': pytest.skip('Behavior depends on the filesystem.') uid = str(uuid.uuid4()) s.upload(get_item(uid=uid.upper())) s.upload(get_item(uid=uid.lower())) items = list(href for href, etag in s.list()) assert len(items) == 2 assert len(set(items)) == 2 def test_specialchars(self, monkeypatch, requires_collections, get_storage_args, get_item): if getattr(self, 'dav_server', '') == 'radicale': pytest.skip('Radicale is fundamentally broken.') if getattr(self, 'dav_server', '') in ('icloud', 'fastmail'): pytest.skip('iCloud and FastMail reject this name.') monkeypatch.setattr('vdirsyncer.utils.generate_href', lambda x: x) uid = u'test @ foo ät bar град сатану' collection = 'test @ foo ät bar' s = self.storage_class(**get_storage_args(collection=collection)) item = get_item(uid=uid) href, etag = s.upload(item) item2, etag2 = s.get(href) if etag is not None: assert etag2 == etag assert_item_equals(item2, item) (_, etag3), = s.list() assert etag2 == etag3 # etesync uses UUIDs for collection names if self.storage_class.storage_name.startswith('etesync'): return assert collection in urlunquote(s.collection) if self.storage_class.storage_name.endswith('dav'): assert urlquote(uid, '/@:') in href def test_metadata(self, requires_metadata, s): if not getattr(self, 'dav_server', ''): assert not s.get_meta('color') assert not s.get_meta('displayname') try: s.set_meta('color', None) assert not s.get_meta('color') s.set_meta('color', u'#ff0000') assert s.get_meta('color') == u'#ff0000' except exceptions.UnsupportedMetadataError: pass for x in (u'hello world', u'hello wörld'): s.set_meta('displayname', x) rv = s.get_meta('displayname') assert rv == x assert isinstance(rv, str) @given(value=st.one_of( st.none(), printable_characters_strategy.filter(lambda x: x.strip() != x) )) def test_metadata_normalization(self, requires_metadata, s, value): x = s.get_meta('displayname') assert x == normalize_meta_value(x) if not getattr(self, 'dav_server', None): # ownCloud replaces "" with "unnamed" 
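            # (hence the exact round-trip is only asserted on non-DAV
            # storages)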
s.set_meta('displayname', value) assert s.get_meta('displayname') == normalize_meta_value(value) def test_recurring_events(self, s, item_type): if item_type != 'VEVENT': pytest.skip('This storage instance doesn\'t support iCalendar.') uid = str(uuid.uuid4()) item = Item(textwrap.dedent(u''' BEGIN:VCALENDAR VERSION:2.0 BEGIN:VEVENT DTSTART;TZID=UTC:20140325T084000Z DTEND;TZID=UTC:20140325T101000Z DTSTAMP:20140327T060506Z UID:{uid} RECURRENCE-ID;TZID=UTC:20140325T083000Z CREATED:20131216T033331Z DESCRIPTION: LAST-MODIFIED:20140327T060215Z LOCATION: SEQUENCE:1 STATUS:CONFIRMED SUMMARY:test Event TRANSP:OPAQUE END:VEVENT BEGIN:VEVENT DTSTART;TZID=UTC:20140128T083000Z DTEND;TZID=UTC:20140128T100000Z RRULE:FREQ=WEEKLY;UNTIL=20141208T213000Z;BYDAY=TU DTSTAMP:20140327T060506Z UID:{uid} CREATED:20131216T033331Z DESCRIPTION: LAST-MODIFIED:20140222T101012Z LOCATION: SEQUENCE:0 STATUS:CONFIRMED SUMMARY:Test event TRANSP:OPAQUE END:VEVENT END:VCALENDAR '''.format(uid=uid)).strip()) href, etag = s.upload(item) item2, etag2 = s.get(href) assert normalize_item(item) == normalize_item(item2) vdirsyncer-0.16.2/tests/storage/test_http.py0000644000175000017500000000712713051102032023216 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest from requests import Response from tests import normalize_item from vdirsyncer.exceptions import UserError from vdirsyncer.storage.http import HttpStorage, prepare_auth def test_list(monkeypatch): collection_url = 'http://127.0.0.1/calendar/collection.ics' items = [ (u'BEGIN:VEVENT\n' u'SUMMARY:Eine Kurzinfo\n' u'DESCRIPTION:Beschreibung des Termines\n' u'END:VEVENT'), (u'BEGIN:VEVENT\n' u'SUMMARY:Eine zweite Küèrzinfo\n' u'DESCRIPTION:Beschreibung des anderen Termines\n' u'BEGIN:VALARM\n' u'ACTION:AUDIO\n' u'TRIGGER:19980403T120000\n' u'ATTACH;FMTTYPE=audio/basic:http://host.com/pub/ssbanner.aud\n' u'REPEAT:4\n' u'DURATION:PT1H\n' u'END:VALARM\n' u'END:VEVENT') ] responses = [ u'\n'.join([u'BEGIN:VCALENDAR'] + items + [u'END:VCALENDAR']) ] * 2 def get(self, method, url, *a, **kw): assert method == 'GET' assert url == collection_url r = Response() r.status_code = 200 assert responses r._content = responses.pop().encode('utf-8') r.headers['Content-Type'] = 'text/calendar' r.encoding = 'ISO-8859-1' return r monkeypatch.setattr('requests.sessions.Session.request', get) s = HttpStorage(url=collection_url) found_items = {} for href, etag in s.list(): item, etag2 = s.get(href) assert item.uid is not None assert etag2 == etag found_items[normalize_item(item)] = href expected = set(normalize_item(u'BEGIN:VCALENDAR\n' + x + '\nEND:VCALENDAR') for x in items) assert set(found_items) == expected for href, etag in s.list(): item, etag2 = s.get(href) assert item.uid is not None assert etag2 == etag assert found_items[normalize_item(item)] == href def test_readonly_param(): url = 'http://example.com/' with pytest.raises(ValueError): HttpStorage(url=url, read_only=False) a = HttpStorage(url=url, read_only=True).read_only b = HttpStorage(url=url, read_only=None).read_only assert a is b is True def test_prepare_auth(): assert prepare_auth(None, '', '') is None assert prepare_auth(None, 'user', 'pwd') == ('user', 'pwd') assert prepare_auth('basic', 'user', 'pwd') == ('user', 'pwd') with pytest.raises(ValueError) as excinfo: assert prepare_auth('basic', '', 'pwd') assert 'you need to specify username and password' in \ str(excinfo.value).lower() from requests.auth import HTTPDigestAuth assert isinstance(prepare_auth('digest', 'user', 'pwd'), HTTPDigestAuth) with 
pytest.raises(ValueError) as excinfo: prepare_auth('ladida', 'user', 'pwd') assert 'unknown authentication method' in str(excinfo.value).lower() def test_prepare_auth_guess(monkeypatch): import requests_toolbelt.auth.guess assert isinstance(prepare_auth('guess', 'user', 'pwd'), requests_toolbelt.auth.guess.GuessAuth) monkeypatch.delattr(requests_toolbelt.auth.guess, 'GuessAuth') with pytest.raises(UserError) as excinfo: prepare_auth('guess', 'user', 'pwd') assert 'requests_toolbelt is too old' in str(excinfo.value).lower() def test_verify_false_disallowed(): with pytest.raises(ValueError) as excinfo: HttpStorage(url='http://example.com', verify=False) assert 'forbidden' in str(excinfo.value).lower() assert 'consider setting verify_fingerprint' in str(excinfo.value).lower() vdirsyncer-0.16.2/tests/storage/servers/0000755000175000017500000000000013147536465022343 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/storage/servers/radicale/0000755000175000017500000000000013147536465024107 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/storage/servers/radicale/install.sh0000644000175000017500000000050213121521602026061 0ustar untitakeruntitaker00000000000000#!/bin/sh set -e if [ "$REQUIREMENTS" = "release" ] || [ "$REQUIREMENTS" = "minimal" ]; then radicale_pkg="radicale" elif [ "$REQUIREMENTS" = "devel" ]; then radicale_pkg="git+https://github.com/Kozea/Radicale.git" else echo "Invalid requirements envvar" false fi pip install wsgi_intercept $radicale_pkg vdirsyncer-0.16.2/tests/storage/servers/radicale/__init__.py0000644000175000017500000000340113121521602026171 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import logging import pytest import radicale import radicale.config from pkg_resources import parse_version as ver import wsgi_intercept import wsgi_intercept.requests_intercept logger = logging.getLogger(__name__) class ServerMixin(object): @pytest.fixture(autouse=True) def setup(self, request, tmpdir): if ver(radicale.VERSION) < ver('2.0.0-pre'): raise RuntimeError('Testing against Radicale only works with ' 'Radicale >= 2.0.0') def get_app(): config = radicale.config.load(()) config.set('storage', 'filesystem_folder', str(tmpdir)) config.set('rights', 'type', 'owner_only') app = radicale.Application(config, logger) def is_authenticated(user, password): return user == 'bob' and password == 'bob' app.is_authenticated = is_authenticated return app wsgi_intercept.requests_intercept.install() wsgi_intercept.add_wsgi_intercept('127.0.0.1', 80, get_app) def teardown(): wsgi_intercept.remove_wsgi_intercept('127.0.0.1', 80) wsgi_intercept.requests_intercept.uninstall() request.addfinalizer(teardown) @pytest.fixture def get_storage_args(self, get_item): def inner(collection='test'): url = 'http://127.0.0.1/' rv = {'url': url, 'username': 'bob', 'password': 'bob'} if collection is not None: collection = collection + self.storage_class.fileext rv = self.storage_class.create_collection(collection, **rv) s = self.storage_class(**rv) assert not list(s.list()) return rv return inner vdirsyncer-0.16.2/tests/storage/servers/skip/0000755000175000017500000000000013147536465023311 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/storage/servers/skip/install.sh0000755000175000017500000000001213013735125025271 0ustar untitakeruntitaker00000000000000#!/bin/sh vdirsyncer-0.16.2/tests/storage/servers/skip/__init__.py0000644000175000017500000000021313013735125025400 0ustar untitakeruntitaker00000000000000import pytest class ServerMixin(object): 
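    # Stub mixin used when DAV tests are disabled: the fixture below makes
    # every test that requests storage args skip itself.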
@pytest.fixture def get_storage_args(self): pytest.skip('DAV tests disabled.') vdirsyncer-0.16.2/tests/storage/test_singlefile.py0000644000175000017500000000111113013735125024360 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest from vdirsyncer.storage.singlefile import SingleFileStorage from . import StorageTests class TestSingleFileStorage(StorageTests): storage_class = SingleFileStorage supports_metadata = False @pytest.fixture def get_storage_args(self, tmpdir): def inner(collection='test'): rv = {'path': str(tmpdir.join('%s.txt')), 'collection': collection} if collection is not None: rv = self.storage_class.create_collection(**rv) return rv return inner vdirsyncer-0.16.2/tests/unit/0000755000175000017500000000000013147536465020165 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/unit/utils/0000755000175000017500000000000013147536465021325 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/unit/utils/test_vobject.py0000644000175000017500000002253113134636312024361 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- from textwrap import dedent import hypothesis.strategies as st from hypothesis import assume, given from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule import pytest from tests import BARE_EVENT_TEMPLATE, EVENT_TEMPLATE, \ EVENT_WITH_TIMEZONE_TEMPLATE, VCARD_TEMPLATE, normalize_item, \ uid_strategy import vdirsyncer.vobject as vobject _simple_split = [ VCARD_TEMPLATE.format(r=123, uid=123), VCARD_TEMPLATE.format(r=345, uid=345), VCARD_TEMPLATE.format(r=678, uid=678) ] _simple_joined = u'\r\n'.join( [u'BEGIN:VADDRESSBOOK'] + _simple_split + [u'END:VADDRESSBOOK\r\n'] ) def test_split_collection_simple(benchmark): given = benchmark(lambda: list(vobject.split_collection(_simple_joined))) assert [normalize_item(item) for item in given] == \ [normalize_item(item) for item in _simple_split] assert [x.splitlines() for x in given] == \ [x.splitlines() for x in _simple_split] def test_split_collection_multiple_wrappers(benchmark): joined = u'\r\n'.join( u'BEGIN:VADDRESSBOOK\r\n' + x + u'\r\nEND:VADDRESSBOOK\r\n' for x in _simple_split ) given = benchmark(lambda: list(vobject.split_collection(joined))) assert [normalize_item(item) for item in given] == \ [normalize_item(item) for item in _simple_split] assert [x.splitlines() for x in given] == \ [x.splitlines() for x in _simple_split] def test_join_collection_simple(benchmark): given = benchmark(lambda: vobject.join_collection(_simple_split)) assert normalize_item(given) == normalize_item(_simple_joined) assert given.splitlines() == _simple_joined.splitlines() def test_join_collection_vevents(benchmark): actual = benchmark(lambda: vobject.join_collection([ dedent(""" BEGIN:VCALENDAR VERSION:2.0 PRODID:HUEHUE BEGIN:VTIMEZONE VALUE:The Timezone END:VTIMEZONE BEGIN:VEVENT VALUE:Event {} END:VEVENT END:VCALENDAR """).format(i) for i in range(3) ])) expected = dedent(""" BEGIN:VCALENDAR VERSION:2.0 PRODID:HUEHUE BEGIN:VTIMEZONE VALUE:The Timezone END:VTIMEZONE BEGIN:VEVENT VALUE:Event 0 END:VEVENT BEGIN:VEVENT VALUE:Event 1 END:VEVENT BEGIN:VEVENT VALUE:Event 2 END:VEVENT END:VCALENDAR """).lstrip() assert actual.splitlines() == expected.splitlines() def test_split_collection_timezones(): items = [ BARE_EVENT_TEMPLATE.format(r=123, uid=123), BARE_EVENT_TEMPLATE.format(r=345, uid=345) ] timezone = ( u'BEGIN:VTIMEZONE\r\n' u'TZID:/mozilla.org/20070129_1/Asia/Tokyo\r\n' u'X-LIC-LOCATION:Asia/Tokyo\r\n' u'BEGIN:STANDARD\r\n' u'TZOFFSETFROM:+0900\r\n' 
u'TZOFFSETTO:+0900\r\n' u'TZNAME:JST\r\n' u'DTSTART:19700101T000000\r\n' u'END:STANDARD\r\n' u'END:VTIMEZONE' ) full = u'\r\n'.join( [u'BEGIN:VCALENDAR'] + items + [timezone, u'END:VCALENDAR'] ) given = set(normalize_item(item) for item in vobject.split_collection(full)) expected = set( normalize_item(u'\r\n'.join(( u'BEGIN:VCALENDAR', item, timezone, u'END:VCALENDAR' ))) for item in items ) assert given == expected def test_split_contacts(): bare = '\r\n'.join([VCARD_TEMPLATE.format(r=x, uid=x) for x in range(4)]) with_wrapper = 'BEGIN:VADDRESSBOOK\r\n' + bare + '\nEND:VADDRESSBOOK\r\n' for _ in (bare, with_wrapper): split = list(vobject.split_collection(bare)) assert len(split) == 4 assert vobject.join_collection(split).splitlines() == \ with_wrapper.splitlines() def test_hash_item(): a = EVENT_TEMPLATE.format(r=1, uid=1) b = u'\n'.join(line for line in a.splitlines() if u'PRODID' not in line) assert vobject.hash_item(a) == vobject.hash_item(b) def test_multiline_uid(benchmark): a = (u'BEGIN:FOO\r\n' u'UID:123456789abcd\r\n' u' efgh\r\n' u'END:FOO\r\n') assert benchmark(lambda: vobject.Item(a).uid) == u'123456789abcdefgh' complex_uid_item = dedent(u''' BEGIN:VCALENDAR BEGIN:VTIMEZONE TZID:Europe/Rome X-LIC-LOCATION:Europe/Rome BEGIN:DAYLIGHT TZOFFSETFROM:+0100 TZOFFSETTO:+0200 TZNAME:CEST DTSTART:19700329T020000 RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=3 END:DAYLIGHT BEGIN:STANDARD TZOFFSETFROM:+0200 TZOFFSETTO:+0100 TZNAME:CET DTSTART:19701025T030000 RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10 END:STANDARD END:VTIMEZONE BEGIN:VEVENT DTSTART:20140124T133000Z DTEND:20140124T143000Z DTSTAMP:20140612T090652Z UID:040000008200E00074C5B7101A82E0080000000050AAABEEF50DCF 001000000062548482FA830A46B9EA62114AC9F0EF CREATED:20140110T102231Z DESCRIPTION:Test. LAST-MODIFIED:20140123T095221Z LOCATION:25.12.01.51 SEQUENCE:0 STATUS:CONFIRMED SUMMARY:Präsentation TRANSP:OPAQUE END:VEVENT END:VCALENDAR ''').strip() def test_multiline_uid_complex(benchmark): assert benchmark(lambda: vobject.Item(complex_uid_item).uid) == ( u'040000008200E00074C5B7101A82E008000000005' u'0AAABEEF50DCF001000000062548482FA830A46B9' u'EA62114AC9F0EF' ) def test_replace_multiline_uid(benchmark): def inner(): return vobject.Item(complex_uid_item).with_uid('a').uid assert benchmark(inner) == 'a' @pytest.mark.parametrize('template', [EVENT_TEMPLATE, EVENT_WITH_TIMEZONE_TEMPLATE, VCARD_TEMPLATE]) @given(uid=st.one_of(st.none(), uid_strategy)) def test_replace_uid(template, uid): item = vobject.Item(template.format(r=123, uid=123)).with_uid(uid) assert item.uid == uid if uid: assert item.raw.count('\nUID:{}'.format(uid)) == 1 else: assert '\nUID:' not in item.raw def test_broken_item(): with pytest.raises(ValueError) as excinfo: vobject._Component.parse('END:FOO') assert 'Parsing error at line 1' in str(excinfo.value) item = vobject.Item('END:FOO') assert item.parsed is None def test_multiple_items(): with pytest.raises(ValueError) as excinfo: vobject._Component.parse([ 'BEGIN:FOO', 'END:FOO', 'BEGIN:FOO', 'END:FOO', ]) assert 'Found 2 components, expected one' in str(excinfo.value) c1, c2 = vobject._Component.parse([ 'BEGIN:FOO', 'END:FOO', 'BEGIN:FOO', 'END:FOO', ], multiple=True) assert c1.name == c2.name == 'FOO' def test_input_types(): lines = ['BEGIN:FOO', 'FOO:BAR', 'END:FOO'] for x in (lines, '\r\n'.join(lines), '\r\n'.join(lines).encode('ascii')): c = vobject._Component.parse(x) assert c.name == 'FOO' assert c.props == ['FOO:BAR'] assert not c.subcomponents value_strategy = st.text( st.characters(blacklist_categories=( 'Zs', 'Zl', 
'Zp', 'Cc', 'Cs' ), blacklist_characters=':='), min_size=1 ).filter(lambda x: x.strip() == x) class VobjectMachine(RuleBasedStateMachine): Unparsed = Bundle('unparsed') Parsed = Bundle('parsed') @rule(target=Unparsed, joined=st.booleans(), encoded=st.booleans()) def get_unparsed_lines(self, joined, encoded): rv = ['BEGIN:FOO', 'FOO:YES', 'END:FOO'] if joined: rv = '\r\n'.join(rv) if encoded: rv = rv.encode('utf-8') elif encoded: assume(False) return rv @rule(unparsed=Unparsed, target=Parsed) def parse(self, unparsed): return vobject._Component.parse(unparsed) @rule(parsed=Parsed, target=Unparsed) def serialize(self, parsed): return list(parsed.dump_lines()) @rule(c=Parsed, key=uid_strategy, value=uid_strategy) def add_prop(self, c, key, value): c[key] = value assert c[key] == value assert key in c assert c.get(key) == value dump = '\r\n'.join(c.dump_lines()) assert key in dump and value in dump @rule(c=Parsed, key=uid_strategy, value=uid_strategy, params=st.lists(st.tuples(value_strategy, value_strategy))) def add_prop_raw(self, c, key, value, params): params_str = ','.join(k + '=' + v for k, v in params) c.props.insert(0, '{};{}:{}'.format(key, params_str, value)) assert c[key] == value assert key in c assert c.get(key) == value @rule(c=Parsed, sub_c=Parsed) def add_component(self, c, sub_c): assume(sub_c is not c and sub_c not in c) c.subcomponents.append(sub_c) assert '\r\n'.join(sub_c.dump_lines()) in '\r\n'.join(c.dump_lines()) @rule(c=Parsed) def sanity_check(self, c): c1 = vobject._Component.parse(c.dump_lines()) assert c1 == c TestVobjectMachine = VobjectMachine.TestCase def test_component_contains(): item = vobject._Component.parse([ 'BEGIN:FOO', 'FOO:YES', 'END:FOO' ]) assert 'FOO' in item assert 'BAZ' not in item with pytest.raises(ValueError): 42 in item vdirsyncer-0.16.2/tests/unit/test_exceptions.py0000644000175000017500000000051713054065744023754 0ustar untitakeruntitaker00000000000000from vdirsyncer import exceptions def test_user_error_problems(): e = exceptions.UserError('A few problems occured', problems=[ 'Problem one', 'Problem two', 'Problem three' ]) assert 'one' in str(e) assert 'two' in str(e) assert 'three' in str(e) assert 'problems occured' in str(e) vdirsyncer-0.16.2/tests/unit/test_metasync.py0000644000175000017500000001055313121521602023400 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import hypothesis.strategies as st from hypothesis import example, given import pytest from tests import blow_up from vdirsyncer.exceptions import UserError from vdirsyncer.metasync import MetaSyncConflict, logger, metasync from vdirsyncer.storage.base import normalize_meta_value from vdirsyncer.storage.memory import MemoryStorage def test_irrelevant_status(): a = MemoryStorage() b = MemoryStorage() status = {'foo': 'bar'} metasync(a, b, status, keys=()) assert not status def test_basic(monkeypatch): a = MemoryStorage() b = MemoryStorage() status = {} a.set_meta('foo', 'bar') metasync(a, b, status, keys=['foo']) assert a.get_meta('foo') == b.get_meta('foo') == 'bar' a.set_meta('foo', 'baz') metasync(a, b, status, keys=['foo']) assert a.get_meta('foo') == b.get_meta('foo') == 'baz' monkeypatch.setattr(a, 'set_meta', blow_up) monkeypatch.setattr(b, 'set_meta', blow_up) metasync(a, b, status, keys=['foo']) assert a.get_meta('foo') == b.get_meta('foo') == 'baz' monkeypatch.undo() monkeypatch.undo() b.set_meta('foo', None) metasync(a, b, status, keys=['foo']) assert not a.get_meta('foo') and not b.get_meta('foo') @pytest.fixture def conflict_state(request): a = 
MemoryStorage() b = MemoryStorage() status = {} a.set_meta('foo', 'bar') b.set_meta('foo', 'baz') def cleanup(): assert a.get_meta('foo') == 'bar' assert b.get_meta('foo') == 'baz' assert not status request.addfinalizer(cleanup) return a, b, status def test_conflict(conflict_state): a, b, status = conflict_state with pytest.raises(MetaSyncConflict): metasync(a, b, status, keys=['foo']) def test_invalid_conflict_resolution(conflict_state): a, b, status = conflict_state with pytest.raises(UserError) as excinfo: metasync(a, b, status, keys=['foo'], conflict_resolution='foo') assert 'Invalid conflict resolution setting' in str(excinfo.value) def test_warning_on_custom_conflict_commands(conflict_state, monkeypatch): a, b, status = conflict_state warnings = [] monkeypatch.setattr(logger, 'warning', warnings.append) with pytest.raises(MetaSyncConflict): metasync(a, b, status, keys=['foo'], conflict_resolution=lambda *a, **kw: None) assert warnings == ['Custom commands don\'t work on metasync.'] def test_conflict_same_content(): a = MemoryStorage() b = MemoryStorage() status = {} a.set_meta('foo', 'bar') b.set_meta('foo', 'bar') metasync(a, b, status, keys=['foo']) assert a.get_meta('foo') == b.get_meta('foo') == status['foo'] == 'bar' @pytest.mark.parametrize('wins', 'ab') def test_conflict_x_wins(wins): a = MemoryStorage() b = MemoryStorage() status = {} a.set_meta('foo', 'bar') b.set_meta('foo', 'baz') metasync(a, b, status, keys=['foo'], conflict_resolution='a wins' if wins == 'a' else 'b wins') assert a.get_meta('foo') == b.get_meta('foo') == status['foo'] == ( 'bar' if wins == 'a' else 'baz' ) keys = st.text(min_size=1).filter(lambda x: x.strip() == x) values = st.text().filter(lambda x: normalize_meta_value(x) == x) metadata = st.dictionaries(keys, values) @given( a=metadata, b=metadata, status=metadata, keys=st.sets(keys), conflict_resolution=st.just('a wins') | st.just('b wins') ) @example(a={u'0': u'0'}, b={}, status={u'0': u'0'}, keys={u'0'}, conflict_resolution='a wins') @example(a={'0': '0'}, b={'0': '1'}, status={'0': '0'}, keys={'0'}, conflict_resolution='a wins') def test_fuzzing(a, b, status, keys, conflict_resolution): def _get_storage(m, instance_name): s = MemoryStorage(instance_name=instance_name) s.metadata = m return s a = _get_storage(a, 'A') b = _get_storage(b, 'B') winning_storage = (a if conflict_resolution == 'a wins' else b) expected_values = dict((key, winning_storage.get_meta(key)) for key in keys if key not in status) metasync(a, b, status, keys=keys, conflict_resolution=conflict_resolution) for key in keys: s = status.get(key, '') assert a.get_meta(key) == b.get_meta(key) == s if expected_values.get(key, '') and s: assert s == expected_values[key] vdirsyncer-0.16.2/tests/unit/test_repair.py0000644000175000017500000000437213134636312023052 0ustar untitakeruntitaker00000000000000from hypothesis import given, settings import pytest from tests import uid_strategy from vdirsyncer.repair import IrreparableItem, repair_item, repair_storage from vdirsyncer.storage.memory import MemoryStorage from vdirsyncer.utils import href_safe from vdirsyncer.vobject import Item @given(uid=uid_strategy) @settings(perform_health_check=False) # Using the random module for UIDs def test_repair_uids(uid): s = MemoryStorage() s.items = { 'one': ( 'asdf', Item(u'BEGIN:VCARD\nFN:Hans\nUID:{}\nEND:VCARD'.format(uid)) ), 'two': ( 'asdf', Item(u'BEGIN:VCARD\nFN:Peppi\nUID:{}\nEND:VCARD'.format(uid)) ) } uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()] assert uid1 == uid2 
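    # Even with repair_unsafe_uid=False, repair_storage is expected to
    # resolve the UID collision by assigning a fresh UID to one of the items.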
repair_storage(s, repair_unsafe_uid=False) uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()] assert uid1 != uid2 @given(uid=uid_strategy.filter(lambda x: not href_safe(x))) @settings(perform_health_check=False) # Using the random module for UIDs def test_repair_unsafe_uids(uid): s = MemoryStorage() item = Item(u'BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)) href, etag = s.upload(item) assert s.get(href)[0].uid == uid assert not href_safe(uid) repair_storage(s, repair_unsafe_uid=True) new_href = list(s.list())[0][0] assert href_safe(new_href) newuid = s.get(new_href)[0].uid assert href_safe(newuid) @pytest.mark.parametrize('uid,href', [ ('b@dh0mbr3', 'perfectly-fine'), ('perfectly-fine', 'b@dh0mbr3') ]) def test_repair_unsafe_href(uid, href): item = Item('BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)) new_item = repair_item(href, item, set(), True) assert new_item.raw != item.raw assert new_item.uid != item.uid assert href_safe(new_item.uid) def test_repair_do_nothing(): item = Item('BEGIN:VCARD\nUID:justfine\nEND:VCARD') assert repair_item('fine', item, set(), True) is item assert repair_item('@@@@/fine', item, set(), True) is item @pytest.mark.parametrize('raw', [ 'AYYY', '', '@@@@', 'BEGIN:VCARD', 'BEGIN:FOO\nEND:FOO' ]) def test_repair_irreparable(raw): with pytest.raises(IrreparableItem): repair_item('fine', Item(raw), set(), True) vdirsyncer-0.16.2/tests/unit/test_sync.py0000644000175000017500000004347013144561565022556 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- from copy import deepcopy import hypothesis.strategies as st from hypothesis import assume from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule import pytest from tests import blow_up, uid_strategy from vdirsyncer.storage.memory import MemoryStorage, _random_string from vdirsyncer.sync import BothReadOnly, IdentConflict, PartialSync, \ StorageEmpty, SqliteStatus, SyncConflict, sync as _sync from vdirsyncer.vobject import Item def sync(a, b, status, *args, **kwargs): new_status = SqliteStatus(':memory:') new_status.load_legacy_status(status) rv = _sync(a, b, new_status, *args, **kwargs) status.clear() status.update(new_status.to_legacy_status()) return rv def empty_storage(x): return list(x.list()) == [] def items(s): return set(x[1].raw for x in s.items.values()) def test_irrelevant_status(): a = MemoryStorage() b = MemoryStorage() status = {'1': ('1', 1234, '1.ics', 2345)} sync(a, b, status) assert not status assert not items(a) assert not items(b) def test_missing_status(): a = MemoryStorage() b = MemoryStorage() status = {} item = Item(u'asdf') a.upload(item) b.upload(item) sync(a, b, status) assert len(status) == 1 assert items(a) == items(b) == {item.raw} def test_missing_status_and_different_items(): a = MemoryStorage() b = MemoryStorage() status = {} item1 = Item(u'UID:1\nhaha') item2 = Item(u'UID:1\nhoho') a.upload(item1) b.upload(item2) with pytest.raises(SyncConflict): sync(a, b, status) assert not status sync(a, b, status, conflict_resolution='a wins') assert items(a) == items(b) == {item1.raw} def test_read_only_and_prefetch(): a = MemoryStorage() b = MemoryStorage() b.read_only = True status = {} item1 = Item(u'UID:1\nhaha') item2 = Item(u'UID:2\nhoho') a.upload(item1) a.upload(item2) sync(a, b, status, force_delete=True) sync(a, b, status, force_delete=True) assert not items(a) and not items(b) def test_partial_sync_error(): a = MemoryStorage() b = MemoryStorage() status = {} a.upload(Item('UID:0')) b.read_only = True with pytest.raises(PartialSync): sync(a, b, status, 
partial_sync='error') def test_partial_sync_ignore(): a = MemoryStorage() b = MemoryStorage() status = {} item0 = Item('UID:0\nhehe') a.upload(item0) b.upload(item0) b.read_only = True item1 = Item('UID:1\nhaha') a.upload(item1) sync(a, b, status, partial_sync='ignore') sync(a, b, status, partial_sync='ignore') assert items(a) == {item0.raw, item1.raw} assert items(b) == {item0.raw} def test_partial_sync_ignore2(): a = MemoryStorage() b = MemoryStorage() status = {} href, etag = a.upload(Item('UID:0')) a.read_only = True sync(a, b, status, partial_sync='ignore', force_delete=True) assert items(b) == items(a) == {'UID:0'} b.items.clear() sync(a, b, status, partial_sync='ignore', force_delete=True) sync(a, b, status, partial_sync='ignore', force_delete=True) assert items(a) == {'UID:0'} assert not b.items a.read_only = False a.update(href, Item('UID:0\nupdated'), etag) a.read_only = True sync(a, b, status, partial_sync='ignore', force_delete=True) assert items(b) == items(a) == {'UID:0\nupdated'} def test_upload_and_update(): a = MemoryStorage(fileext='.a') b = MemoryStorage(fileext='.b') status = {} item = Item(u'UID:1') # new item 1 in a a.upload(item) sync(a, b, status) assert items(b) == items(a) == {item.raw} item = Item(u'UID:1\nASDF:YES') # update of item 1 in b b.update('1.b', item, b.get('1.b')[1]) sync(a, b, status) assert items(b) == items(a) == {item.raw} item2 = Item(u'UID:2') # new item 2 in b b.upload(item2) sync(a, b, status) assert items(b) == items(a) == {item.raw, item2.raw} item2 = Item(u'UID:2\nASDF:YES') # update of item 2 in a a.update('2.a', item2, a.get('2.a')[1]) sync(a, b, status) assert items(b) == items(a) == {item.raw, item2.raw} def test_deletion(): a = MemoryStorage(fileext='.a') b = MemoryStorage(fileext='.b') status = {} item = Item(u'UID:1') a.upload(item) item2 = Item(u'UID:2') a.upload(item2) sync(a, b, status) b.delete('1.b', b.get('1.b')[1]) sync(a, b, status) assert items(a) == items(b) == {item2.raw} a.upload(item) sync(a, b, status) assert items(a) == items(b) == {item.raw, item2.raw} a.delete('1.a', a.get('1.a')[1]) sync(a, b, status) assert items(a) == items(b) == {item2.raw} def test_insert_hash(): a = MemoryStorage() b = MemoryStorage() status = {} item = Item('UID:1') href, etag = a.upload(item) sync(a, b, status) for d in status['1']: del d['hash'] a.update(href, Item('UID:1\nHAHA:YES'), etag) sync(a, b, status) assert 'hash' in status['1'][0] and 'hash' in status['1'][1] def test_already_synced(): a = MemoryStorage(fileext='.a') b = MemoryStorage(fileext='.b') item = Item(u'UID:1') a.upload(item) b.upload(item) status = { '1': ({ 'href': '1.a', 'hash': item.hash, 'etag': a.get('1.a')[1] }, { 'href': '1.b', 'hash': item.hash, 'etag': b.get('1.b')[1] }) } old_status = deepcopy(status) a.update = b.update = a.upload = b.upload = \ lambda *a, **kw: pytest.fail('Method shouldn\'t have been called.') for _ in (1, 2): sync(a, b, status) assert status == old_status assert items(a) == items(b) == {item.raw} @pytest.mark.parametrize('winning_storage', 'ab') def test_conflict_resolution_both_etags_new(winning_storage): a = MemoryStorage() b = MemoryStorage() item = Item(u'UID:1') href_a, etag_a = a.upload(item) href_b, etag_b = b.upload(item) status = {} sync(a, b, status) assert status item_a = Item(u'UID:1\nitem a') item_b = Item(u'UID:1\nitem b') a.update(href_a, item_a, etag_a) b.update(href_b, item_b, etag_b) with pytest.raises(SyncConflict): sync(a, b, status) sync(a, b, status, conflict_resolution='{} wins'.format(winning_storage)) assert items(a) 
== items(b) == { item_a.raw if winning_storage == 'a' else item_b.raw } def test_updated_and_deleted(): a = MemoryStorage() b = MemoryStorage() href_a, etag_a = a.upload(Item(u'UID:1')) status = {} sync(a, b, status, force_delete=True) (href_b, etag_b), = b.list() b.delete(href_b, etag_b) updated = Item(u'UID:1\nupdated') a.update(href_a, updated, etag_a) sync(a, b, status, force_delete=True) assert items(a) == items(b) == {updated.raw} def test_conflict_resolution_invalid_mode(): a = MemoryStorage() b = MemoryStorage() item_a = Item(u'UID:1\nitem a') item_b = Item(u'UID:1\nitem b') a.upload(item_a) b.upload(item_b) with pytest.raises(ValueError): sync(a, b, {}, conflict_resolution='yolo') def test_conflict_resolution_new_etags_without_changes(): a = MemoryStorage() b = MemoryStorage() item = Item(u'UID:1') href_a, etag_a = a.upload(item) href_b, etag_b = b.upload(item) status = {'1': (href_a, 'BOGUS_a', href_b, 'BOGUS_b')} sync(a, b, status) (ident, (status_a, status_b)), = status.items() assert ident == '1' assert status_a['href'] == href_a assert status_a['etag'] == etag_a assert status_b['href'] == href_b assert status_b['etag'] == etag_b def test_uses_get_multi(monkeypatch): def breakdown(*a, **kw): raise AssertionError('Expected use of get_multi') get_multi_calls = [] old_get = MemoryStorage.get def get_multi(self, hrefs): hrefs = list(hrefs) get_multi_calls.append(hrefs) for href in hrefs: item, etag = old_get(self, href) yield href, item, etag monkeypatch.setattr(MemoryStorage, 'get', breakdown) monkeypatch.setattr(MemoryStorage, 'get_multi', get_multi) a = MemoryStorage() b = MemoryStorage() item = Item(u'UID:1') expected_href, etag = a.upload(item) sync(a, b, {}) assert get_multi_calls == [[expected_href]] def test_empty_storage_dataloss(): a = MemoryStorage() b = MemoryStorage() a.upload(Item(u'UID:1')) a.upload(Item(u'UID:2')) status = {} sync(a, b, status) with pytest.raises(StorageEmpty): sync(MemoryStorage(), b, status) with pytest.raises(StorageEmpty): sync(a, MemoryStorage(), status) def test_no_uids(): a = MemoryStorage() b = MemoryStorage() a.upload(Item(u'ASDF')) b.upload(Item(u'FOOBAR')) status = {} sync(a, b, status) assert items(a) == items(b) == {u'ASDF', u'FOOBAR'} def test_changed_uids(): a = MemoryStorage() b = MemoryStorage() href_a, etag_a = a.upload(Item(u'UID:A-ONE')) href_b, etag_b = b.upload(Item(u'UID:B-ONE')) status = {} sync(a, b, status) a.update(href_a, Item(u'UID:A-TWO'), etag_a) sync(a, b, status) def test_both_readonly(): a = MemoryStorage(read_only=True) b = MemoryStorage(read_only=True) assert a.read_only assert b.read_only status = {} with pytest.raises(BothReadOnly): sync(a, b, status) def test_partial_sync_revert(): a = MemoryStorage(instance_name='a') b = MemoryStorage(instance_name='b') status = {} a.upload(Item(u'UID:1')) b.upload(Item(u'UID:2')) b.read_only = True sync(a, b, status, partial_sync='revert') assert len(status) == 2 assert items(a) == {'UID:1', 'UID:2'} assert items(b) == {'UID:2'} sync(a, b, status, partial_sync='revert') assert len(status) == 1 assert items(a) == {'UID:2'} assert items(b) == {'UID:2'} # Check that updates get reverted a.items[next(iter(a.items))] = ('foo', Item('UID:2\nupdated')) assert items(a) == {'UID:2\nupdated'} sync(a, b, status, partial_sync='revert') assert len(status) == 1 assert items(a) == {'UID:2\nupdated'} sync(a, b, status, partial_sync='revert') assert items(a) == {'UID:2'} # Check that deletions get reverted a.items.clear() sync(a, b, status, partial_sync='revert', force_delete=True) sync(a, 
b, status, partial_sync='revert', force_delete=True) assert items(a) == {'UID:2'} @pytest.mark.parametrize('sync_inbetween', (True, False)) def test_ident_conflict(sync_inbetween): a = MemoryStorage() b = MemoryStorage() status = {} href_a, etag_a = a.upload(Item(u'UID:aaa')) href_b, etag_b = a.upload(Item(u'UID:bbb')) if sync_inbetween: sync(a, b, status) a.update(href_a, Item(u'UID:xxx'), etag_a) a.update(href_b, Item(u'UID:xxx'), etag_b) with pytest.raises(IdentConflict): sync(a, b, status) def test_moved_href(): ''' Concrete application: ppl_ stores contact aliases in filenames, which means item's hrefs get changed. Vdirsyncer doesn't synchronize this data, but also shouldn't do things like deleting and re-uploading to the server. .. _ppl: http://ppladdressbook.org/ ''' a = MemoryStorage() b = MemoryStorage() status = {} href, etag = a.upload(Item(u'UID:haha')) sync(a, b, status) b.items['lol'] = b.items.pop('haha') # The sync algorithm should prefetch `lol`, see that it's the same ident # and not do anything else. a.get_multi = blow_up # Absolutely no prefetch on A # No actual sync actions a.delete = a.update = a.upload = b.delete = b.update = b.upload = blow_up sync(a, b, status) assert len(status) == 1 assert items(a) == items(b) == {'UID:haha'} assert status['haha'][1]['href'] == 'lol' old_status = deepcopy(status) # Further sync should be a noop. Not even prefetching should occur. b.get_multi = blow_up sync(a, b, status) assert old_status == status assert items(a) == items(b) == {'UID:haha'} def test_bogus_etag_change(): '''Assert that sync algorithm is resilient against etag changes if content didn\'t change. In this particular case we test a scenario where both etags have been updated, but only one side actually changed its item content. 
''' a = MemoryStorage() b = MemoryStorage() status = {} href_a, etag_a = a.upload(Item(u'UID:ASDASD')) sync(a, b, status) assert len(status) == len(list(a.list())) == len(list(b.list())) == 1 (href_b, etag_b), = b.list() a.update(href_a, Item(u'UID:ASDASD'), etag_a) b.update(href_b, Item(u'UID:ASDASD\nACTUALCHANGE:YES'), etag_b) b.delete = b.update = b.upload = blow_up sync(a, b, status) assert len(status) == 1 assert items(a) == items(b) == {u'UID:ASDASD\nACTUALCHANGE:YES'} def test_unicode_hrefs(): a = MemoryStorage() b = MemoryStorage() status = {} href, etag = a.upload(Item(u'UID:äää')) sync(a, b, status) class ActionIntentionallyFailed(Exception): pass def action_failure(*a, **kw): raise ActionIntentionallyFailed() class SyncMachine(RuleBasedStateMachine): Status = Bundle('status') Storage = Bundle('storage') @rule(target=Storage, flaky_etags=st.booleans(), null_etag_on_upload=st.booleans()) def newstorage(self, flaky_etags, null_etag_on_upload): s = MemoryStorage() if flaky_etags: def get(href): old_etag, item = s.items[href] etag = _random_string() s.items[href] = etag, item return item, etag s.get = get if null_etag_on_upload: _old_upload = s.upload _old_update = s.update s.upload = lambda item: (_old_upload(item)[0], 'NULL') s.update = lambda h, i, e: _old_update(h, i, e) and 'NULL' return s @rule(s=Storage, read_only=st.booleans()) def is_read_only(self, s, read_only): assume(s.read_only != read_only) s.read_only = read_only @rule(s=Storage) def actions_fail(self, s): s.upload = action_failure s.update = action_failure s.delete = action_failure @rule(s=Storage) def none_as_etag(self, s): _old_upload = s.upload _old_update = s.update def upload(item): return _old_upload(item)[0], None def update(href, item, etag): _old_update(href, item, etag) s.upload = upload s.update = update @rule(target=Status) def newstatus(self): return {} @rule(storage=Storage, uid=uid_strategy, etag=st.text()) def upload(self, storage, uid, etag): item = Item(u'UID:{}'.format(uid)) storage.items[uid] = (etag, item) @rule(storage=Storage, href=st.text()) def delete(self, storage, href): assume(storage.items.pop(href, None)) @rule( status=Status, a=Storage, b=Storage, force_delete=st.booleans(), conflict_resolution=st.one_of((st.just('a wins'), st.just('b wins'))), with_error_callback=st.booleans(), partial_sync=st.one_of(( st.just('ignore'), st.just('revert'), st.just('error') )) ) def sync(self, status, a, b, force_delete, conflict_resolution, with_error_callback, partial_sync): assume(a is not b) old_items_a = items(a) old_items_b = items(b) a.instance_name = 'a' b.instance_name = 'b' errors = [] if with_error_callback: error_callback = errors.append else: error_callback = None try: # If one storage is read-only, double-sync because changes don't # get reverted immediately. 
for _ in range(2 if a.read_only or b.read_only else 1): sync(a, b, status, force_delete=force_delete, conflict_resolution=conflict_resolution, error_callback=error_callback, partial_sync=partial_sync) for e in errors: raise e except PartialSync: assert partial_sync == 'error' except ActionIntentionallyFailed: pass except BothReadOnly: assert a.read_only and b.read_only assume(False) except StorageEmpty: if force_delete: raise else: assert not list(a.list()) or not list(b.list()) else: items_a = items(a) items_b = items(b) assert items_a == items_b or partial_sync == 'ignore' assert items_a == old_items_a or not a.read_only assert items_b == old_items_b or not b.read_only assert set(a.items) | set(b.items) == set(status) or \ partial_sync == 'ignore' TestSyncMachine = SyncMachine.TestCase @pytest.mark.parametrize('error_callback', [True, False]) def test_rollback(error_callback): a = MemoryStorage() b = MemoryStorage() status = {} a.items['0'] = ('', Item('UID:0')) b.items['1'] = ('', Item('UID:1')) b.upload = b.update = b.delete = action_failure if error_callback: errors = [] sync(a, b, status=status, conflict_resolution='a wins', error_callback=errors.append) assert len(errors) == 1 assert isinstance(errors[0], ActionIntentionallyFailed) assert len(status) == 1 assert status['1'] else: with pytest.raises(ActionIntentionallyFailed): sync(a, b, status=status, conflict_resolution='a wins') def test_duplicate_hrefs(): a = MemoryStorage() b = MemoryStorage() a.list = lambda: [('a', 'a')] * 3 a.items['a'] = ('a', Item('UID:a')) status = {} sync(a, b, status) with pytest.raises(AssertionError): sync(a, b, status) vdirsyncer-0.16.2/tests/unit/cli/0000755000175000017500000000000013147536465020734 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/unit/cli/test_discover.py0000644000175000017500000000606513073721646024165 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import pytest from vdirsyncer.cli.discover import expand_collections missing = object() @pytest.mark.parametrize('shortcuts,expected', [ (['from a'], [ ('c1', ({'type': 'fooboo', 'custom_arg': 'a1', 'collection': 'c1'}, {'type': 'fooboo', 'custom_arg': 'b1', 'collection': 'c1'})), ('c2', ({'type': 'fooboo', 'custom_arg': 'a2', 'collection': 'c2'}, {'type': 'fooboo', 'custom_arg': 'b2', 'collection': 'c2'})), ('a3', ({'type': 'fooboo', 'custom_arg': 'a3', 'collection': 'a3'}, missing)) ]), (['from b'], [ ('c1', ({'type': 'fooboo', 'custom_arg': 'a1', 'collection': 'c1'}, {'type': 'fooboo', 'custom_arg': 'b1', 'collection': 'c1'})), ('c2', ({'type': 'fooboo', 'custom_arg': 'a2', 'collection': 'c2'}, {'type': 'fooboo', 'custom_arg': 'b2', 'collection': 'c2'})), ('b3', (missing, {'type': 'fooboo', 'custom_arg': 'b3', 'collection': 'b3'})) ]), (['from a', 'from b'], [ ('c1', ({'type': 'fooboo', 'custom_arg': 'a1', 'collection': 'c1'}, {'type': 'fooboo', 'custom_arg': 'b1', 'collection': 'c1'})), ('c2', ({'type': 'fooboo', 'custom_arg': 'a2', 'collection': 'c2'}, {'type': 'fooboo', 'custom_arg': 'b2', 'collection': 'c2'})), ('a3', ({'type': 'fooboo', 'custom_arg': 'a3', 'collection': 'a3'}, missing)), ('b3', (missing, {'type': 'fooboo', 'custom_arg': 'b3', 'collection': 'b3'})) ]), ([['c12', 'c1', 'c2']], [ ('c12', ({'type': 'fooboo', 'custom_arg': 'a1', 'collection': 'c1'}, {'type': 'fooboo', 'custom_arg': 'b2', 'collection': 'c2'})), ]), (None, [ (None, ({'type': 'fooboo', 'storage_side': 'a', 'collection': None}, {'type': 'fooboo', 'storage_side': 'b', 'collection': None})) ]), ([None], [ (None, ({'type': 
'fooboo', 'storage_side': 'a', 'collection': None}, {'type': 'fooboo', 'storage_side': 'b', 'collection': None})) ]), ]) def test_expand_collections(shortcuts, expected): config_a = { 'type': 'fooboo', 'storage_side': 'a' } config_b = { 'type': 'fooboo', 'storage_side': 'b' } def get_discovered_a(): return { 'c1': {'type': 'fooboo', 'custom_arg': 'a1', 'collection': 'c1'}, 'c2': {'type': 'fooboo', 'custom_arg': 'a2', 'collection': 'c2'}, 'a3': {'type': 'fooboo', 'custom_arg': 'a3', 'collection': 'a3'} } def get_discovered_b(): return { 'c1': {'type': 'fooboo', 'custom_arg': 'b1', 'collection': 'c1'}, 'c2': {'type': 'fooboo', 'custom_arg': 'b2', 'collection': 'c2'}, 'b3': {'type': 'fooboo', 'custom_arg': 'b3', 'collection': 'b3'} } assert sorted(expand_collections( shortcuts, config_a, config_b, get_discovered_a, get_discovered_b, lambda config, collection: missing )) == sorted(expected) vdirsyncer-0.16.2/tests/unit/cli/test_config.py0000644000175000017500000000123413074442004023572 0ustar untitakeruntitaker00000000000000import os from vdirsyncer.cli.config import _resolve_conflict_via_command from vdirsyncer.vobject import Item def test_conflict_resolution_command(): def check_call(command): command, a_tmp, b_tmp = command assert command == os.path.expanduser('~/command') with open(a_tmp) as f: assert f.read() == a.raw with open(b_tmp) as f: assert f.read() == b.raw with open(b_tmp, 'w') as f: f.write(a.raw) a = Item('UID:AAAAAAA') b = Item('UID:BBBBBBB') assert _resolve_conflict_via_command( a, b, ['~/command'], 'a', 'b', _check_call=check_call ).raw == a.raw vdirsyncer-0.16.2/tests/system/0000755000175000017500000000000013147536465020532 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/system/utils/0000755000175000017500000000000013147536465021672 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/system/utils/test_main.py0000644000175000017500000000456113144561565024231 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import logging import click_log import pytest import requests from vdirsyncer import http, utils @pytest.fixture(autouse=True) def no_debug_output(request): logger = click_log.basic_config('vdirsyncer') logger.setLevel(logging.WARNING) def test_get_storage_init_args(): from vdirsyncer.storage.memory import MemoryStorage all, required = utils.get_storage_init_args(MemoryStorage) assert all == set(['fileext', 'collection', 'read_only', 'instance_name']) assert not required def test_request_ssl(httpsserver): httpsserver.serve_content('') # we need to serve something with pytest.raises(requests.exceptions.ConnectionError) as excinfo: http.request('GET', httpsserver.url) assert 'certificate verify failed' in str(excinfo.value) http.request('GET', httpsserver.url, verify=False) def _fingerprints_broken(): from pkg_resources import parse_version as ver broken_urllib3 = ver(requests.__version__) <= ver('2.5.1') return broken_urllib3 @pytest.mark.skipif(_fingerprints_broken(), reason='https://github.com/shazow/urllib3/issues/529') @pytest.mark.parametrize('fingerprint', [ '94:FD:7A:CB:50:75:A4:69:82:0A:F8:23:DF:07:FC:69:3E:CD:90:CA', '19:90:F7:23:94:F2:EF:AB:2B:64:2D:57:3D:25:95:2D' ]) def test_request_ssl_fingerprints(httpsserver, fingerprint): httpsserver.serve_content('') # we need to serve something http.request('GET', httpsserver.url, verify=False, verify_fingerprint=fingerprint) with pytest.raises(requests.exceptions.ConnectionError) as excinfo: http.request('GET', httpsserver.url, verify_fingerprint=fingerprint) with 
pytest.raises(requests.exceptions.ConnectionError) as excinfo: http.request('GET', httpsserver.url, verify=False, verify_fingerprint=''.join(reversed(fingerprint))) assert 'Fingerprints did not match' in str(excinfo.value) def test_open_graphical_browser(monkeypatch): import webbrowser # Just assert that this internal attribute still exists and is some sort of # collection. iter(webbrowser._tryorder) monkeypatch.setattr('webbrowser._tryorder', []) with pytest.raises(RuntimeError) as excinfo: utils.open_graphical_browser('http://example.com') assert 'No graphical browser found' in str(excinfo.value) vdirsyncer-0.16.2/tests/system/cli/0000755000175000017500000000000013147536465021301 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/system/cli/conftest.py0000644000175000017500000000141313054065744023471 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- from textwrap import dedent from click.testing import CliRunner import pytest import vdirsyncer.cli as cli class _CustomRunner(object): def __init__(self, tmpdir): self.tmpdir = tmpdir self.cfg = tmpdir.join('config') self.runner = CliRunner() def invoke(self, args, env=None, **kwargs): env = env or {} env.setdefault('VDIRSYNCER_CONFIG', str(self.cfg)) return self.runner.invoke(cli.app, args, env=env, **kwargs) def write_with_general(self, data): self.cfg.write(dedent(''' [general] status_path = "{}/status/" ''').format(str(self.tmpdir))) self.cfg.write(data, mode='a') @pytest.fixture def runner(tmpdir): return _CustomRunner(tmpdir) vdirsyncer-0.16.2/tests/system/cli/test_discover.py0000644000175000017500000001210613054065744024522 0ustar untitakeruntitaker00000000000000import json from textwrap import dedent import hypothesis.strategies as st from hypothesis import given from vdirsyncer import exceptions from vdirsyncer.storage.base import Storage def test_discover_command(tmpdir, runner): runner.write_with_general(dedent(''' [storage foo] type = "filesystem" path = "{0}/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "{0}/bar/" fileext = ".txt" [pair foobar] a = "foo" b = "bar" collections = ["from a"] ''').format(str(tmpdir))) foo = tmpdir.mkdir('foo') bar = tmpdir.mkdir('bar') for x in 'abc': foo.mkdir(x) bar.mkdir(x) bar.mkdir('d') result = runner.invoke(['discover']) assert not result.exception foo.mkdir('d') result = runner.invoke(['sync']) assert not result.exception lines = result.output.splitlines() assert 'Syncing foobar/a' in lines assert 'Syncing foobar/b' in lines assert 'Syncing foobar/c' in lines assert 'Syncing foobar/d' not in result.output result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync']) assert not result.exception assert 'Syncing foobar/a' in lines assert 'Syncing foobar/b' in lines assert 'Syncing foobar/c' in lines assert 'Syncing foobar/d' in result.output # Check for redundant data that is already in the config. This avoids # copying passwords from the config too. 
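    # (`fileext` merely stands in for any such parameter in the assertion
    # below.)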
assert 'fileext' not in tmpdir \ .join('status') \ .join('foobar.collections') \ .read() def test_discover_different_collection_names(tmpdir, runner): foo = tmpdir.mkdir('foo') bar = tmpdir.mkdir('bar') runner.write_with_general(dedent(''' [storage foo] type = "filesystem" fileext = ".txt" path = "{foo}" [storage bar] type = "filesystem" fileext = ".txt" path = "{bar}" [pair foobar] a = "foo" b = "bar" collections = [ ["coll1", "coll_a1", "coll_b1"], "coll2" ] ''').format(foo=str(foo), bar=str(bar))) result = runner.invoke(['discover'], input='y\n' * 6) assert not result.exception coll_a1 = foo.join('coll_a1') coll_b1 = bar.join('coll_b1') assert coll_a1.exists() assert coll_b1.exists() result = runner.invoke(['sync']) assert not result.exception foo_txt = coll_a1.join('foo.txt') foo_txt.write('BEGIN:VCALENDAR\nUID:foo\nEND:VCALENDAR') result = runner.invoke(['sync']) assert not result.exception assert foo_txt.exists() assert coll_b1.join('foo.txt').exists() def test_discover_direct_path(tmpdir, runner): foo = tmpdir.join('foo') bar = tmpdir.join('bar') runner.write_with_general(dedent(''' [storage foo] type = "filesystem" fileext = ".txt" path = "{foo}" [storage bar] type = "filesystem" fileext = ".txt" path = "{bar}" [pair foobar] a = "foo" b = "bar" collections = null ''').format(foo=str(foo), bar=str(bar))) result = runner.invoke(['discover'], input='y\n' * 2) assert not result.exception result = runner.invoke(['sync']) assert not result.exception assert foo.exists() assert bar.exists() def test_null_collection_with_named_collection(tmpdir, runner): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = [["baz", "baz", null]] [storage foo] type = "filesystem" path = "{base}/foo/" fileext = ".txt" [storage bar] type = "singlefile" path = "{base}/bar.txt" '''.format(base=str(tmpdir)))) result = runner.invoke(['discover'], input='y\n' * 2) assert not result.exception foo = tmpdir.join('foo') foobaz = foo.join('baz') assert foo.exists() assert foobaz.exists() bar = tmpdir.join('bar.txt') assert bar.exists() foobaz.join('lol.txt').write('BEGIN:VCARD\nUID:HAHA\nEND:VCARD') result = runner.invoke(['sync']) assert not result.exception assert 'HAHA' in bar.read() @given(a_requires=st.booleans(), b_requires=st.booleans()) def test_collection_required(a_requires, b_requires, tmpdir, runner, monkeypatch): class TestStorage(Storage): storage_name = 'test' def __init__(self, require_collection, **kw): if require_collection: assert not kw.get('collection') raise exceptions.CollectionRequired() from vdirsyncer.cli.utils import storage_names monkeypatch.setitem(storage_names._storages, 'test', TestStorage) runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = null [storage foo] type = "test" require_collection = {a} [storage bar] type = "test" require_collection = {b} '''.format(a=json.dumps(a_requires), b=json.dumps(b_requires)))) result = runner.invoke(['discover']) if a_requires or b_requires: assert result.exception assert \ 'One or more storages don\'t support `collections = null`.' 
in \ result.output vdirsyncer-0.16.2/tests/system/cli/__init__.py0000644000175000017500000000000013054065744023372 0ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/tests/system/cli/test_repair.py0000644000175000017500000000377013134636312024167 0ustar untitakeruntitaker00000000000000# encoding: utf-8 from textwrap import dedent import pytest @pytest.fixture def storage(tmpdir, runner): runner.write_with_general(dedent(''' [storage foo] type = "filesystem" path = "{base}/foo/" fileext = ".txt" ''').format(base=str(tmpdir))) return tmpdir.mkdir('foo') @pytest.mark.parametrize('collection', [None, "foocoll"]) def test_basic(storage, runner, collection): if collection is not None: storage = storage.mkdir(collection) collection_arg = 'foo/{}'.format(collection) else: collection_arg = 'foo' argv = ['repair', collection_arg] result = runner.invoke(argv, input='y') assert not result.exception storage.join('item.txt').write('BEGIN:VCARD\nEND:VCARD') storage.join('toobroken.txt').write('') result = runner.invoke(argv, input='y') assert not result.exception assert 'No UID' in result.output assert '\'toobroken.txt\' is malformed beyond repair' \ in result.output new_fname, = [x for x in storage.listdir() if 'toobroken' not in str(x)] assert 'UID:' in new_fname.read() @pytest.mark.parametrize('repair_uids', [None, True, False]) def test_repair_uids(storage, runner, repair_uids): f = storage.join('baduid.txt') orig_f = 'BEGIN:VCARD\nUID:!!!!!\nEND:VCARD' f.write(orig_f) if repair_uids is None: opt = [] elif repair_uids: opt = ['--repair-unsafe-uid'] else: opt = ['--no-repair-unsafe-uid'] result = runner.invoke(['repair'] + opt + ['foo'], input='y') assert not result.exception if repair_uids: assert 'UID or href is unsafe, assigning random UID' in result.output assert not f.exists() new_f, = storage.listdir() s = new_f.read() assert s.startswith('BEGIN:VCARD') assert s.endswith('END:VCARD') assert s != orig_f else: assert 'UID may cause problems, add --repair-unsafe-uid to repair.' 
\ in result.output assert f.read() == orig_f vdirsyncer-0.16.2/tests/system/cli/test_fetchparams.py0000644000175000017500000001044013054065744025200 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- from textwrap import dedent import hypothesis.strategies as st from hypothesis import given import pytest from vdirsyncer import exceptions from vdirsyncer.cli.fetchparams import STRATEGIES, expand_fetch_params @pytest.fixture def mystrategy(monkeypatch): def strategy(x): calls.append(x) return x calls = [] monkeypatch.setitem(STRATEGIES, 'mystrategy', strategy) return calls @pytest.fixture def value_cache(monkeypatch): _cache = {} class FakeContext(object): fetched_params = _cache def find_object(self, _): return self def get_context(*a, **kw): return FakeContext() monkeypatch.setattr('click.get_current_context', get_context) return _cache def test_get_password_from_command(tmpdir, runner): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = ["a", "b", "c"] [storage foo] type = "filesystem" path = "{base}/foo/" fileext.fetch = ["command", "echo", ".txt"] [storage bar] type = "filesystem" path = "{base}/bar/" fileext.fetch = ["prompt", "Fileext for bar"] '''.format(base=str(tmpdir)))) foo = tmpdir.ensure('foo', dir=True) foo.ensure('a', dir=True) foo.ensure('b', dir=True) foo.ensure('c', dir=True) bar = tmpdir.ensure('bar', dir=True) bar.ensure('a', dir=True) bar.ensure('b', dir=True) bar.ensure('c', dir=True) result = runner.invoke(['discover'], input='.asdf\n') assert not result.exception status = tmpdir.join('status').join('foobar.collections').read() assert 'foo' in status assert 'bar' in status assert 'asdf' not in status assert 'txt' not in status foo.join('a').join('foo.txt').write('BEGIN:VCARD\nUID:foo\nEND:VCARD') result = runner.invoke(['sync'], input='.asdf\n') assert not result.exception assert [x.basename for x in bar.join('a').listdir()] == ['foo.asdf'] def test_key_conflict(monkeypatch, mystrategy): with pytest.raises(ValueError) as excinfo: expand_fetch_params({ 'foo': 'bar', 'foo.fetch': ['mystrategy', 'baz'] }) assert 'Can\'t set foo.fetch and foo.' 
in str(excinfo.value) @given(s=st.text(), t=st.text(min_size=1)) def test_fuzzing(s, t, mystrategy): config = expand_fetch_params({ '{}.fetch'.format(s): ['mystrategy', t] }) assert config[s] == t @pytest.mark.parametrize('value', [ [], 'lol', 42 ]) def test_invalid_fetch_value(mystrategy, value): with pytest.raises(ValueError) as excinfo: expand_fetch_params({ 'foo.fetch': value }) assert 'Expected a list' in str(excinfo.value) or \ 'Expected list of length > 0' in str(excinfo.value) def test_unknown_strategy(): with pytest.raises(exceptions.UserError) as excinfo: expand_fetch_params({ 'foo.fetch': ['unreal', 'asdf'] }) assert 'Unknown strategy' in str(excinfo.value) def test_caching(monkeypatch, mystrategy, value_cache): orig_cfg = {'foo.fetch': ['mystrategy', 'asdf']} rv = expand_fetch_params(orig_cfg) assert rv['foo'] == 'asdf' assert mystrategy == ['asdf'] assert len(value_cache) == 1 rv = expand_fetch_params(orig_cfg) assert rv['foo'] == 'asdf' assert mystrategy == ['asdf'] assert len(value_cache) == 1 value_cache.clear() rv = expand_fetch_params(orig_cfg) assert rv['foo'] == 'asdf' assert mystrategy == ['asdf'] * 2 assert len(value_cache) == 1 def test_failed_strategy(monkeypatch, value_cache): calls = [] def strategy(x): calls.append(x) raise KeyboardInterrupt() monkeypatch.setitem(STRATEGIES, 'mystrategy', strategy) orig_cfg = {'foo.fetch': ['mystrategy', 'asdf']} for _ in range(2): with pytest.raises(KeyboardInterrupt): expand_fetch_params(orig_cfg) assert len(value_cache) == 1 assert len(calls) == 1 def test_empty_value(monkeypatch, mystrategy): with pytest.raises(exceptions.UserError) as excinfo: expand_fetch_params({ 'foo.fetch': ['mystrategy', ''] }) assert 'Empty value for foo.fetch, this most likely indicates an error' \ in str(excinfo.value) vdirsyncer-0.16.2/tests/system/cli/test_sync.py0000644000175000017500000003354213134636312023661 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import json import sys from textwrap import dedent import hypothesis.strategies as st from hypothesis import example, given import pytest def test_simple_run(tmpdir, runner): runner.write_with_general(dedent(''' [pair my_pair] a = "my_a" b = "my_b" collections = null [storage my_a] type = "filesystem" path = "{0}/path_a/" fileext = ".txt" [storage my_b] type = "filesystem" path = "{0}/path_b/" fileext = ".txt" ''').format(str(tmpdir))) tmpdir.mkdir('path_a') tmpdir.mkdir('path_b') result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync']) assert not result.exception tmpdir.join('path_a/haha.txt').write('UID:haha') result = runner.invoke(['sync']) assert 'Copying (uploading) item haha to my_b' in result.output assert tmpdir.join('path_b/haha.txt').read() == 'UID:haha' def test_sync_inexistant_pair(tmpdir, runner): runner.write_with_general("") result = runner.invoke(['sync', 'foo']) assert result.exception assert 'pair foo does not exist.' 
in result.output.lower() def test_debug_connections(tmpdir, runner): runner.write_with_general(dedent(''' [pair my_pair] a = "my_a" b = "my_b" collections = null [storage my_a] type = "filesystem" path = "{0}/path_a/" fileext = ".txt" [storage my_b] type = "filesystem" path = "{0}/path_b/" fileext = ".txt" ''').format(str(tmpdir))) tmpdir.mkdir('path_a') tmpdir.mkdir('path_b') result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['-vdebug', 'sync', '--max-workers=3']) assert 'using 3 maximal workers' in result.output.lower() result = runner.invoke(['-vdebug', 'sync']) assert 'using 1 maximal workers' in result.output.lower() def test_empty_storage(tmpdir, runner): runner.write_with_general(dedent(''' [pair my_pair] a = "my_a" b = "my_b" collections = null [storage my_a] type = "filesystem" path = "{0}/path_a/" fileext = ".txt" [storage my_b] type = "filesystem" path = "{0}/path_b/" fileext = ".txt" ''').format(str(tmpdir))) tmpdir.mkdir('path_a') tmpdir.mkdir('path_b') result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync']) assert not result.exception tmpdir.join('path_a/haha.txt').write('UID:haha') result = runner.invoke(['sync']) assert not result.exception tmpdir.join('path_b/haha.txt').remove() result = runner.invoke(['sync']) lines = result.output.splitlines() assert lines[0] == 'Syncing my_pair' assert lines[1].startswith('error: my_pair: ' 'Storage "my_b" was completely emptied.') assert result.exception def test_verbosity(tmpdir, runner): runner.write_with_general('') result = runner.invoke(['--verbosity=HAHA', 'sync']) assert result.exception assert 'invalid value for "--verbosity"' in result.output.lower() def test_collections_cache_invalidation(tmpdir, runner): foo = tmpdir.mkdir('foo') bar = tmpdir.mkdir('bar') for x in 'abc': foo.mkdir(x) bar.mkdir(x) runner.write_with_general(dedent(''' [storage foo] type = "filesystem" path = "{0}/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "{0}/bar/" fileext = ".txt" [pair foobar] a = "foo" b = "bar" collections = ["a", "b", "c"] ''').format(str(tmpdir))) foo.join('a/itemone.txt').write('UID:itemone') result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync']) assert not result.exception assert 'detected change in config file' not in result.output.lower() rv = bar.join('a').listdir() assert len(rv) == 1 assert rv[0].basename == 'itemone.txt' runner.write_with_general(dedent(''' [storage foo] type = "filesystem" path = "{0}/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "{0}/bar2/" fileext = ".txt" [pair foobar] a = "foo" b = "bar" collections = ["a", "b", "c"] ''').format(str(tmpdir))) for entry in tmpdir.join('status').listdir(): if not str(entry).endswith('.collections'): entry.remove() bar2 = tmpdir.mkdir('bar2') for x in 'abc': bar2.mkdir(x) result = runner.invoke(['sync']) assert 'detected change in config file' in result.output.lower() assert result.exception result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync']) assert not result.exception rv = bar.join('a').listdir() rv2 = bar2.join('a').listdir() assert len(rv) == len(rv2) == 1 assert rv[0].basename == rv2[0].basename == 'itemone.txt' def test_invalid_pairs_as_cli_arg(tmpdir, runner): runner.write_with_general(dedent(''' [storage foo] type = "filesystem" path = "{0}/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "{0}/bar/" fileext = ".txt" [pair foobar] a = "foo" b = "bar" 
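    # Only these three collections exist; "d" is requested on the CLI below.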
collections = ["a", "b", "c"] ''').format(str(tmpdir))) for base in ('foo', 'bar'): base = tmpdir.mkdir(base) for c in 'abc': base.mkdir(c) result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync', 'foobar/d']) assert result.exception assert 'pair foobar: collection "d" not found' in result.output.lower() def test_multiple_pairs(tmpdir, runner): def get_cfg(): for name_a, name_b in ('foo', 'bar'), ('bam', 'baz'): yield dedent(''' [pair {a}{b}] a = "{a}" b = "{b}" collections = null ''').format(a=name_a, b=name_b) for name in name_a, name_b: yield dedent(''' [storage {name}] type = "filesystem" path = "{path}" fileext = ".txt" ''').format(name=name, path=str(tmpdir.mkdir(name))) runner.write_with_general(''.join(get_cfg())) result = runner.invoke(['discover']) assert not result.exception assert set(result.output.splitlines()) > set([ 'Discovering collections for pair bambaz', 'Discovering collections for pair foobar' ]) result = runner.invoke(['sync']) assert not result.exception assert set(result.output.splitlines()) == set([ 'Syncing bambaz', 'Syncing foobar', ]) # XXX: https://github.com/pimutils/vdirsyncer/issues/617 @pytest.mark.skipif(sys.platform == 'darwin', reason='This test inexplicably fails') @given(collections=st.sets( st.text( st.characters( blacklist_characters=set( u'./\x00' # Invalid chars on POSIX filesystems ), # Surrogates can't be encoded to utf-8 in Python blacklist_categories=set(['Cs']) ), min_size=1, max_size=50 ), min_size=1 )) @example(collections=[u'persönlich']) @example(collections={'a', 'A'}) @example(collections={'\ufffe'}) def test_create_collections(subtest, collections): @subtest def test_inner(tmpdir, runner): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = {colls} [storage foo] type = "filesystem" path = "{base}/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "{base}/bar/" fileext = ".txt" '''.format(base=str(tmpdir), colls=json.dumps(list(collections))))) result = runner.invoke( ['discover'], input='y\n' * 2 * (len(collections) + 1) ) assert not result.exception, result.output result = runner.invoke( ['sync'] + ['foobar/' + x for x in collections] ) assert not result.exception, result.output assert set(x.basename for x in tmpdir.join('foo').listdir()) == \ set(x.basename for x in tmpdir.join('bar').listdir()) def test_ident_conflict(tmpdir, runner): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = null [storage foo] type = "filesystem" path = "{base}/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "{base}/bar/" fileext = ".txt" '''.format(base=str(tmpdir)))) foo = tmpdir.mkdir('foo') tmpdir.mkdir('bar') foo.join('one.txt').write('UID:1') foo.join('two.txt').write('UID:1') foo.join('three.txt').write('UID:1') result = runner.invoke(['discover']) assert not result.exception result = runner.invoke(['sync']) assert result.exception assert ('error: foobar: Storage "foo" contains multiple items with the ' 'same UID or even content') in result.output assert sorted([ 'one.txt' in result.output, 'two.txt' in result.output, 'three.txt' in result.output, ]) == [False, True, True] @pytest.mark.parametrize('existing,missing', [ ('foo', 'bar'), ('bar', 'foo'), ]) def test_unknown_storage(tmpdir, runner, existing, missing): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = null [storage {existing}] type = "filesystem" path = "{base}/{existing}/" fileext = ".txt" '''.format(base=str(tmpdir), 
existing=existing))) tmpdir.mkdir(existing) result = runner.invoke(['discover']) assert result.exception assert ( "Storage '{missing}' not found. " "These are the configured storages: ['{existing}']" .format(missing=missing, existing=existing) ) in result.output @pytest.mark.parametrize('cmd', ['sync', 'metasync']) def test_no_configured_pairs(tmpdir, runner, cmd): runner.write_with_general('') result = runner.invoke([cmd]) assert result.output == 'critical: Nothing to do.\n' assert result.exception.code == 5 @pytest.mark.parametrize('resolution,expect_foo,expect_bar', [ (['command', 'cp'], 'UID:lol\nfööcontent', 'UID:lol\nfööcontent') ]) def test_conflict_resolution(tmpdir, runner, resolution, expect_foo, expect_bar): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = null conflict_resolution = {val} [storage foo] type = "filesystem" fileext = ".txt" path = "{base}/foo" [storage bar] type = "filesystem" fileext = ".txt" path = "{base}/bar" '''.format(base=str(tmpdir), val=json.dumps(resolution)))) foo = tmpdir.join('foo') bar = tmpdir.join('bar') fooitem = foo.join('lol.txt').ensure() fooitem.write('UID:lol\nfööcontent') baritem = bar.join('lol.txt').ensure() baritem.write('UID:lol\nbööcontent') r = runner.invoke(['discover']) assert not r.exception r = runner.invoke(['sync']) assert not r.exception assert fooitem.read() == expect_foo assert baritem.read() == expect_bar @pytest.mark.parametrize('partial_sync', ['error', 'ignore', 'revert', None]) def test_partial_sync(tmpdir, runner, partial_sync): runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = null {partial_sync} [storage foo] type = "filesystem" fileext = ".txt" path = "{base}/foo" [storage bar] type = "filesystem" read_only = true fileext = ".txt" path = "{base}/bar" '''.format( partial_sync=('partial_sync = "{}"\n'.format(partial_sync) if partial_sync else ''), base=str(tmpdir) ))) foo = tmpdir.mkdir('foo') bar = tmpdir.mkdir('bar') foo.join('other.txt').write('UID:other') bar.join('other.txt').write('UID:other') baritem = bar.join('lol.txt') baritem.write('UID:lol') r = runner.invoke(['discover']) assert not r.exception r = runner.invoke(['sync']) assert not r.exception fooitem = foo.join('lol.txt') fooitem.remove() r = runner.invoke(['sync']) if partial_sync == 'error': assert r.exception assert 'Attempted change' in r.output elif partial_sync == 'ignore': assert baritem.exists() r = runner.invoke(['sync']) assert not r.exception assert baritem.exists() else: assert baritem.exists() r = runner.invoke(['sync']) assert not r.exception assert baritem.exists() assert fooitem.exists() def test_fetch_only_necessary_params(tmpdir, runner): fetched_file = tmpdir.join('fetched_flag') fetch_script = tmpdir.join('fetch_script') fetch_script.write(dedent(''' set -e touch "{}" echo ".txt" '''.format(str(fetched_file)))) runner.write_with_general(dedent(''' [pair foobar] a = "foo" b = "bar" collections = null [pair bambar] a = "bam" b = "bar" collections = null [storage foo] type = "filesystem" path = "{path}" fileext = ".txt" [storage bar] type = "filesystem" path = "{path}" fileext = ".txt" [storage bam] type = "filesystem" path = "{path}" fileext.fetch = ["command", "sh", "{script}"] '''.format(path=str(tmpdir.mkdir('bogus')), script=str(fetch_script)))) def fetched(): try: fetched_file.remove() return True except Exception: return False r = runner.invoke(['discover']) assert not r.exception assert fetched() r = runner.invoke(['sync', 'foobar']) assert not r.exception 
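    # Syncing only the foobar pair must not evaluate bam's fetch command.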
assert not fetched() r = runner.invoke(['sync']) assert not r.exception assert fetched() r = runner.invoke(['sync', 'bambar']) assert not r.exception assert fetched() vdirsyncer-0.16.2/tests/system/cli/test_config.py0000644000175000017500000001137213057746063024160 0ustar untitakeruntitaker00000000000000import io from textwrap import dedent import pytest from vdirsyncer import cli, exceptions from vdirsyncer.cli.config import Config invalid = object() @pytest.fixture def read_config(tmpdir, monkeypatch): def inner(cfg): errors = [] monkeypatch.setattr('vdirsyncer.cli.cli_logger.error', errors.append) f = io.StringIO(dedent(cfg.format(base=str(tmpdir)))) rv = Config.from_fileobject(f) monkeypatch.undo() return errors, rv return inner def test_read_config(read_config): errors, c = read_config(u''' [general] status_path = "/tmp/status/" [pair bob] a = "bob_a" b = "bob_b" collections = null [storage bob_a] type = "filesystem" path = "/tmp/contacts/" fileext = ".vcf" yesno = false number = 42 [storage bob_b] type = "carddav" ''') assert c.general == {'status_path': '/tmp/status/'} assert set(c.pairs) == {'bob'} bob = c.pairs['bob'] assert bob.collections is None assert c.storages == { 'bob_a': {'type': 'filesystem', 'path': '/tmp/contacts/', 'fileext': '.vcf', 'yesno': False, 'number': 42, 'instance_name': 'bob_a'}, 'bob_b': {'type': 'carddav', 'instance_name': 'bob_b'} } def test_missing_collections_param(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [general] status_path = "/tmp/status/" [pair bob] a = "bob_a" b = "bob_b" [storage bob_a] type = "lmao" [storage bob_b] type = "lmao" ''') assert 'collections parameter missing' in str(excinfo.value) def test_invalid_section_type(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [general] status_path = "/tmp/status/" [bogus] ''') assert 'Unknown section' in str(excinfo.value) assert 'bogus' in str(excinfo.value) def test_missing_general_section(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [pair my_pair] a = "my_a" b = "my_b" collections = null [storage my_a] type = "filesystem" path = "{base}/path_a/" fileext = ".txt" [storage my_b] type = "filesystem" path = "{base}/path_b/" fileext = ".txt" ''') assert 'Invalid general section.' in str(excinfo.value) def test_wrong_general_section(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [general] wrong = true ''') assert 'Invalid general section.' 
in str(excinfo.value) assert excinfo.value.problems == [ 'general section doesn\'t take the parameters: wrong', 'general section is missing the parameters: status_path' ] def test_invalid_storage_name(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [general] status_path = "{base}/status/" [storage foo.bar] ''') assert 'invalid characters' in str(excinfo.value).lower() def test_invalid_collections_arg(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [general] status_path = "/tmp/status/" [pair foobar] a = "foo" b = "bar" collections = [null] [storage foo] type = "filesystem" path = "/tmp/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "/tmp/bar/" fileext = ".txt" ''') assert 'Expected string' in str(excinfo.value) def test_duplicate_sections(read_config): with pytest.raises(exceptions.UserError) as excinfo: read_config(u''' [general] status_path = "/tmp/status/" [pair foobar] a = "foobar" b = "bar" collections = null [storage foobar] type = "filesystem" path = "/tmp/foo/" fileext = ".txt" [storage bar] type = "filesystem" path = "/tmp/bar/" fileext = ".txt" ''') assert 'Name "foobar" already used' in str(excinfo.value) def test_validate_collections_param(): x = cli.config._validate_collections_param x(None) x(["c", "a", "b"]) pytest.raises(ValueError, x, [None]) pytest.raises(ValueError, x, ["a", "a", "a"]) pytest.raises(ValueError, x, [[None, "a", "b"]]) x([["c", None, "b"]]) x([["c", "a", None]]) x([["c", None, None]]) vdirsyncer-0.16.2/tests/system/cli/test_utils.py0000644000175000017500000000133413057746063024050 0ustar untitakeruntitaker00000000000000from vdirsyncer import exceptions from vdirsyncer.cli.utils import handle_cli_error, \ storage_instance_from_config, storage_names def test_handle_cli_error(capsys): try: raise exceptions.InvalidResponse('ayy lmao') except BaseException: handle_cli_error() out, err = capsys.readouterr() assert 'returned something vdirsyncer doesn\'t understand' in err assert 'ayy lmao' in err def test_storage_instance_from_config(monkeypatch): def lol(**kw): assert kw == {'foo': 'bar', 'baz': 1} return 'OK' monkeypatch.setitem(storage_names._storages, 'lol', lol) config = {'type': 'lol', 'foo': 'bar', 'baz': 1} assert storage_instance_from_config(config) == 'OK' vdirsyncer-0.16.2/tests/__init__.py0000644000175000017500000000404013134636312021301 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- ''' Test suite for vdirsyncer. ''' import hypothesis.strategies as st from vdirsyncer.vobject import normalize_item def blow_up(*a, **kw): raise AssertionError('Did not expect to be called.') def assert_item_equals(a, b): assert normalize_item(a) == normalize_item(b) VCARD_TEMPLATE = u'''BEGIN:VCARD VERSION:3.0 FN:Cyrus Daboo N:Daboo;Cyrus;;; ADR;TYPE=POSTAL:;2822 Email HQ;Suite 2821;RFCVille;PA;15213;USA EMAIL;TYPE=PREF:cyrus@example.com NICKNAME:me NOTE:Example VCard. 
ORG:Self Employed TEL;TYPE=VOICE:412 605 0499 TEL;TYPE=FAX:412 605 0705 URL;VALUE=URI:http://www.example.com X-SOMETHING:{r} UID:{uid} END:VCARD''' TASK_TEMPLATE = u'''BEGIN:VCALENDAR VERSION:2.0 PRODID:-//dmfs.org//mimedir.icalendar//EN BEGIN:VTODO CREATED:20130721T142233Z DTSTAMP:20130730T074543Z LAST-MODIFIED;VALUE=DATE-TIME:20140122T151338Z SEQUENCE:2 SUMMARY:Book: Kowlani - Tödlicher Staub X-SOMETHING:{r} UID:{uid} END:VTODO END:VCALENDAR''' BARE_EVENT_TEMPLATE = u'''BEGIN:VEVENT DTSTART:19970714T170000Z DTEND:19970715T035959Z SUMMARY:Bastille Day Party X-SOMETHING:{r} UID:{uid} END:VEVENT''' EVENT_TEMPLATE = u'''BEGIN:VCALENDAR VERSION:2.0 PRODID:-//hacksw/handcal//NONSGML v1.0//EN ''' + BARE_EVENT_TEMPLATE + u''' END:VCALENDAR''' EVENT_WITH_TIMEZONE_TEMPLATE = '''BEGIN:VCALENDAR BEGIN:VTIMEZONE TZID:Europe/Rome X-LIC-LOCATION:Europe/Rome BEGIN:DAYLIGHT TZOFFSETFROM:+0100 TZOFFSETTO:+0200 TZNAME:CEST DTSTART:19700329T020000 RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=3 END:DAYLIGHT BEGIN:STANDARD TZOFFSETFROM:+0200 TZOFFSETTO:+0100 TZNAME:CET DTSTART:19701025T030000 RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10 END:STANDARD END:VTIMEZONE ''' + BARE_EVENT_TEMPLATE + ''' END:VCALENDAR''' SIMPLE_TEMPLATE = u'''BEGIN:FOO UID:{uid} X-SOMETHING:{r} HAHA:YES END:FOO''' printable_characters_strategy = st.text( st.characters(blacklist_categories=( 'Cc', 'Cs' )) ) uid_strategy = st.text( st.characters(blacklist_categories=( 'Zs', 'Zl', 'Zp', 'Cc', 'Cs' )), min_size=1 ).filter(lambda x: x.strip() == x) vdirsyncer-0.16.2/vdirsyncer.egg-info/0000755000175000017500000000000013147536465021726 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/vdirsyncer.egg-info/SOURCES.txt0000644000175000017500000000544213147536465023617 0ustar untitakeruntitaker00000000000000.codecov.yml .coveragerc .gitignore .gitmodules .travis.yml AUTHORS.rst CHANGELOG.rst CODE_OF_CONDUCT.rst CONTRIBUTING.rst ISSUE_TEMPLATE.md LICENSE MANIFEST.in Makefile README.rst config.example docs-requirements.txt setup.cfg setup.py test-requirements.txt docs/Makefile docs/changelog.rst docs/conf.py docs/config.rst docs/contact.rst docs/contributing.rst docs/donations.rst docs/index.rst docs/installation.rst docs/keyring.rst docs/license.rst docs/make.bat docs/packaging.rst docs/partial-sync.rst docs/problems.rst docs/ssl-tutorial.rst docs/tutorial.rst docs/vdir.rst docs/when.rst docs/_static/.gitkeep docs/tutorials/baikal.rst docs/tutorials/claws-mail.rst docs/tutorials/davmail.rst docs/tutorials/fastmail.rst docs/tutorials/google.rst docs/tutorials/icloud.rst docs/tutorials/index.rst docs/tutorials/nextcloud.rst docs/tutorials/owncloud.rst docs/tutorials/radicale.rst docs/tutorials/systemd-timer.rst docs/tutorials/todoman.rst docs/tutorials/xandikos.rst tests/__init__.py tests/conftest.py tests/storage/__init__.py tests/storage/conftest.py tests/storage/test_filesystem.py tests/storage/test_http.py tests/storage/test_http_with_singlefile.py tests/storage/test_memory.py tests/storage/test_singlefile.py tests/storage/dav/__init__.py tests/storage/dav/test_caldav.py tests/storage/dav/test_carddav.py tests/storage/dav/test_main.py tests/storage/servers/radicale/__init__.py tests/storage/servers/radicale/install.sh tests/storage/servers/skip/__init__.py tests/storage/servers/skip/install.sh tests/system/cli/__init__.py tests/system/cli/conftest.py tests/system/cli/test_config.py tests/system/cli/test_discover.py tests/system/cli/test_fetchparams.py tests/system/cli/test_repair.py tests/system/cli/test_sync.py tests/system/cli/test_utils.py 
tests/system/utils/test_main.py tests/unit/test_exceptions.py tests/unit/test_metasync.py tests/unit/test_repair.py tests/unit/test_sync.py tests/unit/cli/test_config.py tests/unit/cli/test_discover.py tests/unit/utils/test_vobject.py vdirsyncer/__init__.py vdirsyncer/__main__.py vdirsyncer/exceptions.py vdirsyncer/http.py vdirsyncer/metasync.py vdirsyncer/repair.py vdirsyncer/sync.py vdirsyncer/utils.py vdirsyncer/version.py vdirsyncer/vobject.py vdirsyncer.egg-info/PKG-INFO vdirsyncer.egg-info/SOURCES.txt vdirsyncer.egg-info/dependency_links.txt vdirsyncer.egg-info/entry_points.txt vdirsyncer.egg-info/requires.txt vdirsyncer.egg-info/top_level.txt vdirsyncer/cli/__init__.py vdirsyncer/cli/config.py vdirsyncer/cli/discover.py vdirsyncer/cli/fetchparams.py vdirsyncer/cli/tasks.py vdirsyncer/cli/utils.py vdirsyncer/storage/__init__.py vdirsyncer/storage/base.py vdirsyncer/storage/dav.py vdirsyncer/storage/etesync.py vdirsyncer/storage/filesystem.py vdirsyncer/storage/google.py vdirsyncer/storage/http.py vdirsyncer/storage/memory.py vdirsyncer/storage/singlefile.pyvdirsyncer-0.16.2/vdirsyncer.egg-info/top_level.txt0000644000175000017500000000001313147536465024452 0ustar untitakeruntitaker00000000000000vdirsyncer vdirsyncer-0.16.2/vdirsyncer.egg-info/entry_points.txt0000644000175000017500000000006413147536465025224 0ustar untitakeruntitaker00000000000000[console_scripts] vdirsyncer = vdirsyncer.cli:main vdirsyncer-0.16.2/vdirsyncer.egg-info/requires.txt0000644000175000017500000000026013147536465024324 0ustar untitakeruntitaker00000000000000click>=5.0 click-log>=0.2.0, <0.3.0 click-threading>=0.2 requests >=2.4.1, !=2.9.0 requests_toolbelt >=0.4.0 atomicwrites>=0.1.7 [etesync] etesync [google] requests-oauthlib vdirsyncer-0.16.2/vdirsyncer.egg-info/PKG-INFO0000644000175000017500000000523013147536465023023 0ustar untitakeruntitaker00000000000000Metadata-Version: 1.1 Name: vdirsyncer Version: 0.16.2 Summary: Synchronize calendars and contacts Home-page: https://github.com/pimutils/vdirsyncer Author: Markus Unterwaditzer Author-email: markus@unterwaditzer.net License: BSD Description: ========== vdirsyncer ========== - `Documentation `_ - `Source code `_ Vdirsyncer synchronizes your calendars and addressbooks between two storages_. The most popular purpose is to synchronize a CalDAV/CardDAV server with a local folder or file. The local data can then be accessed via a variety of programs_, none of which have to know or worry about syncing to a server. .. _storages: https://vdirsyncer.pimutils.org/en/latest/config.html#storages .. _programs: https://vdirsyncer.pimutils.org/en/latest/tutorials/ It aims to be for CalDAV and CardDAV what `OfflineIMAP `_ is for IMAP. .. image:: https://travis-ci.org/pimutils/vdirsyncer.svg?branch=master :target: https://travis-ci.org/pimutils/vdirsyncer .. image:: https://codecov.io/github/pimutils/vdirsyncer/coverage.svg?branch=master :target: https://codecov.io/github/pimutils/vdirsyncer?branch=master .. image:: https://badge.waffle.io/pimutils/vdirsyncer.svg?label=ready&title=Ready :target: https://waffle.io/pimutils/vdirsyncer Links of interest ================= * Check out `the tutorial `_ for basic usage. * `Contact information `_ * `How to contribute to this project `_ * `Donations `_ License ======= Licensed under the 3-clause BSD license, see ``LICENSE``. 
Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: POSIX Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Topic :: Internet Classifier: Topic :: Utilities vdirsyncer-0.16.2/vdirsyncer.egg-info/dependency_links.txt0000644000175000017500000000000113147536465025774 0ustar untitakeruntitaker00000000000000 vdirsyncer-0.16.2/test-requirements.txt0000644000175000017500000000007513013735125022271 0ustar untitakeruntitaker00000000000000hypothesis>=3.1 pytest pytest-localserver pytest-subtesthack vdirsyncer-0.16.2/README.rst0000644000175000017500000000310613126035226017515 0ustar untitakeruntitaker00000000000000========== vdirsyncer ========== - `Documentation `_ - `Source code `_ Vdirsyncer synchronizes your calendars and addressbooks between two storages_. The most popular purpose is to synchronize a CalDAV/CardDAV server with a local folder or file. The local data can then be accessed via a variety of programs_, none of which have to know or worry about syncing to a server. .. _storages: https://vdirsyncer.pimutils.org/en/latest/config.html#storages .. _programs: https://vdirsyncer.pimutils.org/en/latest/tutorials/ It aims to be for CalDAV and CardDAV what `OfflineIMAP `_ is for IMAP. .. image:: https://travis-ci.org/pimutils/vdirsyncer.svg?branch=master :target: https://travis-ci.org/pimutils/vdirsyncer .. image:: https://codecov.io/github/pimutils/vdirsyncer/coverage.svg?branch=master :target: https://codecov.io/github/pimutils/vdirsyncer?branch=master .. image:: https://badge.waffle.io/pimutils/vdirsyncer.svg?label=ready&title=Ready :target: https://waffle.io/pimutils/vdirsyncer Links of interest ================= * Check out `the tutorial `_ for basic usage. * `Contact information `_ * `How to contribute to this project `_ * `Donations `_ License ======= Licensed under the 3-clause BSD license, see ``LICENSE``. vdirsyncer-0.16.2/vdirsyncer/0000755000175000017500000000000013147536465020234 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/vdirsyncer/sync.py0000644000175000017500000006002513144561565021561 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- ''' The `sync` function in `vdirsyncer.sync` can be called on two instances of `Storage` to synchronize them. Apart from the defined errors, this is the only public API of this module. The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z. Yang: http://blog.ezyang.com/2012/08/how-offlineimap-works/ Some modifications to it are explained in https://unterwaditzer.net/2016/sync-algorithm.html ''' import abc import contextlib import itertools import logging import sqlite3 import sys from . import exceptions from .utils import uniq sync_logger = logging.getLogger(__name__) @contextlib.contextmanager def _exclusive_transaction(conn): c = None try: c = conn.execute('BEGIN EXCLUSIVE TRANSACTION') yield c c.execute('COMMIT') except BaseException: if c is None: raise _, e, tb = sys.exc_info() c.execute('ROLLBACK') raise e.with_traceback(tb) class SyncError(exceptions.Error): '''Errors related to synchronization.''' class SyncConflict(SyncError): ''' Two items changed since the last sync, they now have different contents and no conflict resolution method was given. 
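    Passing a ``conflict_resolution`` function to :py:func:`sync` resolves
    such conflicts instead of raising this error.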
:param ident: The ident of the item. :param href_a: The item's href on side A. :param href_b: The item's href on side B. ''' ident = None href_a = None href_b = None class IdentConflict(SyncError): ''' Multiple items on the same storage have the same UID. :param storage: The affected storage. :param hrefs: List of affected hrefs on `storage`. ''' storage = None _hrefs = None @property def hrefs(self): return self._hrefs @hrefs.setter def hrefs(self, val): new_val = set(val) assert len(new_val) > 1, val self._hrefs = new_val class StorageEmpty(SyncError): ''' One storage unexpectedly got completely empty between two synchronizations. The first argument is the empty storage. :param empty_storage: The empty :py:class:`vdirsyncer.storage.base.Storage`. ''' empty_storage = None class BothReadOnly(SyncError): ''' Both storages are marked as read-only. Synchronization is therefore not possible. ''' class PartialSync(SyncError): ''' Attempted change on read-only storage. ''' storage = None class _IdentAlreadyExists(SyncError): '''Like IdentConflict, but for internal state. If this bubbles up, we don't have a data race, but a bug.''' old_href = None new_href = None def to_ident_conflict(self, storage): return IdentConflict(storage=storage, hrefs=[self.old_href, self.new_href]) class _StatusBase(metaclass=abc.ABCMeta): @abc.abstractmethod def transaction(self): raise NotImplementedError() @abc.abstractmethod def insert_ident_a(self, ident, props): raise NotImplementedError() @abc.abstractmethod def insert_ident_b(self, ident, props): raise NotImplementedError() @abc.abstractmethod def update_ident_a(self, ident, props): raise NotImplementedError() @abc.abstractmethod def update_ident_b(self, ident, props): raise NotImplementedError() @abc.abstractmethod def remove_ident(self, ident): raise NotImplementedError() @abc.abstractmethod def get_a(self, ident): raise NotImplementedError() @abc.abstractmethod def get_b(self, ident): raise NotImplementedError() @abc.abstractmethod def get_new_a(self, ident): raise NotImplementedError() @abc.abstractmethod def get_new_b(self, ident): raise NotImplementedError() @abc.abstractmethod def iter_old(self): raise NotImplementedError() @abc.abstractmethod def iter_new(self): raise NotImplementedError() @abc.abstractmethod def get_by_href_a(self, href, default=(None, None)): raise NotImplementedError() @abc.abstractmethod def get_by_href_b(self, href, default=(None, None)): raise NotImplementedError() @abc.abstractmethod def rollback(self, ident): raise NotImplementedError() class SqliteStatus(_StatusBase): SCHEMA_VERSION = 1 def __init__(self, path): self._path = path self._c = sqlite3.connect(path) self._c.isolation_level = None # turn off idiocy of DB-API self._c.row_factory = sqlite3.Row self._update_schema() def load_legacy_status(self, status): for ident, metadata in status.items(): if len(metadata) == 4: href_a, etag_a, href_b, etag_b = metadata params = (ident, href_a, 'UNDEFINED', etag_a, href_b, 'UNDEFINED', etag_b) else: a, b = metadata params = (ident, a.get('href'), a.get('hash', 'UNDEFINED'), a.get('etag'), b.get('href'), b.get('hash', 'UNDEFINED'), b.get('etag')) self._c.execute( 'INSERT INTO status' ' (ident, href_a, hash_a, etag_a,' ' href_b, hash_b, etag_b)' ' VALUES (?, ?, ?, ?, ?, ?, ?)', params ) def to_legacy_status(self): for ident in self.iter_old(): a = self.get_a(ident) b = self.get_b(ident) yield ident, (a.to_status(), b.to_status()) def _update_schema(self): if self._is_latest_version(): return # If we ever bump the schema version, we will 
need a way to migrate # data. with _exclusive_transaction(self._c) as c: c.execute('CREATE TABLE meta ( "version" INTEGER PRIMARY KEY )') c.execute('INSERT INTO meta (version) VALUES (?)', (self.SCHEMA_VERSION,)) # I know that this is a bad schema, but right there is just too # little gain in deduplicating the .._a and .._b columns. c.execute('''CREATE TABLE status ( "ident" TEXT PRIMARY KEY NOT NULL, "href_a" TEXT, "href_b" TEXT, "hash_a" TEXT NOT NULL, "hash_b" TEXT NOT NULL, "etag_a" TEXT, "etag_b" TEXT ); ''') c.execute('CREATE UNIQUE INDEX by_href_a ON status(href_a)') c.execute('CREATE UNIQUE INDEX by_href_b ON status(href_b)') # We cannot add NOT NULL here because data is first fetched for the # storage a, then storage b. Inbetween the `_b`-columns are filled # with NULL. # # In an ideal world we would be able to start a transaction with # one cursor, write our new data into status and simultaneously # query the old status data using a different cursor. # Unfortunately sqlite enforces NOT NULL constraints immediately, # not just at commit. Since there is also no way to alter # constraints on a table (disable constraints on start of # transaction and reenable on end), it's a separate table now that # just gets copied over before we commit. That's a lot of copying, # sadly. c.execute('''CREATE TABLE new_status ( "ident" TEXT PRIMARY KEY NOT NULL, "href_a" TEXT, "href_b" TEXT, "hash_a" TEXT, "hash_b" TEXT, "etag_a" TEXT, "etag_b" TEXT ); ''') def _is_latest_version(self): try: return bool(self._c.execute( 'SELECT version FROM meta WHERE version = ?', (self.SCHEMA_VERSION,) ).fetchone()) except sqlite3.OperationalError: return False @contextlib.contextmanager def transaction(self): old_c = self._c try: with _exclusive_transaction(self._c) as new_c: self._c = new_c yield self._c.execute('DELETE FROM status') self._c.execute('INSERT INTO status ' 'SELECT * FROM new_status') self._c.execute('DELETE FROM new_status') finally: self._c = old_c def insert_ident_a(self, ident, a_props): # FIXME: Super inefficient old_props = self.get_new_a(ident) if old_props is not None: raise _IdentAlreadyExists(old_href=old_props.href, new_href=a_props.href) b_props = self.get_new_b(ident) or _ItemMetadata() self._c.execute( 'INSERT OR REPLACE INTO new_status ' 'VALUES(?, ?, ?, ?, ?, ?, ?)', (ident, a_props.href, b_props.href, a_props.hash, b_props.hash, a_props.etag, b_props.etag) ) def insert_ident_b(self, ident, b_props): # FIXME: Super inefficient old_props = self.get_new_b(ident) if old_props is not None: raise _IdentAlreadyExists(old_href=old_props.href, new_href=b_props.href) a_props = self.get_new_a(ident) or _ItemMetadata() self._c.execute( 'INSERT OR REPLACE INTO new_status ' 'VALUES(?, ?, ?, ?, ?, ?, ?)', (ident, a_props.href, b_props.href, a_props.hash, b_props.hash, a_props.etag, b_props.etag) ) def update_ident_a(self, ident, props): self._c.execute( 'UPDATE new_status' ' SET href_a=?, hash_a=?, etag_a=?' ' WHERE ident=?', (props.href, props.hash, props.etag, ident) ) assert self._c.rowcount > 0 def update_ident_b(self, ident, props): self._c.execute( 'UPDATE new_status' ' SET href_b=?, hash_b=?, etag_b=?' ' WHERE ident=?', (props.href, props.hash, props.etag, ident) ) assert self._c.rowcount > 0 def remove_ident(self, ident): self._c.execute('DELETE FROM new_status WHERE ident=?', (ident,)) def _get_impl(self, ident, side, table): res = self._c.execute('SELECT href_{side} AS href,' ' hash_{side} AS hash,' ' etag_{side} AS etag ' 'FROM {table} WHERE ident=?' 
.format(side=side, table=table), (ident,)).fetchone() if res is None: return None if res['hash'] is None: # FIXME: Implement as constraint in db assert res['href'] is None assert res['etag'] is None return None res = dict(res) return _ItemMetadata(**res) def get_a(self, ident): return self._get_impl(ident, side='a', table='status') def get_b(self, ident): return self._get_impl(ident, side='b', table='status') def get_new_a(self, ident): return self._get_impl(ident, side='a', table='new_status') def get_new_b(self, ident): return self._get_impl(ident, side='b', table='new_status') def iter_old(self): return iter(res['ident'] for res in self._c.execute('SELECT ident FROM status').fetchall()) def iter_new(self): return iter(res['ident'] for res in self._c.execute('SELECT ident FROM new_status').fetchall()) def rollback(self, ident): a = self.get_a(ident) b = self.get_b(ident) assert (a is None) == (b is None) if a is None and b is None: self.remove_ident(ident) return self._c.execute( 'INSERT OR REPLACE INTO new_status' ' VALUES (?, ?, ?, ?, ?, ?, ?)', (ident, a.href, b.href, a.hash, b.hash, a.etag, b.etag) ) def _get_by_href_impl(self, href, default=(None, None), side=None): res = self._c.execute( 'SELECT ident, hash_{side} AS hash, etag_{side} AS etag ' 'FROM status WHERE href_{side}=?'.format(side=side), (href,)).fetchone() if not res: return default return res['ident'], _ItemMetadata( href=href, hash=res['hash'], etag=res['etag'], ) def get_by_href_a(self, *a, **kw): kw['side'] = 'a' return self._get_by_href_impl(*a, **kw) def get_by_href_b(self, *a, **kw): kw['side'] = 'b' return self._get_by_href_impl(*a, **kw) class _SubStatus(object): def __init__(self, parent, side): self.parent = parent assert side in 'ab' self.remove_ident = parent.remove_ident if side == 'a': self.insert_ident = parent.insert_ident_a self.update_ident = parent.update_ident_a self.get = parent.get_a self.get_new = parent.get_new_a self.get_by_href = parent.get_by_href_a else: self.insert_ident = parent.insert_ident_b self.update_ident = parent.update_ident_b self.get = parent.get_b self.get_new = parent.get_new_b self.get_by_href = parent.get_by_href_b class _ItemMetadata: href = None hash = None etag = None def __init__(self, **kwargs): for k, v in kwargs.items(): assert hasattr(self, k) setattr(self, k, v) def to_status(self): return { 'href': self.href, 'etag': self.etag, 'hash': self.hash } class _StorageInfo(object): '''A wrapper class that holds prefetched items, the status and other things.''' def __init__(self, storage, status): self.storage = storage self.status = status self._item_cache = {} def prepare_new_status(self): storage_nonempty = False prefetch = [] def _store_props(ident, props): try: self.status.insert_ident(ident, props) except _IdentAlreadyExists as e: raise e.to_ident_conflict(self.storage) for href, etag in self.storage.list(): storage_nonempty = True ident, meta = self.status.get_by_href(href) if meta is None or meta.href != href or meta.etag != etag: # Either the item is completely new, or updated # In both cases we should prefetch prefetch.append(href) else: # Metadata is completely identical _store_props(ident, meta) # Prefetch items for href, item, etag in (self.storage.get_multi(prefetch) if prefetch else ()): _store_props(item.ident, _ItemMetadata( href=href, hash=item.hash, etag=etag )) self.set_item_cache(item.ident, item) return storage_nonempty def is_changed(self, ident): old_meta = self.status.get(ident) if old_meta is None: # new item return True new_meta = 
self.status.get_new(ident) return ( new_meta.etag != old_meta.etag and # etag changed # item actually changed (old_meta.hash is None or new_meta.hash != old_meta.hash) ) def set_item_cache(self, ident, item): actual_hash = self.status.get_new(ident).hash assert actual_hash == item.hash self._item_cache[ident] = item def get_item_cache(self, ident): return self._item_cache[ident] def sync(storage_a, storage_b, status, conflict_resolution=None, force_delete=False, error_callback=None, partial_sync='revert'): '''Synchronizes two storages. :param storage_a: The first storage :type storage_a: :class:`vdirsyncer.storage.base.Storage` :param storage_b: The second storage :type storage_b: :class:`vdirsyncer.storage.base.Storage` :param status: {ident: (href_a, etag_a, href_b, etag_b)} metadata about the two storages for detection of changes. Will be modified by the function and should be passed to it at the next sync. If this is the first sync, an empty dictionary should be provided. :param conflict_resolution: A function that, given two conflicting item versions A and B, returns a new item with conflicts resolved. The UID must be the same. The strings `"a wins"` and `"b wins"` are also accepted to mean that side's version will always be taken. If none is provided, the sync function will raise :py:exc:`SyncConflict`. :param force_delete: When one storage got completely emptied between two syncs, :py:exc:`StorageEmpty` is raised for safety. Setting this parameter to ``True`` disables this safety measure. :param error_callback: Instead of raising errors when executing actions, call the given function with an `Exception` as the only argument. :param partial_sync: What to do when doing sync actions on read-only storages. - ``error``: Raise an error. - ``ignore``: Those actions are simply skipped. - ``revert`` (default): Revert changes on other side. 
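
    A minimal usage sketch (a sketch only -- it assumes the sqlite-backed
    status class defined above in this module, called ``SqliteStatus`` here,
    and the in-memory test storage from ``vdirsyncer.storage.memory``)::

        from vdirsyncer.storage.memory import MemoryStorage

        a = MemoryStorage()
        b = MemoryStorage()
        status = SqliteStatus(':memory:')
        sync(a, b, status)  # a second run with the same status is a no-op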
''' if storage_a.read_only and storage_b.read_only: raise BothReadOnly() if conflict_resolution == 'a wins': conflict_resolution = lambda a, b: a elif conflict_resolution == 'b wins': conflict_resolution = lambda a, b: b status_nonempty = bool(next(status.iter_old(), None)) with status.transaction(): a_info = _StorageInfo(storage_a, _SubStatus(status, 'a')) b_info = _StorageInfo(storage_b, _SubStatus(status, 'b')) a_nonempty = a_info.prepare_new_status() b_nonempty = b_info.prepare_new_status() if status_nonempty and not force_delete: if a_nonempty and not b_nonempty: raise StorageEmpty(empty_storage=storage_b) elif not a_nonempty and b_nonempty: raise StorageEmpty(empty_storage=storage_a) actions = list(_get_actions(a_info, b_info)) with storage_a.at_once(), storage_b.at_once(): for action in actions: try: action.run( a_info, b_info, conflict_resolution, partial_sync ) except Exception as e: if error_callback: error_callback(e) else: raise class Action: def _run_impl(self, a, b): # pragma: no cover raise NotImplementedError() def run(self, a, b, conflict_resolution, partial_sync): with self.auto_rollback(a, b): if self.dest.storage.read_only: if partial_sync == 'error': raise PartialSync(self.dest.storage) elif partial_sync == 'ignore': self.rollback(a, b) return else: assert partial_sync == 'revert' self._run_impl(a, b) @contextlib.contextmanager def auto_rollback(self, a, b): try: yield except BaseException as e: self.rollback(a, b) raise e def rollback(self, a, b): a.status.parent.rollback(self.ident) class Upload(Action): def __init__(self, item, dest): self.item = item self.ident = item.ident self.dest = dest def _run_impl(self, a, b): if self.dest.storage.read_only: href = etag = None else: sync_logger.info(u'Copying (uploading) item {} to {}' .format(self.ident, self.dest.storage)) href, etag = self.dest.storage.upload(self.item) assert href is not None self.dest.status.insert_ident(self.ident, _ItemMetadata( href=href, hash=self.item.hash, etag=etag )) class Update(Action): def __init__(self, item, dest): self.item = item self.ident = item.ident self.dest = dest def _run_impl(self, a, b): if self.dest.storage.read_only: meta = _ItemMetadata(hash=self.item.hash) else: sync_logger.info(u'Copying (updating) item {} to {}' .format(self.ident, self.dest.storage)) meta = self.dest.status.get_new(self.ident) meta.etag = \ self.dest.storage.update(meta.href, self.item, meta.etag) self.dest.status.update_ident(self.ident, meta) class Delete(Action): def __init__(self, ident, dest): self.ident = ident self.dest = dest def _run_impl(self, a, b): meta = self.dest.status.get_new(self.ident) if not self.dest.storage.read_only: sync_logger.info(u'Deleting item {} from {}' .format(self.ident, self.dest.storage)) self.dest.storage.delete(meta.href, meta.etag) self.dest.status.remove_ident(self.ident) class ResolveConflict(Action): def __init__(self, ident): self.ident = ident def run(self, a, b, conflict_resolution, partial_sync): with self.auto_rollback(a, b): sync_logger.info(u'Doing conflict resolution for item {}...' 
.format(self.ident)) meta_a = a.status.get_new(self.ident) meta_b = b.status.get_new(self.ident) if meta_a.hash == meta_b.hash: sync_logger.info(u'...same content on both sides.') elif conflict_resolution is None: raise SyncConflict(ident=self.ident, href_a=meta_a.href, href_b=meta_b.href) elif callable(conflict_resolution): item_a = a.get_item_cache(self.ident) item_b = b.get_item_cache(self.ident) new_item = conflict_resolution(item_a, item_b) if new_item.hash != meta_a.hash: Update(new_item, a).run(a, b, conflict_resolution, partial_sync) if new_item.hash != meta_b.hash: Update(new_item, b).run(a, b, conflict_resolution, partial_sync) else: raise exceptions.UserError( 'Invalid conflict resolution mode: {!r}' .format(conflict_resolution)) def _get_actions(a_info, b_info): for ident in uniq(itertools.chain(a_info.status.parent.iter_new(), a_info.status.parent.iter_old())): a = a_info.status.get_new(ident) b = b_info.status.get_new(ident) if a and b: a_changed = a_info.is_changed(ident) b_changed = b_info.is_changed(ident) if a_changed and b_changed: # item was modified on both sides # OR: missing status yield ResolveConflict(ident) elif a_changed and not b_changed: # item was only modified in a yield Update(a_info.get_item_cache(ident), b_info) elif not a_changed and b_changed: # item was only modified in b yield Update(b_info.get_item_cache(ident), a_info) elif a and not b: if a_info.is_changed(ident): # was deleted from b but modified on a # OR: new item was created in a yield Upload(a_info.get_item_cache(ident), b_info) else: # was deleted from b and not modified on a yield Delete(ident, a_info) elif not a and b: if b_info.is_changed(ident): # was deleted from a but modified on b # OR: new item was created in b yield Upload(b_info.get_item_cache(ident), a_info) else: # was deleted from a and not changed on b yield Delete(ident, b_info) vdirsyncer-0.16.2/vdirsyncer/http.py0000644000175000017500000001606413121521602021547 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import logging import requests from .utils import expand_path from . import DOCS_HOME, exceptions, __version__ logger = logging.getLogger(__name__) USERAGENT = 'vdirsyncer/{}'.format(__version__) def _detect_faulty_requests(): # pragma: no cover text = ( 'Error during import: {e}\n\n' 'If you have installed vdirsyncer from a distro package, please file ' 'a bug against that package, not vdirsyncer.\n\n' 'Consult {d}/problems.html#requests-related-importerrors' '-based-distributions on how to work around this.' ) try: from requests_toolbelt.auth.guess import GuessAuth # noqa except ImportError as e: import sys print(text.format(e=str(e), d=DOCS_HOME), file=sys.stderr) sys.exit(1) _detect_faulty_requests() del _detect_faulty_requests def prepare_auth(auth, username, password): if username and password: if auth == 'basic' or auth is None: return (username, password) elif auth == 'digest': from requests.auth import HTTPDigestAuth return HTTPDigestAuth(username, password) elif auth == 'guess': try: from requests_toolbelt.auth.guess import GuessAuth except ImportError: raise exceptions.UserError( 'Your version of requests_toolbelt is too ' 'old for `guess` authentication. At least ' 'version 0.4.0 is required.' 
) else: return GuessAuth(username, password) else: raise exceptions.UserError('Unknown authentication method: {}' .format(auth)) elif auth: raise exceptions.UserError('You need to specify username and password ' 'for {} authentication.'.format(auth)) else: return None def prepare_verify(verify, verify_fingerprint): if isinstance(verify, (str, bytes)): verify = expand_path(verify) elif not isinstance(verify, bool): raise exceptions.UserError('Invalid value for verify ({}), ' 'must be a path to a PEM-file or boolean.' .format(verify)) if verify_fingerprint is not None: if not isinstance(verify_fingerprint, (bytes, str)): raise exceptions.UserError('Invalid value for verify_fingerprint ' '({}), must be a string or null.' .format(verify_fingerprint)) elif not verify: raise exceptions.UserError( 'Disabling all SSL validation is forbidden. Consider setting ' 'verify_fingerprint if you have a broken or self-signed cert.' ) return { 'verify': verify, 'verify_fingerprint': verify_fingerprint, } def prepare_client_cert(cert): if isinstance(cert, (str, bytes)): cert = expand_path(cert) elif isinstance(cert, list): cert = tuple(map(prepare_client_cert, cert)) return cert HTTP_STORAGE_PARAMETERS = ''' :param username: Username for authentication. :param password: Password for authentication. :param verify: Verify SSL certificate, default True. This can also be a local path to a self-signed SSL certificate. See :ref:`ssl-tutorial` for more information. :param verify_fingerprint: Optional. SHA1 or MD5 fingerprint of the expected server certificate. See :ref:`ssl-tutorial` for more information. :param auth: Optional. Either ``basic``, ``digest`` or ``guess``. The default is preemptive Basic auth, sending credentials even if server didn't request them. This saves from an additional roundtrip per request. Consider setting ``guess`` if this causes issues with your server. :param auth_cert: Optional. Either a path to a certificate with a client certificate and the key or a list of paths to the files with them. :param useragent: Default ``vdirsyncer``. ''' def _install_fingerprint_adapter(session, fingerprint): prefix = 'https://' try: from requests_toolbelt.adapters.fingerprint import \ FingerprintAdapter except ImportError: raise RuntimeError('`verify_fingerprint` can only be used with ' 'requests-toolbelt versions >= 0.4.0') if not isinstance(session.adapters[prefix], FingerprintAdapter): fingerprint_adapter = FingerprintAdapter(fingerprint) session.mount(prefix, fingerprint_adapter) def request(method, url, session=None, latin1_fallback=True, verify_fingerprint=None, **kwargs): ''' Wrapper method for requests, to ease logging and mocking. Parameters should be the same as for ``requests.request``, except: :param session: A requests session object to use. :param verify_fingerprint: Optional. SHA1 or MD5 fingerprint of the expected server certificate. :param latin1_fallback: RFC-2616 specifies the default Content-Type of text/* to be latin1, which is not always correct, but exactly what requests is doing. Setting this parameter to False will use charset autodetection (usually ending up with utf8) instead of plainly falling back to this silly default. 
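        For example, a UTF-8 ``text/calendar`` response without an explicit
        ``charset`` parameter would otherwise be decoded as latin1.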
See https://github.com/kennethreitz/requests/issues/2042 ''' if session is None: session = requests.Session() if verify_fingerprint is not None: _install_fingerprint_adapter(session, verify_fingerprint) session.hooks = dict(response=_fix_redirects) func = session.request logger.debug(u'{} {}'.format(method, url)) logger.debug(kwargs.get('headers', {})) logger.debug(kwargs.get('data', None)) logger.debug('Sending request...') assert isinstance(kwargs.get('data', b''), bytes) r = func(method, url, **kwargs) # See https://github.com/kennethreitz/requests/issues/2042 content_type = r.headers.get('Content-Type', '') if not latin1_fallback and \ 'charset' not in content_type and \ content_type.startswith('text/'): logger.debug('Removing latin1 fallback') r.encoding = None logger.debug(r.status_code) logger.debug(r.headers) logger.debug(r.content) if r.status_code == 412: raise exceptions.PreconditionFailed(r.reason) if r.status_code in (404, 410): raise exceptions.NotFoundError(r.reason) r.raise_for_status() return r def _fix_redirects(r, *args, **kwargs): ''' Requests discards of the body content when it is following a redirect that is not a 307 or 308. We never want that to happen. See: https://github.com/kennethreitz/requests/issues/3915 https://github.com/pimutils/vdirsyncer/pull/585 https://github.com/pimutils/vdirsyncer/issues/586 FIXME: This solution isn't very nice. A new hook in requests would be better. ''' if r.is_redirect: logger.debug('Rewriting status code from %s to 307', r.status_code) r.status_code = 307 vdirsyncer-0.16.2/vdirsyncer/__main__.py0000644000175000017500000000011013057746063022313 0ustar untitakeruntitaker00000000000000if __name__ == '__main__': from vdirsyncer.cli import app app() vdirsyncer-0.16.2/vdirsyncer/metasync.py0000644000175000017500000000331613051102032022401 0ustar untitakeruntitaker00000000000000import logging from . import exceptions from .storage.base import normalize_meta_value logger = logging.getLogger(__name__) class MetaSyncError(exceptions.Error): pass class MetaSyncConflict(MetaSyncError): key = None def metasync(storage_a, storage_b, status, keys, conflict_resolution=None): def _a_to_b(): logger.info(u'Copying {} to {}'.format(key, storage_b)) storage_b.set_meta(key, a) status[key] = a def _b_to_a(): logger.info(u'Copying {} to {}'.format(key, storage_a)) storage_a.set_meta(key, b) status[key] = b def _resolve_conflict(): if a == b: status[key] = a elif conflict_resolution == 'a wins': _a_to_b() elif conflict_resolution == 'b wins': _b_to_a() else: if callable(conflict_resolution): logger.warning('Custom commands don\'t work on metasync.') elif conflict_resolution is not None: raise exceptions.UserError( 'Invalid conflict resolution setting.' ) raise MetaSyncConflict(key) for key in keys: a = storage_a.get_meta(key) b = storage_b.get_meta(key) s = normalize_meta_value(status.get(key)) logger.debug(u'Key: {}'.format(key)) logger.debug(u'A: {}'.format(a)) logger.debug(u'B: {}'.format(b)) logger.debug(u'S: {}'.format(s)) if a != s and b != s: _resolve_conflict() elif a != s and b == s: _a_to_b() elif a == s and b != s: _b_to_a() else: assert a == b for key in set(status) - set(keys): del status[key] vdirsyncer-0.16.2/vdirsyncer/exceptions.py0000644000175000017500000000351413134636312022756 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- ''' Contains exception classes used by vdirsyncer. Not all exceptions are here, only the most commonly used ones. 
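
The ``Error`` baseclass below stores keyword arguments as instance
attributes, provided the class declares a matching attribute. A sketch using
a class from this module and a hypothetical pair name::

    raise PairNotFound('no such pair', pair_name='my_pair')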
''' class Error(Exception): '''Baseclass for all errors.''' def __init__(self, *args, **kwargs): for key, value in kwargs.items(): if getattr(self, key, object()) is not None: # pragma: no cover raise TypeError('Invalid argument: {}'.format(key)) setattr(self, key, value) super(Error, self).__init__(*args) class UserError(Error, ValueError): '''Wrapper exception to be used to signify the traceback should not be shown to the user.''' problems = None def __str__(self): msg = Error.__str__(self) for problem in self.problems or (): msg += u'\n - {}'.format(problem) return msg class CollectionNotFound(Error): '''Collection not found''' class PairNotFound(Error): '''Pair not found''' pair_name = None class PreconditionFailed(Error): ''' - The item doesn't exist although it should - The item exists although it shouldn't - The etags don't match. Due to CalDAV we can't actually say which error it is. This error may indicate race conditions. ''' class NotFoundError(PreconditionFailed): '''Item not found''' class AlreadyExistingError(PreconditionFailed): '''Item already exists.''' existing_href = None class WrongEtagError(PreconditionFailed): '''Wrong etag''' class ReadOnlyError(Error): '''Storage is read-only.''' class InvalidResponse(Error, ValueError): '''The backend returned an invalid result.''' class UnsupportedMetadataError(Error, NotImplementedError): '''The storage doesn't support this type of metadata.''' class CollectionRequired(Error): '''`collection = null` is not allowed.''' vdirsyncer-0.16.2/vdirsyncer/storage/0000755000175000017500000000000013147536465021700 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/vdirsyncer/storage/google.py0000644000175000017500000001443713121521602023512 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import json import logging import os import urllib.parse as urlparse from atomicwrites import atomic_write import click from click_threading import get_ui_worker from . import base, dav from .. import exceptions from ..utils import checkdir, expand_path, open_graphical_browser logger = logging.getLogger(__name__) TOKEN_URL = 'https://accounts.google.com/o/oauth2/v2/auth' REFRESH_URL = 'https://www.googleapis.com/oauth2/v4/token' try: from requests_oauthlib import OAuth2Session have_oauth2 = True except ImportError: have_oauth2 = False class GoogleSession(dav.DAVSession): def __init__(self, token_file, client_id, client_secret, url=None): # Required for discovering collections if url is not None: self.url = url self.useragent = client_id self._settings = {} if not have_oauth2: raise exceptions.UserError('requests-oauthlib not installed') token_file = expand_path(token_file) ui_worker = get_ui_worker() f = lambda: self._init_token(token_file, client_id, client_secret) ui_worker.put(f) def _init_token(self, token_file, client_id, client_secret): token = None try: with open(token_file) as f: token = json.load(f) except (OSError, IOError): pass except ValueError as e: raise exceptions.UserError( 'Failed to load token file {}, try deleting it. 
' 'Original error: {}'.format(token_file, e) ) def _save_token(token): checkdir(expand_path(os.path.dirname(token_file)), create=True) with atomic_write(token_file, mode='w', overwrite=True) as f: json.dump(token, f) self._session = OAuth2Session( client_id=client_id, token=token, redirect_uri='urn:ietf:wg:oauth:2.0:oob', scope=self.scope, auto_refresh_url=REFRESH_URL, auto_refresh_kwargs={ 'client_id': client_id, 'client_secret': client_secret, }, token_updater=_save_token ) if not token: authorization_url, state = self._session.authorization_url( TOKEN_URL, # access_type and approval_prompt are Google specific # extra parameters. access_type='offline', approval_prompt='force') click.echo('Opening {} ...'.format(authorization_url)) try: open_graphical_browser(authorization_url) except Exception as e: logger.warning(str(e)) click.echo("Follow the instructions on the page.") code = click.prompt("Paste obtained code") token = self._session.fetch_token( REFRESH_URL, code=code, # Google specific extra parameter used for client # authentication client_secret=client_secret, ) # FIXME: Ugly _save_token(token) GOOGLE_PARAMS_DOCS = ''' :param token_file: A filepath where access tokens are stored. :param client_id/client_secret: OAuth credentials, obtained from the Google API Manager. ''' class GoogleCalendarStorage(dav.CalDAVStorage): __doc__ = '''Google calendar. Please refer to :storage:`caldav` regarding the ``item_types`` and timerange parameters. ''' + GOOGLE_PARAMS_DOCS class session_class(GoogleSession): url = 'https://apidata.googleusercontent.com/caldav/v2/' scope = ['https://www.googleapis.com/auth/calendar'] class discovery_class(dav.CalDiscover): @staticmethod def _get_collection_from_url(url): # Google CalDAV has collection URLs like: # /user/foouser/calendars/foocalendar/events/ parts = url.rstrip('/').split('/') parts.pop() collection = parts.pop() return urlparse.unquote(collection) storage_name = 'google_calendar' def __init__(self, token_file, client_id, client_secret, start_date=None, end_date=None, item_types=(), **kwargs): if not kwargs.get('collection'): raise exceptions.CollectionRequired() super(GoogleCalendarStorage, self).__init__( token_file=token_file, client_id=client_id, client_secret=client_secret, start_date=start_date, end_date=end_date, item_types=item_types, **kwargs ) # This is ugly: We define/override the entire signature computed for the # docs here because the current way we autogenerate those docs are too # simple for our advanced argspec juggling in `vdirsyncer.storage.dav`. __init__._traverse_superclass = base.Storage class GoogleContactsStorage(dav.CardDAVStorage): __doc__ = '''Google contacts. ''' + GOOGLE_PARAMS_DOCS class session_class(GoogleSession): # Google CardDAV is completely bonkers. Collection discovery doesn't # work properly, well-known URI takes us directly to single collection # from where we can't discover principal or homeset URIs (the PROPFINDs # 404). # # So we configure the well-known URI here again, such that discovery # tries collection enumeration on it directly. That appears to work. url = 'https://www.googleapis.com/.well-known/carddav' scope = ['https://www.googleapis.com/auth/carddav'] class discovery_class(dav.CardDAVStorage.discovery_class): # Google CardDAV doesn't return any resourcetype prop. 
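        # With _resourcetype = None, Discover._check_collection_resource_type
        # accepts every response instead of filtering by resource type.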
_resourcetype = None storage_name = 'google_contacts' def __init__(self, token_file, client_id, client_secret, **kwargs): if not kwargs.get('collection'): raise exceptions.CollectionRequired() super(GoogleContactsStorage, self).__init__( token_file=token_file, client_id=client_id, client_secret=client_secret, **kwargs ) # This is ugly: We define/override the entire signature computed for the # docs here because the current way we autogenerate those docs are too # simple for our advanced argspec juggling in `vdirsyncer.storage.dav`. __init__._traverse_superclass = base.Storage vdirsyncer-0.16.2/vdirsyncer/storage/http.py0000644000175000017500000000617213074442004023217 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import urllib.parse as urlparse from .base import Storage from .. import exceptions from ..http import HTTP_STORAGE_PARAMETERS, USERAGENT, prepare_auth, \ prepare_client_cert, prepare_verify, request from ..vobject import Item, split_collection class HttpStorage(Storage): __doc__ = ''' Use a simple ``.ics`` file (or similar) from the web. ``webcal://``-calendars are supposed to be used with this, but you have to replace ``webcal://`` with ``http://``, or better, ``https://``. Too many WebCAL providers generate UIDs of all ``VEVENT``-components on-the-fly, i.e. all UIDs change every time the calendar is downloaded. This leads many synchronization programs to believe that all events have been deleted and new ones created, and accordingly causes a lot of unnecessary uploads and deletions on the other side. Vdirsyncer completely ignores UIDs coming from :storage:`http` and will replace them with a hash of the normalized item content. :param url: URL to the ``.ics`` file. ''' + HTTP_STORAGE_PARAMETERS + ''' A simple example:: [pair holidays] a = holidays_local b = holidays_remote collections = null [storage holidays_local] type = "filesystem" path = ~/.config/vdir/calendars/holidays/ fileext = .ics [storage holidays_remote] type = "http" url = https://example.com/holidays_from_hicksville.ics ''' storage_name = 'http' read_only = True _repr_attributes = ('username', 'url') _items = None # Required for tests. 
_ignore_uids = True def __init__(self, url, username='', password='', verify=True, auth=None, useragent=USERAGENT, verify_fingerprint=None, auth_cert=None, **kwargs): super(HttpStorage, self).__init__(**kwargs) self._settings = { 'auth': prepare_auth(auth, username, password), 'cert': prepare_client_cert(auth_cert), 'latin1_fallback': False, } self._settings.update(prepare_verify(verify, verify_fingerprint)) self.username, self.password = username, password self.useragent = useragent collection = kwargs.get('collection') if collection is not None: url = urlparse.urljoin(url, collection) self.url = url self.parsed_url = urlparse.urlparse(self.url) def _default_headers(self): return {'User-Agent': self.useragent} def list(self): r = request('GET', self.url, headers=self._default_headers(), **self._settings) self._items = {} for item in split_collection(r.text): item = Item(item) if self._ignore_uids: item = item.with_uid(item.hash) self._items[item.ident] = item, item.hash return ((href, etag) for href, (item, etag) in self._items.items()) def get(self, href): if self._items is None: self.list() try: return self._items[href] except KeyError: raise exceptions.NotFoundError(href) vdirsyncer-0.16.2/vdirsyncer/storage/__init__.py0000644000175000017500000000045613013735125024000 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- ''' There are storage classes which control the access to one vdir-collection and offer basic CRUD-ish methods for modifying those collections. The exact interface is described in `vdirsyncer.storage.base`, the `Storage` class should be a superclass of all storage classes. ''' vdirsyncer-0.16.2/vdirsyncer/storage/etesync.py0000644000175000017500000001742213121521602023705 0ustar untitakeruntitaker00000000000000import contextlib import functools import logging import os import binascii import atomicwrites import click try: import etesync import etesync.exceptions from etesync import AddressBook, Contact, Calendar, Event has_etesync = True except ImportError: has_etesync = False AddressBook = Contact = Calendar = Event = None from .. 
import exceptions from ..cli.utils import assert_permissions from ..utils import checkdir from ..vobject import Item from .base import Storage logger = logging.getLogger(__name__) def _writing_op(f): @functools.wraps(f) def inner(self, *args, **kwargs): if not self._at_once: self._sync_journal() rv = f(self, *args, **kwargs) if not self._at_once: self._sync_journal() return rv return inner class _Session: def __init__(self, email, secrets_dir, server_url=None, db_path=None): if not has_etesync: raise exceptions.UserError('Dependencies for etesync are not ' 'installed.') server_url = server_url or etesync.API_URL self.email = email self.secrets_dir = os.path.join(secrets_dir, email + '/') self._auth_token_path = os.path.join(self.secrets_dir, 'auth_token') self._key_path = os.path.join(self.secrets_dir, 'key') auth_token = self._get_auth_token() if not auth_token: password = click.prompt('Enter service password for {}' .format(self.email), hide_input=True) auth_token = etesync.Authenticator(server_url) \ .get_auth_token(self.email, password) self._set_auth_token(auth_token) self._db_path = db_path or os.path.join(self.secrets_dir, 'db.sqlite') self.etesync = etesync.EteSync(email, auth_token, remote=server_url, db_path=self._db_path) key = self._get_key() if not key: password = click.prompt('Enter key password', hide_input=True) click.echo('Deriving key for {}'.format(self.email)) self.etesync.derive_key(password) self._set_key(self.etesync.cipher_key) else: self.etesync.cipher_key = key def _get_auth_token(self): try: with open(self._auth_token_path) as f: return f.read().strip() or None except (OSError, IOError): pass def _set_auth_token(self, token): checkdir(os.path.dirname(self._auth_token_path), create=True) with atomicwrites.atomic_write(self._auth_token_path) as f: f.write(token) assert_permissions(self._auth_token_path, 0o600) def _get_key(self): try: with open(self._key_path, 'rb') as f: return f.read() except (OSError, IOError): pass def _set_key(self, content): checkdir(os.path.dirname(self._key_path), create=True) with atomicwrites.atomic_write(self._key_path, mode='wb') as f: f.write(content) assert_permissions(self._key_path, 0o600) class EtesyncStorage(Storage): ''' :param email: The email address of your account. :param secrets_dir: A directory where vdirsyncer can store the encryption key and authentication token. :param server_url: Optional. URL to the root of your custom server. :param db_path: Optional. Use a different path for the database. 
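
    An example configuration sketch (placeholder values; the ``type`` values
    correspond to the ``storage_name`` attributes of the two subclasses
    below)::

        [storage my_etesync_contacts]
        type = "etesync_contacts"
        email = "user@example.com"
        secrets_dir = "~/.vdirsyncer/etesync/"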
''' _collection_type = None _item_type = None _at_once = False def __init__(self, email, secrets_dir, server_url=None, db_path=None, **kwargs): if kwargs.get('collection', None) is None: raise ValueError('Collection argument required') self._session = _Session(email, secrets_dir, server_url, db_path) super(EtesyncStorage, self).__init__(**kwargs) self._journal = self._session.etesync.get(self.collection) def _sync_journal(self): self._session.etesync.sync_journal(self.collection) @classmethod def discover(cls, email, secrets_dir, server_url=None, db_path=None, **kwargs): if kwargs.get('collection', None) is not None: raise TypeError('collection argument must not be given.') session = _Session(email, secrets_dir, server_url, db_path) assert cls._collection_type session.etesync.sync_journal_list() for entry in session.etesync.list(): if isinstance(entry.collection, cls._collection_type): yield dict( email=email, secrets_dir=secrets_dir, db_path=db_path, collection=entry.uid, **kwargs ) else: logger.debug('Skipping collection: {!r}'.format(entry)) @classmethod def create_collection(cls, collection, email, secrets_dir, server_url=None, db_path=None, **kwargs): session = _Session(email, secrets_dir, server_url, db_path) content = {'displayName': collection} c = cls._collection_type.create( session.etesync, binascii.hexlify(os.urandom(32)).decode(), content ) c.save() session.etesync.sync_journal_list() return dict( collection=c.journal.uid, email=email, secrets_dir=secrets_dir, db_path=db_path, server_url=server_url, **kwargs ) def list(self): self._sync_journal() for entry in self._journal.collection.list(): item = Item(entry.content) yield str(entry.uid), item.hash def get(self, href): try: item = Item(self._journal.collection.get(href).content) except etesync.exceptions.DoesNotExist as e: raise exceptions.NotFoundError(e) return item, item.hash @_writing_op def upload(self, item): try: entry = self._item_type.create(self._journal.collection, item.raw) entry.save() except etesync.exceptions.DoesNotExist as e: raise exceptions.NotFoundError(e) except etesync.exceptions.AlreadyExists as e: raise exceptions.AlreadyExistingError(e) return item.uid, item.hash @_writing_op def update(self, href, item, etag): try: entry = self._journal.collection.get(href) except etesync.exceptions.DoesNotExist as e: raise exceptions.NotFoundError(e) old_item = Item(entry.content) if old_item.hash != etag: raise exceptions.WrongEtagError(etag, old_item.hash) entry.content = item.raw entry.save() return item.hash @_writing_op def delete(self, href, etag): try: entry = self._journal.collection.get(href) old_item = Item(entry.content) if old_item.hash != etag: raise exceptions.WrongEtagError(etag, old_item.hash) entry.delete() except etesync.exceptions.DoesNotExist as e: raise exceptions.NotFoundError(e) @contextlib.contextmanager def at_once(self): self._sync_journal() self._at_once = True try: yield self self._sync_journal() finally: self._at_once = False class EtesyncContacts(EtesyncStorage): __doc__ = ''' Contacts for EteSync. ''' + EtesyncStorage.__doc__ _collection_type = AddressBook _item_type = Contact storage_name = 'etesync_contacts' class EtesyncCalendars(EtesyncStorage): __doc__ = ''' Calendars for EteSync. 
''' + EtesyncStorage.__doc__ _collection_type = Calendar _item_type = Event storage_name = 'etesync_calendars' vdirsyncer-0.16.2/vdirsyncer/storage/singlefile.py0000644000175000017500000001540613121521602024354 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import collections import contextlib import functools import glob import logging import os from atomicwrites import atomic_write from .base import Storage from .. import exceptions from ..utils import checkfile, expand_path, get_etag_from_file from ..vobject import Item, join_collection, split_collection logger = logging.getLogger(__name__) def _writing_op(f): @functools.wraps(f) def inner(self, *args, **kwargs): if self._items is None or not self._at_once: self.list() rv = f(self, *args, **kwargs) if not self._at_once: self._write() return rv return inner class SingleFileStorage(Storage): '''Save data in single local ``.vcf`` or ``.ics`` file. The storage basically guesses how items should be joined in the file. .. versionadded:: 0.1.6 .. note:: This storage is very slow, and that is unlikely to change. You should consider using :storage:`filesystem` if it fits your usecase. :param path: The filepath to the file to be written to. If collections are used, this should contain ``%s`` as a placeholder for the collection name. :param encoding: Which encoding the file should use. Defaults to UTF-8. Example for syncing with :storage:`caldav`:: [pair my_calendar] a = my_calendar_local b = my_calendar_remote collections = ["from a", "from b"] [storage my_calendar_local] type = "singlefile" path = ~/.calendars/%s.ics [storage my_calendar_remote] type = "caldav" url = https://caldav.example.org/ #username = #password = Example for syncing with :storage:`caldav` using a ``null`` collection:: [pair my_calendar] a = my_calendar_local b = my_calendar_remote [storage my_calendar_local] type = "singlefile" path = ~/my_calendar.ics [storage my_calendar_remote] type = "caldav" url = https://caldav.example.org/username/my_calendar/ #username = #password = ''' storage_name = 'singlefile' _repr_attributes = ('path',) _write_mode = 'wb' _append_mode = 'ab' _read_mode = 'rb' _items = None _last_etag = None def __init__(self, path, encoding='utf-8', **kwargs): super(SingleFileStorage, self).__init__(**kwargs) path = os.path.abspath(expand_path(path)) checkfile(path, create=False) self.path = path self.encoding = encoding self._at_once = False @classmethod def discover(cls, path, **kwargs): if kwargs.pop('collection', None) is not None: raise TypeError('collection argument must not be given.') path = os.path.abspath(expand_path(path)) try: path_glob = path % '*' except TypeError: # If not exactly one '%s' is present, we cannot discover # collections because we wouldn't know which name to assign. 
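            # e.g. a hypothetical path "~/calendars/%s.ics" yields the glob
            # "~/calendars/*.ics", and "~/calendars/work.ics" is then
            # discovered as the collection "work".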
            raise NotImplementedError()

        placeholder_pos = path.index('%s')

        for subpath in glob.iglob(path_glob):
            if os.path.isfile(subpath):
                args = dict(kwargs)
                args['path'] = subpath

                collection_end = (
                    placeholder_pos +
                    2 +  # length of '%s'
                    len(subpath) - len(path)
                )
                collection = subpath[placeholder_pos:collection_end]
                args['collection'] = collection

                yield args

    @classmethod
    def create_collection(cls, collection, **kwargs):
        path = os.path.abspath(expand_path(kwargs['path']))

        if collection is not None:
            try:
                path = path % (collection,)
            except TypeError:
                raise ValueError('Exactly one %s required in path '
                                 'if collection is not null.')

        checkfile(path, create=True)
        kwargs['path'] = path
        kwargs['collection'] = collection
        return kwargs

    def list(self):
        self._items = collections.OrderedDict()

        try:
            self._last_etag = get_etag_from_file(self.path)
            with open(self.path, self._read_mode) as f:
                text = f.read().decode(self.encoding)
        except OSError as e:
            import errno
            if e.errno != errno.ENOENT:  # file not found
                raise IOError(e)
            text = None

        if not text:
            return ()

        for item in split_collection(text):
            item = Item(item)
            etag = item.hash
            self._items[item.ident] = item, etag

        return ((href, etag) for href, (item, etag) in self._items.items())

    def get(self, href):
        if self._items is None or not self._at_once:
            self.list()

        try:
            return self._items[href]
        except KeyError:
            raise exceptions.NotFoundError(href)

    @_writing_op
    def upload(self, item):
        href = item.ident
        if href in self._items:
            raise exceptions.AlreadyExistingError(existing_href=href)

        self._items[href] = item, item.hash
        return href, item.hash

    @_writing_op
    def update(self, href, item, etag):
        if href not in self._items:
            raise exceptions.NotFoundError(href)

        _, actual_etag = self._items[href]
        if etag != actual_etag:
            raise exceptions.WrongEtagError(etag, actual_etag)

        self._items[href] = item, item.hash
        return item.hash

    @_writing_op
    def delete(self, href, etag):
        if href not in self._items:
            raise exceptions.NotFoundError(href)

        _, actual_etag = self._items[href]
        if etag != actual_etag:
            raise exceptions.WrongEtagError(etag, actual_etag)

        del self._items[href]

    def _write(self):
        if self._last_etag is not None and \
           self._last_etag != get_etag_from_file(self.path):
            raise exceptions.PreconditionFailed(
                'Some other program modified the file {!r}. Re-run the '
                'synchronization and make sure absolutely no other program is '
                'writing into the same file.'.format(self.path))
        text = join_collection(
            item.raw for item, etag in self._items.values()
        )
        try:
            with atomic_write(self.path, mode='wb', overwrite=True) as f:
                f.write(text.encode(self.encoding))
        finally:
            self._items = None
            self._last_etag = None

    @contextlib.contextmanager
    def at_once(self):
        self.list()
        self._at_once = True
        try:
            yield self
            self._write()
        finally:
            self._at_once = False
vdirsyncer-0.16.2/vdirsyncer/storage/filesystem.py0000644000175000017500000001561613121521602024422 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*-
import errno
import logging
import os
import subprocess

from atomicwrites import atomic_write

from .base import Storage, normalize_meta_value
from .. import exceptions
from ..utils import checkdir, expand_path, generate_href, get_etag_from_file
from ..vobject import Item

logger = logging.getLogger(__name__)


class FilesystemStorage(Storage):
    '''
    Saves each item in its own file, given a directory.

    Can be used with `khal <http://lostpackets.de/khal/>`_. See :doc:`vdir`
    for a more formal description of the format.

    Directories with a leading dot are ignored to make usage of e.g. version
    control easier.
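
    For example, a hypothetical ``post_hook = "~/bin/contacts-changed"``
    results in ``~/bin/contacts-changed <path-of-changed-file>`` being
    executed after each item creation or modification.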
:param path: Absolute path to a vdir/collection. If this is used in combination with the ``collections`` parameter in a pair-section, this should point to a directory of vdirs instead. :param fileext: The file extension to use (e.g. ``.txt``). Contained in the href, so if you change the file extension after a sync, this will trigger a re-download of everything (but *should* not cause data-loss of any kind). :param encoding: File encoding for items, both content and filename. :param post_hook: A command to call for each item creation and modification. The command will be called with the path of the new/updated file. ''' storage_name = 'filesystem' _repr_attributes = ('path',) def __init__(self, path, fileext, encoding='utf-8', post_hook=None, **kwargs): super(FilesystemStorage, self).__init__(**kwargs) path = expand_path(path) checkdir(path, create=False) self.path = path self.encoding = encoding self.fileext = fileext self.post_hook = post_hook @classmethod def discover(cls, path, **kwargs): if kwargs.pop('collection', None) is not None: raise TypeError('collection argument must not be given.') path = expand_path(path) try: collections = os.listdir(path) except OSError as e: if e.errno != errno.ENOENT: raise else: for collection in collections: collection_path = os.path.join(path, collection) if not cls._validate_collection(collection_path): continue args = dict(collection=collection, path=collection_path, **kwargs) yield args @classmethod def _validate_collection(cls, path): if not os.path.isdir(path): return False if os.path.basename(path).startswith('.'): return False return True @classmethod def create_collection(cls, collection, **kwargs): kwargs = dict(kwargs) path = kwargs['path'] if collection is not None: path = os.path.join(path, collection) checkdir(expand_path(path), create=True) kwargs['path'] = path kwargs['collection'] = collection return kwargs def _get_filepath(self, href): return os.path.join(self.path, href) def _get_href(self, ident): return generate_href(ident) + self.fileext def list(self): for fname in os.listdir(self.path): fpath = os.path.join(self.path, fname) if os.path.isfile(fpath) and fname.endswith(self.fileext): yield fname, get_etag_from_file(fpath) def get(self, href): fpath = self._get_filepath(href) try: with open(fpath, 'rb') as f: return (Item(f.read().decode(self.encoding)), get_etag_from_file(fpath)) except IOError as e: if e.errno == errno.ENOENT: raise exceptions.NotFoundError(href) else: raise def upload(self, item): if not isinstance(item.raw, str): raise TypeError('item.raw must be a unicode string.') try: href = self._get_href(item.ident) fpath, etag = self._upload_impl(item, href) except OSError as e: if e.errno in ( errno.ENAMETOOLONG, # Unix errno.ENOENT # Windows ): logger.debug('UID as filename rejected, trying with random ' 'one.') # random href instead of UID-based href = self._get_href(None) fpath, etag = self._upload_impl(item, href) else: raise if self.post_hook: self._run_post_hook(fpath) return href, etag def _upload_impl(self, item, href): fpath = self._get_filepath(href) try: with atomic_write(fpath, mode='wb', overwrite=False) as f: f.write(item.raw.encode(self.encoding)) return fpath, get_etag_from_file(f) except OSError as e: if e.errno == errno.EEXIST: raise exceptions.AlreadyExistingError(existing_href=href) else: raise def update(self, href, item, etag): fpath = self._get_filepath(href) if not os.path.exists(fpath): raise exceptions.NotFoundError(item.uid) actual_etag = get_etag_from_file(fpath) if etag != actual_etag: raise 
exceptions.WrongEtagError(etag, actual_etag) if not isinstance(item.raw, str): raise TypeError('item.raw must be a unicode string.') with atomic_write(fpath, mode='wb', overwrite=True) as f: f.write(item.raw.encode(self.encoding)) etag = get_etag_from_file(f) if self.post_hook: self._run_post_hook(fpath) return etag def delete(self, href, etag): fpath = self._get_filepath(href) if not os.path.isfile(fpath): raise exceptions.NotFoundError(href) actual_etag = get_etag_from_file(fpath) if etag != actual_etag: raise exceptions.WrongEtagError(etag, actual_etag) os.remove(fpath) def _run_post_hook(self, fpath): logger.info('Calling post_hook={} with argument={}'.format( self.post_hook, fpath)) try: subprocess.call([self.post_hook, fpath]) except OSError as e: logger.warning('Error executing external hook: {}'.format(str(e))) def get_meta(self, key): fpath = os.path.join(self.path, key) try: with open(fpath, 'rb') as f: return normalize_meta_value(f.read().decode(self.encoding)) except IOError as e: if e.errno == errno.ENOENT: return u'' else: raise def set_meta(self, key, value): value = normalize_meta_value(value) fpath = os.path.join(self.path, key) with atomic_write(fpath, mode='wb', overwrite=True) as f: f.write(value.encode(self.encoding)) vdirsyncer-0.16.2/vdirsyncer/storage/memory.py0000644000175000017500000000402613121521602023537 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import random from .base import Storage, normalize_meta_value from .. import exceptions def _random_string(): return '{:.9f}'.format(random.random()) class MemoryStorage(Storage): storage_name = 'memory' ''' Saves data in RAM, only useful for testing. ''' def __init__(self, fileext='', **kwargs): if kwargs.get('collection') is not None: raise exceptions.UserError('MemoryStorage does not support ' 'collections.') self.items = {} # href => (etag, item) self.metadata = {} self.fileext = fileext super(MemoryStorage, self).__init__(**kwargs) def _get_href(self, item): return item.ident + self.fileext def list(self): for href, (etag, _item) in self.items.items(): yield href, etag def get(self, href): etag, item = self.items[href] return item, etag def has(self, href): return href in self.items def upload(self, item): href = self._get_href(item) if href in self.items: raise exceptions.AlreadyExistingError(existing_href=href) etag = _random_string() self.items[href] = (etag, item) return href, etag def update(self, href, item, etag): if href not in self.items: raise exceptions.NotFoundError(href) actual_etag, _ = self.items[href] if etag != actual_etag: raise exceptions.WrongEtagError(etag, actual_etag) new_etag = _random_string() self.items[href] = (new_etag, item) return new_etag def delete(self, href, etag): if not self.has(href): raise exceptions.NotFoundError(href) if etag != self.items[href][0]: raise exceptions.WrongEtagError(etag) del self.items[href] def get_meta(self, key): return normalize_meta_value(self.metadata.get(key)) def set_meta(self, key, value): self.metadata[key] = normalize_meta_value(value) vdirsyncer-0.16.2/vdirsyncer/storage/base.py0000644000175000017500000001725213121521602023146 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import contextlib import functools from .. 
import exceptions from ..utils import uniq def mutating_storage_method(f): @functools.wraps(f) def inner(self, *args, **kwargs): if self.read_only: raise exceptions.ReadOnlyError('This storage is read-only.') return f(self, *args, **kwargs) return inner class StorageMeta(type): def __init__(cls, name, bases, d): for method in ('update', 'upload', 'delete'): setattr(cls, method, mutating_storage_method(getattr(cls, method))) return super(StorageMeta, cls).__init__(name, bases, d) class Storage(metaclass=StorageMeta): '''Superclass of all storages, interface that all storages have to implement. Terminology: - ITEM: Instance of the Item class, represents a calendar event, task or contact. - HREF: String; Per-storage identifier of item, might be UID. The reason items aren't just referenced by their UID is because the CalDAV and CardDAV specifications make this unperformant to implement. - ETAG: String; Checksum of item, or something similar that changes when the item does. Strings can be either unicode strings or bytestrings. If bytestrings, an ASCII encoding is assumed. :param read_only: Whether the synchronization algorithm should avoid writes to this storage. Some storages accept no value other than ``True``. ''' fileext = '.txt' # The string used in the config to denote the type of storage. Should be # overridden by subclasses. storage_name = None # The string used in the config to denote a particular instance. Will be # overridden during instantiation. instance_name = None # The machine-readable name of this collection. collection = None # A value of True means the storage does not support write-methods such as # upload, update and delete. A value of False means the storage does # support those methods. read_only = False # The attribute values to show in the representation of the storage. _repr_attributes = () def __init__(self, instance_name=None, read_only=None, collection=None): if read_only is None: read_only = self.read_only if self.read_only and not read_only: raise exceptions.UserError('This storage can only be read-only.') self.read_only = bool(read_only) if collection and instance_name: instance_name = '{}/{}'.format(instance_name, collection) self.instance_name = instance_name self.collection = collection @classmethod def discover(cls, **kwargs): '''Discover collections given a basepath or -URL to many collections. :param **kwargs: Keyword arguments to additionally pass to the storage instances returned. You shouldn't pass `collection` here, otherwise TypeError will be raised. :returns: iterable of ``storage_args``. ``storage_args`` is a dictionary of ``**kwargs`` to pass to this class to obtain a storage instance pointing to this collection. It also must contain a ``"collection"`` key. That key's value is used to match two collections together for synchronization. IOW it is a machine-readable identifier for the collection, usually obtained from the last segment of a URL or filesystem path. ''' raise NotImplementedError() @classmethod def create_collection(cls, collection, **kwargs): ''' Create the specified collection and return the new arguments. ``collection=None`` means the arguments are already pointing to a possible collection location. The returned args should contain the collection name, for UI purposes. 
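
    A sketch of the resulting read/write contract, where ``storage`` stands
    for any concrete storage and ``item`` for a vobject ``Item`` (see
    ``MemoryStorage`` in ``vdirsyncer.storage.memory`` for a minimal
    concrete implementation)::

        href, etag = storage.upload(item)
        assert storage.has(href)
        item, etag = storage.get(href)
        etag = storage.update(href, item, etag)
        storage.delete(href, etag)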
''' raise NotImplementedError() def __repr__(self): try: if self.instance_name: return str(self.instance_name) except ValueError: pass return '<{}(**{})>'.format( self.__class__.__name__, dict((x, getattr(self, x)) for x in self._repr_attributes) ) def list(self): ''' :returns: list of (href, etag) ''' raise NotImplementedError() def get(self, href): '''Fetch a single item. :param href: href to fetch :returns: (item, etag) :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if item can't be found. ''' raise NotImplementedError() def get_multi(self, hrefs): '''Fetch multiple items. Duplicate hrefs must be ignored. Functionally similar to :py:meth:`get`, but might bring performance benefits on some storages when used cleverly. :param hrefs: list of hrefs to fetch :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if one of the items couldn't be found. :returns: iterable of (href, item, etag) ''' for href in uniq(hrefs): item, etag = self.get(href) yield href, item, etag def has(self, href): '''Check if an item exists by its href. :returns: True or False ''' try: self.get(href) except exceptions.PreconditionFailed: return False else: return True def upload(self, item): '''Upload a new item. In cases where the new etag cannot be atomically determined (i.e. in the same "transaction" as the upload itself), this method may return `None` as etag. This special case only exists because of DAV. Avoid this situation whenever possible. :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if there is already an item with that href. :returns: (href, etag) ''' raise NotImplementedError() def update(self, href, item, etag): '''Update an item. The etag may be none in some cases, see `upload`. :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` if the etag on the server doesn't match the given etag or if the item doesn't exist. :returns: etag ''' raise NotImplementedError() def delete(self, href, etag): '''Delete an item by href. :raises: :exc:`vdirsyncer.exceptions.PreconditionFailed` when item has a different etag or doesn't exist. ''' raise NotImplementedError() @contextlib.contextmanager def at_once(self): '''A contextmanager that buffers all writes. Essentially, this:: s.upload(...) s.update(...) becomes this:: with s.at_once(): s.upload(...) s.update(...) Note that this removes guarantees about which exceptions are returned when. ''' yield def get_meta(self, key): '''Get metadata value for collection/storage. See the vdir specification for the keys that *have* to be accepted. :param key: The metadata key. :type key: unicode ''' raise NotImplementedError('This storage does not support metadata.') def set_meta(self, key, value): '''Get metadata value for collection/storage. :param key: The metadata key. :type key: unicode :param value: The value. :type value: unicode ''' raise NotImplementedError('This storage does not support metadata.') def normalize_meta_value(value): # `None` is returned by iCloud for empty properties. if not value or value == 'None': value = '' return value.strip() vdirsyncer-0.16.2/vdirsyncer/storage/dav.py0000644000175000017500000007150113144561565023024 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import datetime import logging import urllib.parse as urlparse import xml.etree.ElementTree as etree from inspect import getfullargspec import requests from requests.exceptions import HTTPError from .base import Storage, normalize_meta_value from .. 
import exceptions, http, utils
from ..http import HTTP_STORAGE_PARAMETERS, USERAGENT, prepare_auth, \
    prepare_client_cert, prepare_verify
from ..vobject import Item

dav_logger = logging.getLogger(__name__)

CALDAV_DT_FORMAT = '%Y%m%dT%H%M%SZ'


def _generate_path_reserved_chars():
    for x in "/?#[]!$&'()*+,;":
        x = urlparse.quote(x, '')
        yield x.upper()
        yield x.lower()


_path_reserved_chars = frozenset(_generate_path_reserved_chars())
del _generate_path_reserved_chars


def _contains_quoted_reserved_chars(x):
    for y in _path_reserved_chars:
        if y in x:
            dav_logger.debug('Unsafe character: {!r}'.format(y))
            return True
    return False


def _assert_multistatus_success(r):
    # Xandikos returns a multistatus on PUT.
    try:
        root = _parse_xml(r.content)
    except InvalidXMLResponse:
        return
    for status in root.findall('.//{DAV:}status'):
        parts = status.text.strip().split()
        try:
            st = int(parts[1])
        except (ValueError, IndexError):
            continue
        if st < 200 or st >= 400:
            raise HTTPError('Server error: {}'.format(st))


def _normalize_href(base, href):
    '''Normalize the href to be a path only relative to hostname and
    schema.'''
    orig_href = href
    if not href:
        raise ValueError(href)

    x = urlparse.urljoin(base, href)
    x = urlparse.urlsplit(x).path

    # Encoding issues:
    # - https://github.com/owncloud/contacts/issues/581
    # - https://github.com/Kozea/Radicale/issues/298
    old_x = None
    while old_x is None or x != old_x:
        if _contains_quoted_reserved_chars(x):
            break
        old_x = x
        x = urlparse.unquote(x)

    x = urlparse.quote(x, '/@%:')

    if orig_href == x:
        dav_logger.debug('Already normalized: {!r}'.format(x))
    else:
        dav_logger.debug('Normalized URL from {!r} to {!r}'
                         .format(orig_href, x))

    return x


class InvalidXMLResponse(exceptions.InvalidResponse):
    pass


def _parse_xml(content):
    try:
        return etree.XML(content)
    except etree.ParseError as e:
        raise InvalidXMLResponse('Invalid XML encountered: {}\n'
                                 'Double-check the URLs in your config.'
                                 .format(e))


def _merge_xml(items):
    if not items:
        return None
    rv = items[0]
    for item in items[1:]:
        rv.extend(item.getiterator())
    return rv


def _fuzzy_matches_mimetype(strict, weak):
    # different servers give different getcontenttypes:
    # "text/vcard", "text/x-vcard", "text/x-vcard; charset=utf-8",
    # "text/directory;profile=vCard", "text/directory",
    # "text/vcard; charset=utf-8"
    if strict is None or weak is None:
        return True

    mediatype, subtype = strict.split('/')
    if subtype in weak:
        return True
    return False


class Discover(object):
    _namespace = None
    _resourcetype = None
    _homeset_xml = None
    _homeset_tag = None
    _well_known_uri = None
    _collection_xml = b"""
    <propfind xmlns="DAV:">
        <prop>
            <resourcetype />
        </prop>
    </propfind>
    """

    def __init__(self, session, kwargs):
        if kwargs.pop('collection', None) is not None:
            raise TypeError('collection argument must not be given.')
        self.session = session
        self.kwargs = kwargs

    @staticmethod
    def _get_collection_from_url(url):
        _, collection = url.rstrip('/').rsplit('/', 1)
        return urlparse.unquote(collection)

    def find_principal(self):
        try:
            return self._find_principal_impl('')
        except (HTTPError, exceptions.Error):
            dav_logger.debug('Trying out well-known URI')
            return self._find_principal_impl(self._well_known_uri)

    def _find_principal_impl(self, url):
        headers = self.session.get_default_headers()
        headers['Depth'] = '0'
        body = b"""
        <propfind xmlns="DAV:">
            <prop>
                <current-user-principal />
            </prop>
        </propfind>
        """

        response = self.session.request('PROPFIND', url,
                                        headers=headers, data=body)

        root = _parse_xml(response.content)
        rv = root.find('.//{DAV:}current-user-principal/{DAV:}href')
        if rv is None:
            # This is for servers that don't support current-user-principal
            # E.g. Synology NAS
            # See https://github.com/pimutils/vdirsyncer/issues/498
            dav_logger.debug(
                'No current-user-principal returned, re-using URL {}'
                .format(response.url))
            return response.url
        return urlparse.urljoin(response.url, rv.text).rstrip('/') + '/'

    def find_home(self):
        url = self.find_principal()
        headers = self.session.get_default_headers()
        headers['Depth'] = '0'
        response = self.session.request('PROPFIND', url,
                                        headers=headers,
                                        data=self._homeset_xml)

        root = etree.fromstring(response.content)
        # Better don't do string formatting here, because of XML namespaces
        rv = root.find('.//' + self._homeset_tag + '/{DAV:}href')
        if rv is None:
            raise InvalidXMLResponse('Couldn\'t find home-set.')
        return urlparse.urljoin(response.url, rv.text).rstrip('/') + '/'

    def find_collections(self):
        rv = None
        try:
            rv = list(self._find_collections_impl(''))
        except (HTTPError, exceptions.Error):
            pass

        if rv:
            return rv
        dav_logger.debug('Given URL is not a homeset URL')
        return self._find_collections_impl(self.find_home())

    def _check_collection_resource_type(self, response):
        if self._resourcetype is None:
            return True

        props = _merge_xml(response.findall(
            '{DAV:}propstat/{DAV:}prop'
        ))
        if props is None or not len(props):
            dav_logger.debug('Skipping, missing <prop>: %s', response)
            return False

        if props.find('{DAV:}resourcetype/' + self._resourcetype) \
                is None:
            dav_logger.debug('Skipping, not of resource type %s: %s',
                             self._resourcetype, response)
            return False

        return True

    def _find_collections_impl(self, url):
        headers = self.session.get_default_headers()
        headers['Depth'] = '1'
        r = self.session.request('PROPFIND', url, headers=headers,
                                 data=self._collection_xml)
        root = _parse_xml(r.content)
        done = set()
        for response in root.findall('{DAV:}response'):
            if not self._check_collection_resource_type(response):
                continue

            href = response.find('{DAV:}href')
            if href is None:
                raise InvalidXMLResponse('Missing href tag for collection '
                                         'props.')
            href = urlparse.urljoin(r.url, href.text)
            if href not in done:
                done.add(href)
                yield {'href': href}

    def discover(self):
        for c in self.find_collections():
            url = c['href']
            collection = self._get_collection_from_url(url)
            storage_args = dict(self.kwargs)
            storage_args.update({'url': url, 'collection': collection})
            yield storage_args

    def create(self, collection):
        if collection is None:
            collection = self._get_collection_from_url(self.kwargs['url'])

        for c in self.discover():
            if c['collection'] == collection:
                return c

        home = self.find_home()
        url = urlparse.urljoin(
            home,
            urlparse.quote(collection, '/@')
        )

        try:
            url = self._create_collection_impl(url)
        except HTTPError as e:
            raise NotImplementedError(e)
        else:
            rv = dict(self.kwargs)
            rv['collection'] = collection
            rv['url'] = url
            return rv

    def _create_collection_impl(self, url):
        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <mkcol xmlns="DAV:">
                <set>
                    <prop>
                        <resourcetype>
                            <collection/>
                            {}
                        </resourcetype>
                    </prop>
                </set>
            </mkcol>
        '''.format(
            etree.tostring(etree.Element(self._resourcetype),
                           encoding='unicode')
        ).encode('utf-8')

        response = self.session.request(
            'MKCOL',
            url,
            data=data,
            headers=self.session.get_default_headers(),
        )

        return response.url


class CalDiscover(Discover):
    _namespace = 'urn:ietf:params:xml:ns:caldav'
    _resourcetype = '{%s}calendar' % _namespace
    _homeset_xml = b"""
    <propfind xmlns="DAV:" xmlns:c="urn:ietf:params:xml:ns:caldav">
        <prop>
            <c:calendar-home-set />
        </prop>
    </propfind>
    """
    _homeset_tag = '{%s}calendar-home-set' % _namespace
    _well_known_uri = '/.well-known/caldav'


class CardDiscover(Discover):
    _namespace = 'urn:ietf:params:xml:ns:carddav'
    _resourcetype = '{%s}addressbook' % _namespace
    _homeset_xml = b"""
    <propfind xmlns="DAV:" xmlns:c="urn:ietf:params:xml:ns:carddav">
        <prop>
            <c:addressbook-home-set />
        </prop>
    </propfind>
    """
    _homeset_tag = '{%s}addressbook-home-set' % _namespace
    _well_known_uri = '/.well-known/carddav'


class DAVSession(object):
    '''
    A helper class to connect to DAV servers.
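
    A usage sketch (hypothetical server URL)::

        session = DAVSession(url='https://dav.example.com/')
        headers = session.get_default_headers()
        headers['Depth'] = '0'
        response = session.request('PROPFIND', '', headers=headers)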
    '''
    @classmethod
    def init_and_remaining_args(cls, **kwargs):
        argspec = getfullargspec(cls.__init__)
        self_args, remainder = \
            utils.split_dict(kwargs, argspec.args.__contains__)

        return cls(**self_args), remainder

    def __init__(self, url, username='', password='', verify=True, auth=None,
                 useragent=USERAGENT, verify_fingerprint=None,
                 auth_cert=None):
        self._settings = {
            'cert': prepare_client_cert(auth_cert),
            'auth': prepare_auth(auth, username, password)
        }
        self._settings.update(prepare_verify(verify, verify_fingerprint))

        self.useragent = useragent
        self.url = url.rstrip('/') + '/'

        self._session = requests.session()

    @utils.cached_property
    def parsed_url(self):
        return urlparse.urlparse(self.url)

    def request(self, method, path, **kwargs):
        url = self.url
        if path:
            url = urlparse.urljoin(self.url, path)

        more = dict(self._settings)
        more.update(kwargs)
        return http.request(method, url, session=self._session, **more)

    def get_default_headers(self):
        return {
            'User-Agent': self.useragent,
            'Content-Type': 'application/xml; charset=UTF-8'
        }


class DAVStorage(Storage):
    __doc__ = '''
    :param url: Base URL or an URL to a collection.
    ''' + HTTP_STORAGE_PARAMETERS + '''

    .. note::

        Please also see :ref:`supported-servers`, as some servers may not
        work well.
    '''

    # the file extension of items. Useful for testing against radicale.
    fileext = None
    # mimetype of items
    item_mimetype = None
    # XML to use when fetching multiple hrefs.
    get_multi_template = None
    # The LXML query for extracting results in get_multi
    get_multi_data_query = None
    # The Discover subclass to use
    discovery_class = None
    # The DAVSession class to use
    session_class = DAVSession

    _repr_attributes = ('username', 'url')

    _property_table = {
        'displayname': ('displayname', 'DAV:'),
    }

    def __init__(self, **kwargs):
        # defined for _repr_attributes
        self.username = kwargs.get('username')
        self.url = kwargs.get('url')

        self.session, kwargs = \
            self.session_class.init_and_remaining_args(**kwargs)
        super(DAVStorage, self).__init__(**kwargs)

    import inspect
    __init__.__signature__ = inspect.signature(session_class.__init__)

    @classmethod
    def discover(cls, **kwargs):
        session, _ = cls.session_class.init_and_remaining_args(**kwargs)
        d = cls.discovery_class(session, kwargs)
        return d.discover()

    @classmethod
    def create_collection(cls, collection, **kwargs):
        session, _ = cls.session_class.init_and_remaining_args(**kwargs)
        d = cls.discovery_class(session, kwargs)
        return d.create(collection)

    def _normalize_href(self, *args, **kwargs):
        return _normalize_href(self.session.url, *args, **kwargs)

    def _get_href(self, item):
        href = utils.generate_href(item.ident)
        return self._normalize_href(href + self.fileext)

    def _is_item_mimetype(self, mimetype):
        return _fuzzy_matches_mimetype(self.item_mimetype, mimetype)

    def get(self, href):
        ((actual_href, item, etag),) = self.get_multi([href])
        assert href == actual_href
        return item, etag

    def get_multi(self, hrefs):
        hrefs = set(hrefs)
        href_xml = []
        for href in hrefs:
            if href != self._normalize_href(href):
                raise exceptions.NotFoundError(href)
            href_xml.append('<href>{}</href>'.format(href))
        if not href_xml:
            return ()

        data = self.get_multi_template \
            .format(hrefs='\n'.join(href_xml)).encode('utf-8')
        response = self.session.request(
            'REPORT', '',
            data=data, headers=self.session.get_default_headers()
        )
        root = _parse_xml(response.content)  # etree only can handle bytes
        rv = []
        hrefs_left = set(hrefs)
        for href, etag, prop in self._parse_prop_responses(root):
            raw = prop.find(self.get_multi_data_query)
            if raw is None:
                dav_logger.warning('Skipping {}, the item content is
missing.'
                                   .format(href))
                continue
            raw = raw.text or u''

            if isinstance(raw, bytes):
                raw = raw.decode(response.encoding)
            if isinstance(etag, bytes):
                etag = etag.decode(response.encoding)

            try:
                hrefs_left.remove(href)
            except KeyError:
                if href in hrefs:
                    dav_logger.warning('Server sent item twice: {}'
                                       .format(href))
                else:
                    dav_logger.warning('Server sent unsolicited item: {}'
                                       .format(href))
            else:
                rv.append((href, Item(raw), etag))
        for href in hrefs_left:
            raise exceptions.NotFoundError(href)
        return rv

    def _put(self, href, item, etag):
        headers = self.session.get_default_headers()
        headers['Content-Type'] = self.item_mimetype
        if etag is None:
            headers['If-None-Match'] = '*'
        else:
            headers['If-Match'] = etag

        response = self.session.request(
            'PUT', href,
            data=item.raw.encode('utf-8'), headers=headers
        )

        _assert_multistatus_success(response)

        # The server may not return an etag under certain conditions:
        #
        #   An origin server MUST NOT send a validator header field (Section
        #   7.2), such as an ETag or Last-Modified field, in a successful
        #   response to PUT unless the request's representation data was saved
        #   without any transformation applied to the body (i.e., the
        #   resource's new representation data is identical to the
        #   representation data received in the PUT request) and the validator
        #   field value reflects the new representation.
        #
        # -- https://tools.ietf.org/html/rfc7231#section-4.3.4
        #
        # In such cases we return a constant etag. The next synchronization
        # will then detect an etag change and will download the new item.
        etag = response.headers.get('etag', None)
        href = self._normalize_href(response.url)
        return href, etag

    def update(self, href, item, etag):
        if etag is None:
            raise ValueError('etag must be given and must not be None.')
        href, etag = self._put(self._normalize_href(href), item, etag)
        return etag

    def upload(self, item):
        href = self._get_href(item)
        return self._put(href, item, None)

    def delete(self, href, etag):
        href = self._normalize_href(href)
        headers = self.session.get_default_headers()
        headers.update({
            'If-Match': etag
        })

        self.session.request(
            'DELETE', href,
            headers=headers
        )

    def _parse_prop_responses(self, root, handled_hrefs=None):
        if handled_hrefs is None:
            handled_hrefs = set()
        for response in root.iter('{DAV:}response'):
            href = response.find('{DAV:}href')
            if href is None:
                dav_logger.error('Skipping response, href is missing.')
                continue

            href = self._normalize_href(href.text)
            if href in handled_hrefs:
                # Servers that send duplicate hrefs:
                # - Zimbra
                #   https://github.com/pimutils/vdirsyncer/issues/88
                # - Davmail
                #   https://github.com/pimutils/vdirsyncer/issues/144
                dav_logger.warning('Skipping identical href: {!r}'
                                   .format(href))
                continue

            props = response.findall('{DAV:}propstat/{DAV:}prop')
            if props is None or not len(props):
                dav_logger.debug('Skipping {!r}, properties are missing.'
                                 .format(href))
                continue
            else:
                props = _merge_xml(props)

            if props.find('{DAV:}resourcetype/{DAV:}collection') is not None:
                dav_logger.debug('Skipping {!r}, is collection.'.format(href))
                continue

            etag = getattr(props.find('{DAV:}getetag'), 'text', '')
            if not etag:
                dav_logger.debug('Skipping {!r}, etag property is missing.'
                                 .format(href))
                continue

            contenttype = getattr(props.find('{DAV:}getcontenttype'),
                                  'text', None)
            if not self._is_item_mimetype(contenttype):
                dav_logger.debug('Skipping {!r}, {!r} != {!r}.'
                                 .format(href, contenttype,
                                         self.item_mimetype))
                continue

            handled_hrefs.add(href)
            yield href, etag, props

    def list(self):
        headers = self.session.get_default_headers()
        headers['Depth'] = '1'
        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <propfind xmlns="DAV:">
                <prop>
                    <resourcetype/>
                    <getcontenttype/>
                    <getetag/>
                </prop>
            </propfind>
            '''.encode('utf-8')

        # We use a PROPFIND request instead of addressbook-query due to issues
        # with Zimbra. See https://github.com/pimutils/vdirsyncer/issues/83
        response = self.session.request('PROPFIND', '', data=data,
                                        headers=headers)
        root = _parse_xml(response.content)

        rv = self._parse_prop_responses(root)
        for href, etag, _prop in rv:
            yield href, etag

    def get_meta(self, key):
        try:
            tagname, namespace = self._property_table[key]
        except KeyError:
            raise exceptions.UnsupportedMetadataError()

        xpath = '{%s}%s' % (namespace, tagname)
        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <propfind xmlns="DAV:">
                <prop>
                    {}
                </prop>
            </propfind>
        '''.format(
            etree.tostring(etree.Element(xpath), encoding='unicode')
        ).encode('utf-8')

        headers = self.session.get_default_headers()
        headers['Depth'] = '0'

        response = self.session.request(
            'PROPFIND', '',
            data=data, headers=headers
        )

        root = _parse_xml(response.content)

        for prop in root.findall('.//' + xpath):
            text = normalize_meta_value(getattr(prop, 'text', None))
            if text:
                return text
        return u''

    def set_meta(self, key, value):
        try:
            tagname, namespace = self._property_table[key]
        except KeyError:
            raise exceptions.UnsupportedMetadataError()

        lxml_selector = '{%s}%s' % (namespace, tagname)
        element = etree.Element(lxml_selector)
        element.text = normalize_meta_value(value)

        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <propertyupdate xmlns="DAV:">
                <set>
                    <prop>
                        {}
                    </prop>
                </set>
            </propertyupdate>
        '''.format(etree.tostring(element, encoding='unicode')).encode('utf-8')

        self.session.request(
            'PROPPATCH', '',
            data=data, headers=self.session.get_default_headers()
        )

        # XXX: Response content is currently ignored. Though exceptions are
        # raised for HTTP errors, a multistatus with errorcodes inside is not
        # parsed yet. Not sure how common those are, or how they look like. It
        # might be easier (and safer in case of a stupid server) to just issue
        # a PROPFIND to see if the value got actually set.


class CalDAVStorage(DAVStorage):
    __doc__ = '''
    CalDAV.

    You can set a timerange to synchronize with the parameters ``start_date``
    and ``end_date``. Inside those parameters, you can use any Python
    expression to return a valid :py:class:`datetime.datetime` object. For
    example, the following would synchronize the timerange from one year in
    the past to one year in the future::

        start_date = datetime.now() - timedelta(days=365)
        end_date = datetime.now() + timedelta(days=365)

    Either both or none have to be specified. The default is to synchronize
    everything.

    You can set ``item_types`` to restrict the *kind of items* you want to
    synchronize. For example, if you want to only synchronize events (but
    don't download any tasks from the server), set ``item_types =
    ["VEVENT"]``. If you want to synchronize events and tasks, but have some
    ``VJOURNAL`` items on the server you don't want to synchronize, use
    ``item_types = ["VEVENT", "VTODO"]``.

    :param start_date: Start date of timerange to show, default -inf.
    :param end_date: End date of timerange to show, default +inf.
    :param item_types: Kind of items to show. The default, the empty list, is
        to show all. This depends on particular features on the server, the
        results are not validated.
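
    A hypothetical storage section combining both options might look like
    this (storage name and URL are made up; note that config values are
    JSON, so the date expressions are quoted strings)::

        [storage my_calendar]
        type = "caldav"
        url = "https://dav.example.com/"
        start_date = "datetime.now() - timedelta(days=365)"
        end_date = "datetime.now() + timedelta(days=365)"
        item_types = ["VEVENT", "VTODO"]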
    ''' + DAVStorage.__doc__

    storage_name = 'caldav'
    fileext = '.ics'
    item_mimetype = 'text/calendar'
    discovery_class = CalDiscover

    start_date = None
    end_date = None

    get_multi_template = '''<?xml version="1.0" encoding="utf-8" ?>
        <C:calendar-multiget xmlns="DAV:"
            xmlns:C="urn:ietf:params:xml:ns:caldav">
            <prop>
                <getetag/>
                <C:calendar-data/>
            </prop>
            {hrefs}
        </C:calendar-multiget>'''

    get_multi_data_query = '{urn:ietf:params:xml:ns:caldav}calendar-data'

    _property_table = dict(DAVStorage._property_table)
    _property_table.update({
        'color': ('calendar-color', 'http://apple.com/ns/ical/'),
    })

    def __init__(self, start_date=None, end_date=None,
                 item_types=(), **kwargs):
        super(CalDAVStorage, self).__init__(**kwargs)
        if not isinstance(item_types, (list, tuple)):
            raise exceptions.UserError('item_types must be a list.')

        self.item_types = tuple(item_types)
        if (start_date is None) != (end_date is None):
            raise exceptions.UserError('If start_date is given, '
                                       'end_date has to be given too.')
        elif start_date is not None and end_date is not None:
            namespace = dict(datetime.__dict__)
            namespace['start_date'] = self.start_date = \
                (eval(start_date, namespace)
                 if isinstance(start_date, (bytes, str))
                 else start_date)
            self.end_date = \
                (eval(end_date, namespace)
                 if isinstance(end_date, (bytes, str))
                 else end_date)

    @staticmethod
    def _get_list_filters(components, start, end):
        if components:
            caldavfilter = '''
                <C:comp-filter name="VCALENDAR">
                    <C:comp-filter name="{component}">
                        {timefilter}
                    </C:comp-filter>
                </C:comp-filter>
                '''

            if start is not None and end is not None:
                start = start.strftime(CALDAV_DT_FORMAT)
                end = end.strftime(CALDAV_DT_FORMAT)

                timefilter = ('<C:time-range start="{start}" end="{end}"/>'
                              .format(start=start, end=end))
            else:
                timefilter = ''

            for component in components:
                yield caldavfilter.format(component=component,
                                          timefilter=timefilter)
        else:
            if start is not None and end is not None:
                for x in CalDAVStorage._get_list_filters(('VTODO', 'VEVENT'),
                                                         start, end):
                    yield x

    def list(self):
        caldavfilters = list(self._get_list_filters(
            self.item_types, self.start_date, self.end_date
        ))
        if not caldavfilters:
            # If we don't have any filters (which is the default), taking the
            # risk of sending a calendar-query is not necessary. There doesn't
            # seem to be a widely-usable way to send calendar-queries with the
            # same semantics as a PROPFIND request... so why not use PROPFIND
            # instead?
            #
            # See https://github.com/dmfs/tasks/issues/118 for backstory.
            for x in DAVStorage.list(self):
                yield x

        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <C:calendar-query xmlns="DAV:"
                xmlns:C="urn:ietf:params:xml:ns:caldav">
                <prop>
                    <getcontenttype/>
                    <getetag/>
                </prop>
                <C:filter>
                    {caldavfilter}
                </C:filter>
            </C:calendar-query>'''

        headers = self.session.get_default_headers()
        # https://github.com/pimutils/vdirsyncer/issues/166
        # The default in CalDAV's calendar-queries is 0, but the examples use
        # an explicit value of 1 for querying items. It is extremely unclear
        # in the spec which values from WebDAV are actually allowed.
        headers['Depth'] = '1'

        handled_hrefs = set()
        for caldavfilter in caldavfilters:
            xml = data.format(caldavfilter=caldavfilter).encode('utf-8')
            response = self.session.request('REPORT', '',
                                            data=xml, headers=headers)
            root = _parse_xml(response.content)
            rv = self._parse_prop_responses(root, handled_hrefs)
            for href, etag, _prop in rv:
                yield href, etag


class CardDAVStorage(DAVStorage):
    __doc__ = '''
    CardDAV.
    ''' + DAVStorage.__doc__

    storage_name = 'carddav'
    fileext = '.vcf'
    item_mimetype = 'text/vcard'
    discovery_class = CardDiscover

    get_multi_template = '''<?xml version="1.0" encoding="utf-8" ?>
        <C:addressbook-multiget xmlns="DAV:"
            xmlns:C="urn:ietf:params:xml:ns:carddav">
            <prop>
                <getetag/>
                <C:address-data/>
            </prop>
            {hrefs}
        </C:addressbook-multiget>'''

    get_multi_data_query = '{urn:ietf:params:xml:ns:carddav}address-data'
vdirsyncer-0.16.2/vdirsyncer/version.py0000644000175000017500000000016513147536465022275 0ustar untitakeruntitaker00000000000000# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '0.16.2'
vdirsyncer-0.16.2/vdirsyncer/__init__.py0000644000175000017500000000143713121521602022325 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*-
'''
Vdirsyncer synchronizes calendars and contacts.
'''
from __future__ import print_function

PROJECT_HOME = 'https://github.com/pimutils/vdirsyncer'
BUGTRACKER_HOME = PROJECT_HOME + '/issues'
DOCS_HOME = 'https://vdirsyncer.pimutils.org/en/stable'

try:
    from .version import version as __version__  # noqa
except ImportError:  # pragma: no cover
    raise ImportError(
        'Failed to find (autogenerated) version.py. '
        'This might be because you are installing from GitHub\'s tarballs, '
        'use the PyPI ones.'
    )


def _check_python_version():  # pragma: no cover
    import sys
    if sys.version_info < (3, 3, 0):
        print('vdirsyncer requires at least Python 3.3.')
        sys.exit(1)


_check_python_version()
del _check_python_version
vdirsyncer-0.16.2/vdirsyncer/repair.py0000644000175000017500000000400513132657103022052 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*-

import logging
from os.path import basename

from .utils import generate_href, href_safe

logger = logging.getLogger(__name__)


class IrreparableItem(Exception):
    pass


def repair_storage(storage, repair_unsafe_uid):
    seen_uids = set()
    all_hrefs = list(storage.list())
    for i, (href, _) in enumerate(all_hrefs):
        item, etag = storage.get(href)
        logger.info(u'[{}/{}] Processing {}'
                    .format(i, len(all_hrefs), href))

        try:
            new_item = repair_item(href, item, seen_uids, repair_unsafe_uid)
        except IrreparableItem:
            logger.error('Item {!r} is malformed beyond repair. '
                         'The PRODID property may indicate which software '
                         'created this item.'
.format(href)) logger.error('Item content: {!r}'.format(item.raw)) continue seen_uids.add(new_item.uid) if new_item.raw != item.raw: if new_item.uid != item.uid: storage.upload(new_item) storage.delete(href, etag) else: storage.update(href, new_item, etag) def repair_item(href, item, seen_uids, repair_unsafe_uid): if item.parsed is None: raise IrreparableItem() new_item = item if not item.uid: logger.warning('No UID, assigning random UID.') new_item = item.with_uid(generate_href()) elif item.uid in seen_uids: logger.warning('Duplicate UID, assigning random UID.') new_item = item.with_uid(generate_href()) elif not href_safe(item.uid) or not href_safe(basename(href)): if not repair_unsafe_uid: logger.warning('UID may cause problems, add ' '--repair-unsafe-uid to repair.') else: logger.warning('UID or href is unsafe, assigning random UID.') new_item = item.with_uid(generate_href()) if not new_item.uid: raise IrreparableItem() return new_item vdirsyncer-0.16.2/vdirsyncer/vobject.py0000644000175000017500000002673113134636312022237 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import hashlib from itertools import chain, tee from .utils import cached_property, uniq IGNORE_PROPS = ( # PRODID is changed by radicale for some reason after upload 'PRODID', # Sometimes METHOD:PUBLISH is added by WebCAL providers, for us it doesn't # make a difference 'METHOD', # X-RADICALE-NAME is used by radicale, because hrefs don't really exist in # their filesystem backend 'X-RADICALE-NAME', # Apparently this is set by Horde? # https://github.com/pimutils/vdirsyncer/issues/318 'X-WR-CALNAME', # Those are from the VCARD specification and is supposed to change when the # item does -- however, we can determine that ourselves 'REV', 'LAST-MODIFIED', 'CREATED', # Some iCalendar HTTP calendars generate the DTSTAMP at request time, so # this property always changes when the rest of the item didn't. Some do # the same with the UID. # # - Google's read-only calendar links # - http://www.feiertage-oesterreich.at/ 'DTSTAMP', 'UID', ) class Item(object): '''Immutable wrapper class for VCALENDAR (VEVENT, VTODO) and VCARD''' def __init__(self, raw): assert isinstance(raw, str), type(raw) self._raw = raw def with_uid(self, new_uid): parsed = _Component.parse(self.raw) stack = [parsed] while stack: component = stack.pop() stack.extend(component.subcomponents) if component.name in ('VEVENT', 'VTODO', 'VJOURNAL', 'VCARD'): del component['UID'] if new_uid: component['UID'] = new_uid return Item('\r\n'.join(parsed.dump_lines())) @cached_property def raw(self): '''Raw content of the item, as unicode string. Vdirsyncer doesn't validate the content in any way. ''' return self._raw @cached_property def uid(self): '''Global identifier of the item, across storages, doesn't change after a modification of the item.''' # Don't actually parse component, but treat all lines as single # component, avoiding traversal through all subcomponents. x = _Component('TEMP', self.raw.splitlines(), []) try: return x['UID'].strip() or None except KeyError: return None @cached_property def hash(self): '''Hash of self.raw, used for etags.''' return hash_item(self.raw) @cached_property def ident(self): '''Used for generating hrefs and matching up items during synchronization. This is either the UID or the hash of the item's content.''' # We hash the item instead of directly using its raw content, because # # 1. The raw content might be really large, e.g. when it's a contact # with a picture, which bloats the status file. # # 2. 
The status file would contain really sensitive information. return self.uid or self.hash @property def parsed(self): '''Don't cache because the rv is mutable.''' try: return _Component.parse(self.raw) except Exception: return None def normalize_item(item, ignore_props=IGNORE_PROPS): '''Create syntactically invalid mess that is equal for similar items.''' if not isinstance(item, Item): item = Item(item) item = _strip_timezones(item) x = _Component('TEMP', item.raw.splitlines(), []) for prop in IGNORE_PROPS: del x[prop] x.props.sort() return u'\r\n'.join(filter(bool, (line.strip() for line in x.props))) def _strip_timezones(item): parsed = item.parsed if not parsed or parsed.name != 'VCALENDAR': return item parsed.subcomponents = [c for c in parsed.subcomponents if c.name != 'VTIMEZONE'] return Item('\r\n'.join(parsed.dump_lines())) def hash_item(text): return hashlib.sha256(normalize_item(text).encode('utf-8')).hexdigest() def split_collection(text): assert isinstance(text, str) inline = [] items = {} # uid => item ungrouped_items = [] for main in _Component.parse(text, multiple=True): _split_collection_impl(main, main, inline, items, ungrouped_items) for item in chain(items.values(), ungrouped_items): item.subcomponents.extend(inline) yield u'\r\n'.join(item.dump_lines()) def _split_collection_impl(item, main, inline, items, ungrouped_items): if item.name == u'VTIMEZONE': inline.append(item) elif item.name == u'VCARD': ungrouped_items.append(item) elif item.name in (u'VTODO', u'VEVENT', u'VJOURNAL'): uid = item.get(u'UID', u'') wrapper = _Component(main.name, main.props[:], []) if uid.strip(): wrapper = items.setdefault(uid, wrapper) else: ungrouped_items.append(wrapper) wrapper.subcomponents.append(item) elif item.name in (u'VCALENDAR', u'VADDRESSBOOK'): if item.name == 'VCALENDAR': del item['METHOD'] for subitem in item.subcomponents: _split_collection_impl(subitem, item, inline, items, ungrouped_items) else: raise ValueError('Unknown component: {}' .format(item.name)) _default_join_wrappers = { u'VCALENDAR': u'VCALENDAR', u'VEVENT': u'VCALENDAR', u'VTODO': u'VCALENDAR', u'VCARD': u'VADDRESSBOOK' } def join_collection(items, wrappers=_default_join_wrappers): ''' :param wrappers: { item_type: wrapper_type } ''' items1, items2 = tee((_Component.parse(x) for x in items), 2) item_type, wrapper_type = _get_item_type(items1, wrappers) wrapper_props = [] def _get_item_components(x): if x.name == wrapper_type: wrapper_props.extend(x.props) return x.subcomponents else: return [x] components = chain(*(_get_item_components(x) for x in items2)) lines = chain(*uniq(tuple(x.dump_lines()) for x in components)) if wrapper_type is not None: lines = chain(*( [u'BEGIN:{}'.format(wrapper_type)], # XXX: wrapper_props is a list of lines (with line-wrapping), so # filtering out duplicate lines will almost certainly break # multiline-values. Since the only props we usually need to # support are PRODID and VERSION, I don't care. uniq(wrapper_props), lines, [u'END:{}'.format(wrapper_type)] )) return u''.join(line + u'\r\n' for line in lines) def _get_item_type(components, wrappers): i = 0 for component in components: i += 1 try: item_type = component.name wrapper_type = wrappers[item_type] except KeyError: pass else: return item_type, wrapper_type if not i: return None, None else: raise ValueError('Not sure how to join components.') class _Component(object): ''' Raw outline of the components. 
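
    A small round-trip sketch (the property values are made up)::

        lines = ['BEGIN:VCARD', 'UID:1234', 'END:VCARD']
        c = _Component.parse(lines)
        assert c.name == 'VCARD'
        assert c['UID'] == '1234'
        assert list(c.dump_lines()) == lines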
    Vdirsyncer's operations on iCalendar and VCard objects are limited to
    retrieving the UID and splitting larger files into items. Consequently
    this parser is very lazy, with the downside that manipulation of item
    properties are extremely costly.

    Other features:

    - Preserve the original property order and wrapping.
    - Don't choke on irrelevant details like invalid datetime formats.

    Original version from https://github.com/collective/icalendar/, but
    apart from the similar API, very few parts have been reused.
    '''

    def __init__(self, name, lines, subcomponents):
        '''
        :param name: The component name.
        :param lines: The component's own properties, as list of lines
            (strings).
        :param subcomponents: List of components.
        '''
        self.name = name
        self.props = lines
        self.subcomponents = subcomponents

    @classmethod
    def parse(cls, lines, multiple=False):
        if isinstance(lines, bytes):
            lines = lines.decode('utf-8')
        if isinstance(lines, str):
            lines = lines.splitlines()

        stack = []
        rv = []
        try:
            for _i, line in enumerate(lines):
                if line.startswith(u'BEGIN:'):
                    c_name = line[len(u'BEGIN:'):].strip().upper()
                    stack.append(cls(c_name, [], []))
                elif line.startswith(u'END:'):
                    component = stack.pop()
                    if stack:
                        stack[-1].subcomponents.append(component)
                    else:
                        rv.append(component)
                else:
                    if line.strip():
                        stack[-1].props.append(line)
        except IndexError:
            raise ValueError('Parsing error at line {}'.format(_i + 1))

        if multiple:
            return rv
        elif len(rv) != 1:
            raise ValueError('Found {} components, expected one.'
                             .format(len(rv)))
        else:
            return rv[0]

    def dump_lines(self):
        yield u'BEGIN:{}'.format(self.name)
        for line in self.props:
            yield line
        for c in self.subcomponents:
            for line in c.dump_lines():
                yield line
        yield u'END:{}'.format(self.name)

    def __delitem__(self, key):
        prefix = (u'{}:'.format(key), u'{};'.format(key))
        new_lines = []
        lineiter = iter(self.props)
        while True:
            for line in lineiter:
                if line.startswith(prefix):
                    break
                else:
                    new_lines.append(line)
            else:
                break

            for line in lineiter:
                if not line.startswith((u' ', u'\t')):
                    new_lines.append(line)
                    break

        self.props = new_lines

    def __setitem__(self, key, val):
        assert isinstance(val, str)
        assert u'\n' not in val
        del self[key]
        line = u'{}:{}'.format(key, val)
        self.props.append(line)

    def __contains__(self, obj):
        if isinstance(obj, type(self)):
            return obj in self.subcomponents or \
                any(obj in x for x in self.subcomponents)
        elif isinstance(obj, str):
            return self.get(obj, None) is not None
        else:
            raise ValueError(obj)

    def __getitem__(self, key):
        prefix_without_params = '{}:'.format(key)
        prefix_with_params = '{};'.format(key)
        iterlines = iter(self.props)
        for line in iterlines:
            if line.startswith(prefix_without_params):
                rv = line[len(prefix_without_params):]
                break
            elif line.startswith(prefix_with_params):
                rv = line[len(prefix_with_params):].split(':', 1)[-1]
                break
        else:
            raise KeyError()

        for line in iterlines:
            if line.startswith((u' ', u'\t')):
                rv += line[1:]
            else:
                break

        return rv

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __eq__(self, other):
        return (
            isinstance(other, type(self)) and
            self.name == other.name and
            self.props == other.props and
            self.subcomponents == other.subcomponents
        )
vdirsyncer-0.16.2/vdirsyncer/cli/0000755000175000017500000000000013147536465021003 5ustar untitakeruntitaker00000000000000vdirsyncer-0.16.2/vdirsyncer/cli/discover.py0000644000175000017500000001670313144561565023176 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*-

import hashlib
import json
import logging
import sys

from .utils import
handle_collection_not_found, handle_storage_init_error, \ load_status, save_status, storage_class_from_config, \ storage_instance_from_config from .. import exceptions from ..utils import cached_property # Increase whenever upgrade potentially breaks discovery cache and collections # should be re-discovered DISCOVERY_CACHE_VERSION = 1 logger = logging.getLogger(__name__) def _get_collections_cache_key(pair): m = hashlib.sha256() j = json.dumps([ DISCOVERY_CACHE_VERSION, pair.collections, pair.config_a, pair.config_b, ], sort_keys=True) m.update(j.encode('utf-8')) return m.hexdigest() def collections_for_pair(status_path, pair, from_cache=True, list_collections=False): '''Determine all configured collections for a given pair. Takes care of shortcut expansion and result caching. :param status_path: The path to the status directory. :param from_cache: Whether to load from cache (aborting on cache miss) or discover and save to cache. :returns: iterable of (collection, (a_args, b_args)) ''' cache_key = _get_collections_cache_key(pair) if from_cache: rv = load_status(status_path, pair.name, data_type='collections') if rv and rv.get('cache_key', None) == cache_key: return list(_expand_collections_cache( rv['collections'], pair.config_a, pair.config_b )) elif rv: raise exceptions.UserError('Detected change in config file, ' 'please run `vdirsyncer discover {}`.' .format(pair.name)) else: raise exceptions.UserError('Please run `vdirsyncer discover {}` ' ' before synchronization.' .format(pair.name)) logger.info('Discovering collections for pair {}' .format(pair.name)) a_discovered = _DiscoverResult(pair.config_a) b_discovered = _DiscoverResult(pair.config_b) if list_collections: _print_collections(pair.config_a['instance_name'], a_discovered.get_self) _print_collections(pair.config_b['instance_name'], b_discovered.get_self) # We have to use a list here because the special None/null value would get # mangled to string (because JSON objects always have string keys). 
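    # As an illustration (hypothetical shortcut value): with
    # `collections = ["from a"]` in the pair config, expand_collections()
    # below replaces the shortcut with every collection name discovered on
    # side a.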
rv = list(expand_collections( shortcuts=pair.collections, config_a=pair.config_a, config_b=pair.config_b, get_a_discovered=a_discovered.get_self, get_b_discovered=b_discovered.get_self, _handle_collection_not_found=handle_collection_not_found )) _sanity_check_collections(rv) save_status(status_path, pair.name, data_type='collections', data={ 'collections': list( _compress_collections_cache(rv, pair.config_a, pair.config_b) ), 'cache_key': cache_key }) return rv def _sanity_check_collections(collections): for _, (a_args, b_args) in collections: storage_instance_from_config(a_args) storage_instance_from_config(b_args) def _compress_collections_cache(collections, config_a, config_b): def deduplicate(x, y): rv = {} for key, value in x.items(): if key not in y or y[key] != value: rv[key] = value return rv for name, (a, b) in collections: yield name, (deduplicate(a, config_a), deduplicate(b, config_b)) def _expand_collections_cache(collections, config_a, config_b): for name, (a_delta, b_delta) in collections: a = dict(config_a) a.update(a_delta) b = dict(config_b) b.update(b_delta) yield name, (a, b) class _DiscoverResult: def __init__(self, config): self._cls, _ = storage_class_from_config(config) self._config = config def get_self(self): return self._discovered @cached_property def _discovered(self): try: discovered = list(self._cls.discover(**self._config)) except NotImplementedError: return {} except Exception: return handle_storage_init_error(self._cls, self._config) else: storage_type = self._config['type'] rv = {} for args in discovered: args['type'] = storage_type rv[args['collection']] = args return rv def expand_collections(shortcuts, config_a, config_b, get_a_discovered, get_b_discovered, _handle_collection_not_found): handled_collections = set() if shortcuts is None: shortcuts = [None] for shortcut in shortcuts: if shortcut == 'from a': collections = get_a_discovered() elif shortcut == 'from b': collections = get_b_discovered() else: collections = [shortcut] for collection in collections: if isinstance(collection, list): collection, collection_a, collection_b = collection else: collection_a = collection_b = collection if collection in handled_collections: continue handled_collections.add(collection) a_args = _collection_from_discovered( get_a_discovered, collection_a, config_a, _handle_collection_not_found ) b_args = _collection_from_discovered( get_b_discovered, collection_b, config_b, _handle_collection_not_found ) yield collection, (a_args, b_args) def _collection_from_discovered(get_discovered, collection, config, _handle_collection_not_found): if collection is None: args = dict(config) args['collection'] = None return args try: return get_discovered()[collection] except KeyError: return _handle_collection_not_found(config, collection) def _print_collections(instance_name, get_discovered): try: discovered = get_discovered() except exceptions.UserError: raise except Exception: # Unless discovery failed due to a user-inflicted error (instanceof # UserError), we don't even know if the storage supports discovery # properly. So we can't abort. 
import traceback logger.debug(''.join(traceback.format_tb(sys.exc_info()[2]))) logger.warning('Failed to discover collections for {}, use `-vdebug` ' 'to see the full traceback.'.format(instance_name)) return logger.info('{}:'.format(instance_name)) for args in discovered.values(): collection = args['collection'] if collection is None: continue args['instance_name'] = instance_name try: storage = storage_instance_from_config(args, create=False) displayname = storage.get_meta('displayname') except Exception: displayname = u'' logger.info(' - {}{}'.format( json.dumps(collection), ' ("{}")'.format(displayname) if displayname and displayname != collection else '' )) vdirsyncer-0.16.2/vdirsyncer/cli/fetchparams.py0000644000175000017500000000506713013735125023644 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import logging import click from . import AppContext from .. import exceptions from ..utils import expand_path, synchronized SUFFIX = '.fetch' logger = logging.getLogger(__name__) def expand_fetch_params(config): config = dict(config) for key in list(config): if not key.endswith(SUFFIX): continue newkey = key[:-len(SUFFIX)] if newkey in config: raise ValueError('Can\'t set {} and {}.'.format(key, newkey)) config[newkey] = _fetch_value(config[key], key) del config[key] return config @synchronized() def _fetch_value(opts, key): if not isinstance(opts, list): raise ValueError('Invalid value for {}: Expected a list, found {!r}.' .format(key, opts)) if not opts: raise ValueError('Expected list of length > 0.') try: ctx = click.get_current_context().find_object(AppContext) if ctx is None: raise RuntimeError() password_cache = ctx.fetched_params except RuntimeError: password_cache = {} cache_key = tuple(opts) if cache_key in password_cache: rv = password_cache[cache_key] logger.debug('Found cached value for {!r}.'.format(opts)) if isinstance(rv, BaseException): raise rv return rv strategy = opts[0] try: strategy_fn = STRATEGIES[strategy] except KeyError: raise exceptions.UserError('Unknown strategy: {}'.format(strategy)) logger.debug('Fetching value for {} with {} strategy.' .format(key, strategy)) try: rv = strategy_fn(*opts[1:]) except (click.Abort, KeyboardInterrupt) as e: password_cache[cache_key] = e raise else: if not rv: raise exceptions.UserError('Empty value for {}, this most likely ' 'indicates an error.' .format(key)) password_cache[cache_key] = rv return rv def _strategy_command(*command): import subprocess command = (expand_path(command[0]),) + command[1:] try: stdout = subprocess.check_output(command, universal_newlines=True) return stdout.strip('\n') except OSError as e: raise exceptions.UserError('Failed to execute command: {}\n{}' .format(' '.join(command), str(e))) def _strategy_prompt(text): return click.prompt(text, hide_input=True) STRATEGIES = { 'command': _strategy_command, 'prompt': _strategy_prompt, } vdirsyncer-0.16.2/vdirsyncer/cli/__init__.py0000644000175000017500000001614313147533673023117 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import functools import logging import sys import click import click_log from .. 
import BUGTRACKER_HOME, __version__ cli_logger = logging.getLogger(__name__) click_log.basic_config('vdirsyncer') class AppContext(object): def __init__(self): self.config = None self.fetched_params = {} self.logger = None pass_context = click.make_pass_decorator(AppContext, ensure=True) def catch_errors(f): @functools.wraps(f) def inner(*a, **kw): try: f(*a, **kw) except BaseException: from .utils import handle_cli_error handle_cli_error() sys.exit(1) return inner @click.group() @click_log.simple_verbosity_option('vdirsyncer') @click.version_option(version=__version__) @click.option('--config', '-c', metavar='FILE', help='Config file to use.') @pass_context @catch_errors def app(ctx, config): ''' Synchronize calendars and contacts ''' if sys.platform == 'win32': cli_logger.warning('Vdirsyncer currently does not support Windows. ' 'You will likely encounter bugs. ' 'See {}/535 for more information.' .format(BUGTRACKER_HOME)) if not ctx.config: from .config import load_config ctx.config = load_config(config) main = app def max_workers_callback(ctx, param, value): if value == 0 and logging.getLogger('vdirsyncer').level == logging.DEBUG: value = 1 cli_logger.debug('Using {} maximal workers.'.format(value)) return value def max_workers_option(default=0): help = 'Use at most this many connections. ' if default == 0: help += 'The default is 0, which means "as many as necessary". ' \ 'With -vdebug enabled, the default is 1.' else: help += 'The default is {}.'.format(default) return click.option( '--max-workers', default=default, type=click.IntRange(min=0, max=None), callback=max_workers_callback, help=help ) def collections_arg_callback(ctx, param, value): ''' Expand the various CLI shortforms ("pair, pair/collection") to an iterable of (pair, collections). ''' # XXX: Ugly! pass_context should work everywhere. config = ctx.find_object(AppContext).config rv = {} for pair_and_collection in (value or config.pairs): pair, collection = pair_and_collection, None if '/' in pair: pair, collection = pair.split('/') collections = rv.setdefault(pair, set()) if collection: collections.add(collection) return rv.items() collections_arg = click.argument('collections', nargs=-1, callback=collections_arg_callback) @app.command() @collections_arg @click.option('--force-delete/--no-force-delete', help=('Do/Don\'t abort synchronization when all items are about ' 'to be deleted from both sides.')) @max_workers_option() @pass_context @catch_errors def sync(ctx, collections, force_delete, max_workers): ''' Synchronize the given collections or pairs. If no arguments are given, all will be synchronized. This command will not synchronize metadata, use `vdirsyncer metasync` for that. \b \b\bExamples: # Sync everything configured vdirsyncer sync \b # Sync the pairs "bob" and "frank" vdirsyncer sync bob frank \b # Sync only "first_collection" from the pair "bob" vdirsyncer sync bob/first_collection ''' from .tasks import prepare_pair, sync_collection from .utils import WorkerQueue wq = WorkerQueue(max_workers) with wq.join(): for pair_name, collections in collections: wq.put(functools.partial(prepare_pair, pair_name=pair_name, collections=collections, config=ctx.config, force_delete=force_delete, callback=sync_collection)) wq.spawn_worker() @app.command() @collections_arg @max_workers_option() @pass_context @catch_errors def metasync(ctx, collections, max_workers): ''' Synchronize metadata of the given collections or pairs. See the `sync` command for usage. 
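
    By analogy with `sync` (the pair name below is hypothetical):

    \b
    \b\bExamples:

      # Metasync everything configured
      vdirsyncer metasync

      # Metasync only the pair "bob"
      vdirsyncer metasync bob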
''' from .tasks import prepare_pair, metasync_collection from .utils import WorkerQueue wq = WorkerQueue(max_workers) with wq.join(): for pair_name, collections in collections: wq.put(functools.partial(prepare_pair, pair_name=pair_name, collections=collections, config=ctx.config, callback=metasync_collection)) wq.spawn_worker() @app.command() @click.argument('pairs', nargs=-1) @click.option( '--list/--no-list', default=True, help=( 'Whether to list all collections from both sides during discovery, ' 'for debugging. This is slow and may crash for broken servers.' ) ) @max_workers_option(default=1) @pass_context @catch_errors def discover(ctx, pairs, max_workers, list): ''' Refresh collection cache for the given pairs. ''' from .tasks import discover_collections from .utils import WorkerQueue config = ctx.config wq = WorkerQueue(max_workers) with wq.join(): for pair_name in (pairs or config.pairs): pair = config.get_pair(pair_name) wq.put(functools.partial( discover_collections, status_path=config.general['status_path'], pair=pair, from_cache=False, list_collections=list, )) wq.spawn_worker() @app.command() @click.argument('collection') @click.option('--repair-unsafe-uid/--no-repair-unsafe-uid', default=False, help=('Some characters in item UIDs and URLs may cause problems ' 'with buggy software. Adding this option will reassign ' 'new UIDs to those items. This is disabled by default, ' 'which is equivalent to `--no-repair-unsafe-uid`.')) @pass_context @catch_errors def repair(ctx, collection, repair_unsafe_uid): ''' Repair a given collection. Runs a few checks on the collection and applies some fixes to individual items that may improve general stability, also with other CalDAV/CardDAV clients. In particular, if you encounter URL-encoding-related issues with other clients, this command with --repair-unsafe-uid might help. \b \b\bExamples: # Repair the `foo` collection of the `calendars_local` storage vdirsyncer repair calendars_local/foo ''' from .tasks import repair_collection cli_logger.warning('This operation will take a very long time.') cli_logger.warning('It\'s recommended to make a backup and ' 'turn off other client\'s synchronization features.') click.confirm('Do you want to continue?', abort=True) repair_collection(ctx.config, collection, repair_unsafe_uid=repair_unsafe_uid) vdirsyncer-0.16.2/vdirsyncer/cli/config.py0000644000175000017500000002706513132657103022617 0ustar untitakeruntitaker00000000000000import json import os import string from configparser import RawConfigParser from itertools import chain from click_threading import get_ui_worker from .fetchparams import expand_fetch_params from .utils import storage_class_from_config from .. import PROJECT_HOME, exceptions from ..utils import cached_property, expand_path GENERAL_ALL = frozenset(['status_path']) GENERAL_REQUIRED = frozenset(['status_path']) SECTION_NAME_CHARS = frozenset(chain(string.ascii_letters, string.digits, '_')) def validate_section_name(name, section_type): invalid = set(name) - SECTION_NAME_CHARS if invalid: chars_display = ''.join(sorted(SECTION_NAME_CHARS)) raise exceptions.UserError( 'The {}-section "{}" contains invalid characters. 
Only ' 'the following characters are allowed for storage and ' 'pair names:\n{}'.format(section_type, name, chars_display)) def _validate_general_section(general_config): invalid = set(general_config) - GENERAL_ALL missing = GENERAL_REQUIRED - set(general_config) problems = [] if invalid: problems.append(u'general section doesn\'t take the parameters: {}' .format(u', '.join(invalid))) if missing: problems.append(u'general section is missing the parameters: {}' .format(u', '.join(missing))) if problems: raise exceptions.UserError( u'Invalid general section. Copy the example ' u'config from the repository and edit it: {}' .format(PROJECT_HOME), problems=problems) def _validate_collections_param(collections): if collections is None: return if not isinstance(collections, list): raise ValueError('`collections` parameter must be a list or `null`.') collection_names = set() for i, collection in enumerate(collections): try: if isinstance(collection, (str, bytes)): collection_name = collection elif isinstance(collection, list): e = ValueError( 'Expected list of format ' '["config_name", "storage_a_name", "storage_b_name"]' .format(len(collection))) if len(collection) != 3: raise e if not isinstance(collection[0], (str, bytes)): raise e for x in collection[1:]: if x is not None and not isinstance(x, (str, bytes)): raise e collection_name = collection[0] else: raise ValueError('Expected string or list of three strings.') if collection_name in collection_names: raise ValueError('Duplicate value.') collection_names.add(collection_name) except ValueError as e: raise ValueError('`collections` parameter, position {i}: {e}' .format(i=i, e=str(e))) class _ConfigReader: def __init__(self, f): self._file = f self._parser = c = RawConfigParser() c.read_file(f) self._seen_names = set() self._general = {} self._pairs = {} self._storages = {} def _parse_section(self, section_type, name, options): validate_section_name(name, section_type) if name in self._seen_names: raise ValueError('Name "{}" already used.'.format(name)) self._seen_names.add(name) if section_type == 'general': if self._general: raise ValueError('More than one general section.') self._general = options elif section_type == 'storage': self._storages[name] = options elif section_type == 'pair': self._pairs[name] = options else: raise ValueError('Unknown section type.') def parse(self): for section in self._parser.sections(): if ' ' in section: section_type, name = section.split(' ', 1) else: section_type = name = section try: self._parse_section( section_type, name, dict(_parse_options(self._parser.items(section), section=section)) ) except ValueError as e: raise exceptions.UserError( 'Section "{}": {}'.format(section, str(e))) _validate_general_section(self._general) if getattr(self._file, 'name', None): self._general['status_path'] = os.path.join( os.path.dirname(self._file.name), expand_path(self._general['status_path']) ) return self._general, self._pairs, self._storages def _parse_options(items, section=None): for key, value in items: try: yield key, json.loads(value) except ValueError as e: raise ValueError('Section "{}", option "{}": {}' .format(section, key, e)) class Config(object): def __init__(self, general, pairs, storages): self.general = general self.storages = storages for name, options in storages.items(): options['instance_name'] = name self.pairs = {} for name, options in pairs.items(): try: self.pairs[name] = PairConfig(self, name, options) except ValueError as e: raise exceptions.UserError('Pair {}: {}'.format(name, e)) 
@classmethod def from_fileobject(cls, f): reader = _ConfigReader(f) return cls(*reader.parse()) @classmethod def from_filename_or_environment(cls, fname=None): if fname is None: fname = os.environ.get('VDIRSYNCER_CONFIG', None) if fname is None: fname = expand_path('~/.vdirsyncer/config') if not os.path.exists(fname): xdg_config_dir = os.environ.get('XDG_CONFIG_HOME', expand_path('~/.config/')) fname = os.path.join(xdg_config_dir, 'vdirsyncer/config') try: with open(fname) as f: return cls.from_fileobject(f) except Exception as e: raise exceptions.UserError( 'Error during reading config {}: {}' .format(fname, e) ) def get_storage_args(self, storage_name): try: args = self.storages[storage_name] except KeyError: raise exceptions.UserError( 'Storage {!r} not found. ' 'These are the configured storages: {}' .format(storage_name, list(self.storages)) ) else: return expand_fetch_params(args) def get_pair(self, pair_name): try: return self.pairs[pair_name] except KeyError as e: raise exceptions.PairNotFound(e, pair_name=pair_name) class PairConfig(object): def __init__(self, full_config, name, options): self._config = full_config self.name = name self.name_a = options.pop('a') self.name_b = options.pop('b') self._partial_sync = options.pop('partial_sync', None) self.metadata = options.pop('metadata', None) or () self.conflict_resolution = \ self._process_conflict_resolution_param( options.pop('conflict_resolution', None)) try: self.collections = options.pop('collections') except KeyError: raise ValueError( 'collections parameter missing.\n\n' 'As of 0.9.0 this parameter has no default anymore. ' 'Set `collections = null` explicitly in your pair config.' ) else: _validate_collections_param(self.collections) if options: raise ValueError('Unknown options: {}'.format(', '.join(options))) def _process_conflict_resolution_param(self, conflict_resolution): if conflict_resolution in (None, 'a wins', 'b wins'): return conflict_resolution elif isinstance(conflict_resolution, list) and \ len(conflict_resolution) > 1 and \ conflict_resolution[0] == 'command': def resolve(a, b): a_name = self.config_a['instance_name'] b_name = self.config_b['instance_name'] command = conflict_resolution[1:] def inner(): return _resolve_conflict_via_command(a, b, command, a_name, b_name) ui_worker = get_ui_worker() return ui_worker.put(inner) return resolve else: raise ValueError('Invalid value for `conflict_resolution`.') # The following parameters are lazily evaluated because evaluating # self.config_a would expand all `x.fetch` parameters. This is costly and # unnecessary if the pair is not actually synced. @cached_property def config_a(self): return self._config.get_storage_args(self.name_a) @cached_property def config_b(self): return self._config.get_storage_args(self.name_b) @cached_property def partial_sync(self): partial_sync = self._partial_sync # We need to use UserError here because ValueError is not # caught at the time this is expanded. if partial_sync is not None: cls_a, _ = storage_class_from_config(self.config_a) cls_b, _ = storage_class_from_config(self.config_b) if not cls_a.read_only and \ not self.config_a.get('read_only', False) and \ not cls_b.read_only and \ not self.config_b.get('read_only', False): raise exceptions.UserError( '`partial_sync` is only effective if one storage is ' 'read-only. Use `read_only = true` in exactly one storage ' 'section.' 
) if partial_sync is None: partial_sync = 'revert' if partial_sync not in ('ignore', 'revert', 'error'): raise exceptions.UserError('Invalid value for `partial_sync`.') return partial_sync class CollectionConfig(object): def __init__(self, pair, name, config_a, config_b): self.pair = pair self._config = pair._config self.name = name self.config_a = config_a self.config_b = config_b #: Public API. Khal's config wizard depends on this function. load_config = Config.from_filename_or_environment def _resolve_conflict_via_command(a, b, command, a_name, b_name, _check_call=None): import tempfile import shutil if _check_call is None: from subprocess import check_call as _check_call from ..vobject import Item dir = tempfile.mkdtemp(prefix='vdirsyncer-conflict.') try: a_tmp = os.path.join(dir, a_name) b_tmp = os.path.join(dir, b_name) with open(a_tmp, 'w') as f: f.write(a.raw) with open(b_tmp, 'w') as f: f.write(b.raw) command[0] = expand_path(command[0]) _check_call(command + [a_tmp, b_tmp]) with open(a_tmp) as f: new_a = f.read() with open(b_tmp) as f: new_b = f.read() if new_a != new_b: raise exceptions.UserError('The two files are not completely ' 'equal.') return Item(new_a) finally: shutil.rmtree(dir) vdirsyncer-0.16.2/vdirsyncer/cli/tasks.py0000644000175000017500000001134113121521602022455 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import functools import json from .config import CollectionConfig from .discover import collections_for_pair, storage_class_from_config, \ storage_instance_from_config from .utils import JobFailed, cli_logger, get_status_name, \ handle_cli_error, load_status, manage_sync_status, save_status from .. import exceptions, sync def prepare_pair(wq, pair_name, collections, config, callback, **kwargs): pair = config.get_pair(pair_name) all_collections = dict(collections_for_pair( status_path=config.general['status_path'], pair=pair )) # spawn one worker less because we can reuse the current one new_workers = -1 for collection_name in (collections or all_collections): try: config_a, config_b = all_collections[collection_name] except KeyError: raise exceptions.UserError( 'Pair {}: Collection {} not found. 
These are the ' 'configured collections:\n{}' .format(pair_name, json.dumps(collection_name), list(all_collections))) new_workers += 1 collection = CollectionConfig(pair, collection_name, config_a, config_b) wq.put(functools.partial(callback, collection=collection, general=config.general, **kwargs)) for _ in range(new_workers): wq.spawn_worker() def sync_collection(wq, collection, general, force_delete): pair = collection.pair status_name = get_status_name(pair.name, collection.name) try: cli_logger.info('Syncing {}'.format(status_name)) a = storage_instance_from_config(collection.config_a) b = storage_instance_from_config(collection.config_b) sync_failed = False def error_callback(e): nonlocal sync_failed sync_failed = True handle_cli_error(status_name, e) with manage_sync_status(general['status_path'], pair.name, collection.name) as status: sync.sync( a, b, status, conflict_resolution=pair.conflict_resolution, force_delete=force_delete, error_callback=error_callback, partial_sync=pair.partial_sync ) if sync_failed: raise JobFailed() except JobFailed: raise except BaseException: handle_cli_error(status_name) raise JobFailed() def discover_collections(wq, pair, **kwargs): rv = collections_for_pair(pair=pair, **kwargs) collections = list(c for c, (a, b) in rv) if collections == [None]: collections = None cli_logger.info('Saved for {}: collections = {}' .format(pair.name, json.dumps(collections))) def repair_collection(config, collection, repair_unsafe_uid): from ..repair import repair_storage storage_name, collection = collection, None if '/' in storage_name: storage_name, collection = storage_name.split('/') config = config.get_storage_args(storage_name) storage_type = config['type'] if collection is not None: cli_logger.info('Discovering collections (skipping cache).') cls, config = storage_class_from_config(config) for config in cls.discover(**config): if config['collection'] == collection: break else: raise exceptions.UserError( 'Couldn\'t find collection {} for storage {}.' .format(collection, storage_name) ) config['type'] = storage_type storage = storage_instance_from_config(config) cli_logger.info('Repairing {}/{}'.format(storage_name, collection)) cli_logger.warning('Make sure no other program is talking to the server.') repair_storage(storage, repair_unsafe_uid=repair_unsafe_uid) def metasync_collection(wq, collection, general): from ..metasync import metasync pair = collection.pair status_name = get_status_name(pair.name, collection.name) try: cli_logger.info('Metasyncing {}'.format(status_name)) status = load_status(general['status_path'], pair.name, collection.name, data_type='metadata') or {} a = storage_instance_from_config(collection.config_a) b = storage_instance_from_config(collection.config_b) metasync( a, b, status, conflict_resolution=pair.conflict_resolution, keys=pair.metadata ) except BaseException: handle_cli_error(status_name) raise JobFailed() save_status(general['status_path'], pair.name, collection.name, data_type='metadata', data=status) vdirsyncer-0.16.2/vdirsyncer/cli/utils.py0000644000175000017500000003255513126225101022502 0ustar untitakeruntitaker00000000000000# -*- coding: utf-8 -*- import contextlib import errno import importlib import itertools import json import os import queue import sys from atomicwrites import atomic_write import click import click_threading from . import cli_logger from .. 
import BUGTRACKER_HOME, DOCS_HOME, exceptions from ..sync import IdentConflict, PartialSync, StorageEmpty, SyncConflict, \ SqliteStatus from ..utils import expand_path, get_storage_init_args STATUS_PERMISSIONS = 0o600 STATUS_DIR_PERMISSIONS = 0o700 class _StorageIndex(object): def __init__(self): self._storages = dict( caldav='vdirsyncer.storage.dav.CalDAVStorage', carddav='vdirsyncer.storage.dav.CardDAVStorage', filesystem='vdirsyncer.storage.filesystem.FilesystemStorage', http='vdirsyncer.storage.http.HttpStorage', singlefile='vdirsyncer.storage.singlefile.SingleFileStorage', google_calendar='vdirsyncer.storage.google.GoogleCalendarStorage', google_contacts='vdirsyncer.storage.google.GoogleContactsStorage', etesync_calendars='vdirsyncer.storage.etesync.EtesyncCalendars', etesync_contacts='vdirsyncer.storage.etesync.EtesyncContacts' ) def __getitem__(self, name): item = self._storages[name] if not isinstance(item, str): return item modname, clsname = item.rsplit('.', 1) mod = importlib.import_module(modname) self._storages[name] = rv = getattr(mod, clsname) assert rv.storage_name == name return rv storage_names = _StorageIndex() del _StorageIndex class JobFailed(RuntimeError): pass def handle_cli_error(status_name=None, e=None): ''' Print a useful error message for the current exception. This is supposed to catch all exceptions, and should never raise any exceptions itself. ''' try: if e is not None: raise e else: raise except exceptions.UserError as e: cli_logger.critical(e) except StorageEmpty as e: cli_logger.error( '{status_name}: Storage "{name}" was completely emptied. If you ' 'want to delete ALL entries on BOTH sides, then use ' '`vdirsyncer sync --force-delete {status_name}`. ' 'Otherwise delete the files for {status_name} in your status ' 'directory.'.format( name=e.empty_storage.instance_name, status_name=status_name ) ) except PartialSync as e: cli_logger.error( '{status_name}: Attempted change on {storage}, which is read-only' '. Set `partial_sync` in your pair section to `ignore` to ignore ' 'those changes, or `revert` to revert them on the other side.' .format(status_name=status_name, storage=e.storage) ) except SyncConflict as e: cli_logger.error( '{status_name}: One item changed on both sides. Resolve this ' 'conflict manually, or by setting the `conflict_resolution` ' 'parameter in your config file.\n' 'See also {docs}/config.html#pair-section\n' 'Item ID: {e.ident}\n' 'Item href on side A: {e.href_a}\n' 'Item href on side B: {e.href_b}\n' .format(status_name=status_name, e=e, docs=DOCS_HOME) ) except IdentConflict as e: cli_logger.error( '{status_name}: Storage "{storage.instance_name}" contains ' 'multiple items with the same UID or even content. Vdirsyncer ' 'will now abort the synchronization of this collection, because ' 'the fix for this is not clear; It could be the result of a badly ' 'behaving server. You can try running:\n\n' ' vdirsyncer repair {storage.instance_name}\n\n' 'But make sure to have a backup of your data in some form. The ' 'offending hrefs are:\n\n{href_list}\n' .format(status_name=status_name, storage=e.storage, href_list='\n'.join(map(repr, e.hrefs))) ) except (click.Abort, KeyboardInterrupt, JobFailed): pass except exceptions.PairNotFound as e: cli_logger.error( 'Pair {pair_name} does not exist. Please check your ' 'configuration file and make sure you\'ve typed the pair name ' 'correctly'.format(pair_name=e.pair_name) ) except exceptions.InvalidResponse as e: cli_logger.error( 'The server returned something vdirsyncer doesn\'t understand. 
' 'Error message: {!r}\n' 'While this is most likely a serverside problem, the vdirsyncer ' 'devs are generally interested in such bugs. Please report it in ' 'the issue tracker at {}' .format(e, BUGTRACKER_HOME) ) except exceptions.CollectionRequired as e: cli_logger.error( 'One or more storages don\'t support `collections = null`. ' 'You probably want to set `collections = ["from a", "from b"]`.' ) except Exception as e: tb = sys.exc_info()[2] import traceback tb = traceback.format_tb(tb) if status_name: msg = 'Unknown error occured for {}'.format(status_name) else: msg = 'Unknown error occured' msg += ': {}\nUse `-vdebug` to see the full traceback.'.format(e) cli_logger.error(msg) cli_logger.debug(''.join(tb)) def get_status_name(pair, collection): if collection is None: return pair return pair + '/' + collection def get_status_path(base_path, pair, collection=None, data_type=None): assert data_type is not None status_name = get_status_name(pair, collection) path = expand_path(os.path.join(base_path, status_name)) if os.path.isfile(path) and data_type == 'items': new_path = path + '.items' # XXX: Legacy migration cli_logger.warning('Migrating statuses: Renaming {} to {}' .format(path, new_path)) os.rename(path, new_path) path += '.' + data_type return path def load_status(base_path, pair, collection=None, data_type=None): path = get_status_path(base_path, pair, collection, data_type) if not os.path.exists(path): return None assert_permissions(path, STATUS_PERMISSIONS) with open(path) as f: try: return dict(json.load(f)) except ValueError: pass return {} def prepare_status_path(path): dirname = os.path.dirname(path) try: os.makedirs(dirname, STATUS_DIR_PERMISSIONS) except OSError as e: if e.errno != errno.EEXIST: raise @contextlib.contextmanager def manage_sync_status(base_path, pair_name, collection_name): path = get_status_path(base_path, pair_name, collection_name, 'items') status = None legacy_status = None try: # XXX: Legacy migration with open(path, 'rb') as f: if f.read(1) == b'{': f.seek(0) # json.load doesn't work on binary files for Python 3.4/3.5 legacy_status = dict(json.loads(f.read().decode('utf-8'))) except (OSError, IOError, ValueError): pass if legacy_status is not None: cli_logger.warning('Migrating legacy status to sqlite') os.remove(path) status = SqliteStatus(path) status.load_legacy_status(legacy_status) else: prepare_status_path(path) status = SqliteStatus(path) yield status def save_status(base_path, pair, collection=None, data_type=None, data=None): assert data_type is not None assert data is not None status_name = get_status_name(pair, collection) path = expand_path(os.path.join(base_path, status_name)) + '.' 
def storage_class_from_config(config):
    config = dict(config)
    storage_name = config.pop('type')
    try:
        cls = storage_names[storage_name]
    except KeyError:
        raise exceptions.UserError(
            'Unknown storage type: {}'.format(storage_name))
    return cls, config


def storage_instance_from_config(config, create=True):
    '''
    :param config: A configuration dictionary to pass as kwargs to the class
        corresponding to config['type']
    '''

    cls, new_config = storage_class_from_config(config)

    try:
        return cls(**new_config)
    except exceptions.CollectionNotFound as e:
        if create:
            config = handle_collection_not_found(
                config, config.get('collection', None), e=str(e))
            return storage_instance_from_config(config, create=False)
        else:
            raise
    except Exception:
        return handle_storage_init_error(cls, new_config)


def handle_storage_init_error(cls, config):
    e = sys.exc_info()[1]
    if not isinstance(e, TypeError) or '__init__' not in repr(e):
        raise

    all, required = get_storage_init_args(cls)
    given = set(config)
    missing = required - given
    invalid = given - all

    problems = []
    if missing:
        problems.append(
            u'{} storage requires the parameters: {}'
            .format(cls.storage_name, u', '.join(missing)))
    if invalid:
        problems.append(
            u'{} storage doesn\'t take the parameters: {}'
            .format(cls.storage_name, u', '.join(invalid)))

    if not problems:
        raise e

    raise exceptions.UserError(
        u'Failed to initialize {}'.format(config['instance_name']),
        problems=problems
    )
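# A hypothetical usage sketch (comments only; the config values below are
# invented, mirroring what a parsed `[storage ...]` section might contain):
# `storage_instance_from_config` pops the `type` key, resolves the class
# lazily via `storage_names`, and passes the remaining keys as keyword
# arguments to the storage constructor:
#
#     >>> config = {'type': 'filesystem', 'path': '~/.contacts/',
#     ...           'fileext': '.vcf'}
#     >>> storage = storage_instance_from_config(config)
#
# If the referenced collection does not exist and `create=True`, the user is
# asked (via `handle_collection_not_found` below) whether to create it.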
class WorkerQueue(object):
    '''
    A simple worker-queue setup.

    Note that workers quit if the queue is empty. That means you have to
    first put things into the queue before spawning the worker!
    '''

    def __init__(self, max_workers):
        self._queue = queue.Queue()
        self._workers = []
        self._max_workers = max_workers
        self._shutdown_handlers = []

        # According to http://stackoverflow.com/a/27062830, those are
        # threadsafe compared to increasing a simple integer variable.
        self.num_done_tasks = itertools.count()
        self.num_failed_tasks = itertools.count()

    def shutdown(self):
        while self._shutdown_handlers:
            try:
                self._shutdown_handlers.pop()()
            except Exception:
                pass

    def _worker(self):
        while True:
            try:
                func = self._queue.get(False)
            except queue.Empty:
                break

            try:
                func(wq=self)
            except Exception:
                handle_cli_error()
                next(self.num_failed_tasks)
            finally:
                self._queue.task_done()
                next(self.num_done_tasks)

        if not self._queue.unfinished_tasks:
            self.shutdown()

    def spawn_worker(self):
        if self._max_workers and len(self._workers) >= self._max_workers:
            return

        t = click_threading.Thread(target=self._worker)
        t.start()
        self._workers.append(t)

    @contextlib.contextmanager
    def join(self):
        assert self._workers or not self._queue.unfinished_tasks
        ui_worker = click_threading.UiWorker()
        self._shutdown_handlers.append(ui_worker.shutdown)
        _echo = click.echo
        with ui_worker.patch_click():
            yield
            if not self._workers:
                # Ugly hack, needed because ui_worker is not running.
                click.echo = _echo
                cli_logger.critical('Nothing to do.')
                sys.exit(5)

            ui_worker.run()
            self._queue.join()
            for worker in self._workers:
                worker.join()

            tasks_failed = next(self.num_failed_tasks)
            tasks_done = next(self.num_done_tasks)

            if tasks_failed > 0:
                cli_logger.error('{} out of {} tasks failed.'
                                 .format(tasks_failed, tasks_done))
                sys.exit(1)

    def put(self, f):
        return self._queue.put(f)


def assert_permissions(path, wanted):
    permissions = os.stat(path).st_mode & 0o777
    if permissions > wanted:
        cli_logger.warning('Correcting permissions of {} from {:o} to {:o}'
                           .format(path, permissions, wanted))
        os.chmod(path, wanted)


def handle_collection_not_found(config, collection, e=None):
    storage_name = config.get('instance_name', None)

    cli_logger.warning('{}No collection {} found for storage {}.'
                       .format('{}\n'.format(e) if e else '',
                               json.dumps(collection), storage_name))

    if click.confirm('Should vdirsyncer attempt to create it?'):
        storage_type = config['type']
        cls, config = storage_class_from_config(config)
        config['collection'] = collection
        try:
            args = cls.create_collection(**config)
            args['type'] = storage_type
            return args
        except NotImplementedError as e:
            cli_logger.error(e)

    raise exceptions.UserError(
        'Unable to find or create collection "{collection}" for '
        'storage "{storage}". Please create the collection '
        'yourself.'.format(collection=collection,
                           storage=storage_name))
vdirsyncer-0.16.2/vdirsyncer/utils.py0000644000175000017500000001467413121521602021735 0ustar  untitakeruntitaker00000000000000# -*- coding: utf-8 -*-

import functools
import os
import sys
import uuid

from inspect import getfullargspec

from . import exceptions

# This is only a subset of the chars allowed per the spec. In particular `@`
# is not included, because there are some servers that (incorrectly) encode
# it to `%40` when it's part of a URL path, and reject or "repair" URLs that
# contain `@` in the path. So it's better to just avoid it.
SAFE_UID_CHARS = ('abcdefghijklmnopqrstuvwxyz'
                  'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  '0123456789_.-+')


_missing = object()


def expand_path(p):
    p = os.path.expanduser(p)
    p = os.path.normpath(p)
    return p


def split_dict(d, f):
    '''Puts key into first dict if f(key), otherwise in second dict'''
    a, b = split_sequence(d.items(), lambda item: f(item[0]))
    return dict(a), dict(b)


def split_sequence(s, f):
    '''Puts item into first list if f(item), else in second list'''
    a = []
    b = []
    for item in s:
        if f(item):
            a.append(item)
        else:
            b.append(item)

    return a, b


def uniq(s):
    '''Filter duplicates while preserving order. ``set`` can almost always be
    used instead of this, but preserving order might prove useful for
    debugging.'''
    d = set()
    for x in s:
        if x not in d:
            d.add(x)
            yield x


def get_etag_from_file(f):
    '''Get etag from a filepath or file-like object.

    This function will flush/sync the file as much as necessary to obtain a
    correct value.
    '''
    if hasattr(f, 'read'):
        f.flush()  # Only this is necessary on Linux
        if sys.platform == 'win32':
            os.fsync(f.fileno())  # Apparently necessary on Windows
        stat = os.fstat(f.fileno())
    else:
        stat = os.stat(f)

    mtime = getattr(stat, 'st_mtime_ns', None)
    if mtime is None:
        mtime = stat.st_mtime
    return '{:.9f};{}'.format(mtime, stat.st_ino)


def get_storage_init_specs(cls, stop_at=object):
    if cls is stop_at:
        return ()

    spec = getfullargspec(cls.__init__)
    traverse_superclass = getattr(cls.__init__, '_traverse_superclass', True)
    if traverse_superclass:
        if traverse_superclass is True:  # noqa
            supercls = next(getattr(x.__init__, '__objclass__', x)
                            for x in cls.__mro__[1:])
        else:
            supercls = traverse_superclass
        superspecs = get_storage_init_specs(supercls, stop_at=stop_at)
    else:
        superspecs = ()

    return (spec,) + superspecs
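# A quick sketch of the splitting helpers above (comments only; the inputs
# are invented for illustration):
#
#     >>> split_dict({'a': 1, 'b': 2}, lambda key: key == 'a')
#     ({'a': 1}, {'b': 2})
#     >>> split_sequence([1, 2, 3, 4], lambda x: x % 2 == 0)
#     ([2, 4], [1, 3])
#     >>> list(uniq('abcab'))
#     ['a', 'b', 'c']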
def get_storage_init_args(cls, stop_at=object):
    '''
    Get args which are taken during class initialization. Assumes that all
    classes' __init__ calls super().__init__ with the rest of the arguments.

    :param cls: The class to inspect.
    :returns: (all, required), where ``all`` is a set of all arguments the
        class can take, and ``required`` is the subset of arguments the class
        requires.
    '''
    all, required = set(), set()
    for spec in get_storage_init_specs(cls, stop_at=stop_at):
        all.update(spec.args[1:])
        last = -len(spec.defaults) if spec.defaults else len(spec.args)
        required.update(spec.args[1:last])

    return all, required


def checkdir(path, create=False, mode=0o750):
    '''
    Check whether ``path`` is a directory.

    :param create: Whether to create the directory (and all parent
        directories) if it does not exist.
    :param mode: Mode to create missing directories with.
    '''

    if not os.path.isdir(path):
        if os.path.exists(path):
            raise IOError('{} is not a directory.'.format(path))
        if create:
            os.makedirs(path, mode)
        else:
            raise exceptions.CollectionNotFound('Directory {} does not exist.'
                                                .format(path))


def checkfile(path, create=False):
    '''
    Check whether ``path`` is a file.

    :param create: Whether to create the file's parent directories if they do
        not exist.
    '''
    checkdir(os.path.dirname(path), create=create)
    if not os.path.isfile(path):
        if os.path.exists(path):
            raise IOError('{} is not a file.'.format(path))
        if create:
            with open(path, 'wb'):
                pass
        else:
            raise exceptions.CollectionNotFound('File {} does not exist.'
                                                .format(path))


class cached_property(object):
    '''A read-only @property that is only evaluated once. Only usable on
    class instances' methods.
    '''
    def __init__(self, fget, doc=None):
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__
        self.__doc__ = doc or fget.__doc__
        self.fget = fget

    def __get__(self, obj, cls):
        if obj is None:  # pragma: no cover
            return self
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result


def href_safe(ident, safe=SAFE_UID_CHARS):
    return not bool(set(ident) - set(safe))


def generate_href(ident=None, safe=SAFE_UID_CHARS):
    '''
    Generate a safe identifier, suitable for URLs, storage hrefs or UIDs.

    If the given ident string is safe, it will be returned, otherwise a
    random UUID.
    '''
    if not ident or not href_safe(ident, safe):
        return str(uuid.uuid4())
    else:
        return ident


def synchronized(lock=None):
    if lock is None:
        from threading import Lock
        lock = Lock()

    def inner(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            with lock:
                return f(*args, **kwargs)

        return wrapper

    return inner


def open_graphical_browser(url, new=0, autoraise=True):
    '''Open a graphical web browser.

    This is basically like `webbrowser.open`, but without trying to launch
    CLI browsers at all. We exclude those because launching them is
    undesirable when using vdirsyncer on a server. Rather, copy-paste the
    URL into your local browser, or use the URL-yanking features of your
    terminal emulator.
    '''
    import webbrowser

    cli_names = set(['www-browser', 'links', 'links2', 'elinks', 'lynx',
                     'w3m'])

    for name in webbrowser._tryorder:
        if name in cli_names:
            continue

        browser = webbrowser.get(name)
        if browser.open(url, new, autoraise):
            return

    raise RuntimeError('No graphical browser found. Please open the URL '
                       'manually.')
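# End-of-module usage sketch (comments only, not part of vdirsyncer): how
# `generate_href` and `synchronized` above are meant to be used. The names
# `counter` and `bump` are invented for illustration:
#
#     >>> generate_href('hello-world')  # all chars are in SAFE_UID_CHARS
#     'hello-world'
#     >>> len(generate_href('not safe!')) == 36  # falls back to a UUID4
#     True
#
#     >>> @synchronized()  # one implicit shared lock for this function
#     ... def bump(counter):
#     ...     counter['n'] += 1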