pax_global_header00006660000000000000000000000064135546121750014523gustar00rootroot0000000000000052 comment=acd1adbc77a9bfbfe1a25ddf4dfe7168487575a3 magic-wormhole-mailbox-server-0.4.1/000077500000000000000000000000001355461217500173745ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/.coveragerc000066400000000000000000000013651355461217500215220ustar00rootroot00000000000000# -*- mode: conf -*- [run] # only record trace data for wormhole_mailbox_server.* source = wormhole_mailbox_server # and don't trace the test files themselves, or Versioneer's stuff omit = src/wormhole_mailbox_server/test/* src/wormhole_mailbox_server/_version.py # This allows 'coverage combine' to correlate the tracing data built while # running tests in multiple tox virtualenvs. To take advantage of this # properly, use "coverage erase" before tox, "coverage run --parallel-mode" # inside tox to avoid overwriting the output data (by writing it into # .coverage-XYZ instead of just .coverage), and run "coverage combine" # afterwards. [paths] source = src/ .tox/*/lib/python*/site-packages/ .tox/pypy*/site-packages/ magic-wormhole-mailbox-server-0.4.1/LICENSE000066400000000000000000000020701355461217500204000ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2015 Brian Warner Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. magic-wormhole-mailbox-server-0.4.1/MANIFEST.in000066400000000000000000000007121355461217500211320ustar00rootroot00000000000000include versioneer.py include src/wormhole_mailbox_server/_version.py include LICENSE README.md NEWS.md recursive-include docs *.md *.rst *.dot include docs/conf.py docs/Makefile include .coveragerc tox.ini snapcraft.yaml include misc/windows-build.cmd include misc/*.py include misc/munin/wormhole_active include misc/munin/wormhole_errors include misc/munin/wormhole_event_rate include misc/munin/wormhole_events include misc/munin/wormhole_events_alltime magic-wormhole-mailbox-server-0.4.1/NEWS.md000066400000000000000000000013441355461217500204740ustar00rootroot00000000000000 User-visible changes in "magic-wormhole-mailbox-server": ## Release 0.4.1 (11-Sep-2019) * listen on IPv4+IPv6 properly (#16) ## Release 0.4.0 (10-Sep-2019) * listen on IPv4+IPv6 socket by default (#16) * deallocate AppNamespace objects when empty (#12) * add client-version-uptake munin plugin * drop support for py3.3 and py3.4 ## Release 0.3.1 (23-Jun-2018) Record 'None' for when client doesn't supply a version, to make the math easier. ## Release 0.3.0 (23-Jun-2018) Fix munin plugins, record client versions in usageDB. 
## Release 0.2.0 (16-Jun-2018)

Improve install docs, clean up Munin plugins, add DB migration tool.

## Release 0.1.0 (19-Feb-2018)

Initial release: Forked from magic-wormhole-0.10.5 (14-Feb-2018)
magic-wormhole-mailbox-server-0.4.1/PKG-INFO000066400000000000000000000112431355461217500204720ustar00rootroot00000000000000Metadata-Version: 2.1
Name: magic-wormhole-mailbox-server
Version: 0.4.1
Summary: Securely transfer data between computers
Home-page: https://github.com/warner/magic-wormhole-mailbox-server
Author: Brian Warner
Author-email: warner-magic-wormhole@lothar.com
License: MIT
Description: # Magic Wormhole Mailbox Server

[![PyPI](http://img.shields.io/pypi/v/magic-wormhole-mailbox-server.svg)](https://pypi.python.org/pypi/magic-wormhole-mailbox-server)
[![Build Status](https://travis-ci.org/warner/magic-wormhole-mailbox-server.svg?branch=master)](https://travis-ci.org/warner/magic-wormhole-mailbox-server)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/mfnn5rsyfnrq576a/branch/master?svg=true)](https://ci.appveyor.com/project/warner/magic-wormhole-mailbox-server)
[![codecov.io](https://codecov.io/github/warner/magic-wormhole-mailbox-server/coverage.svg?branch=master)](https://codecov.io/github/warner/magic-wormhole-mailbox-server?branch=master)

This repository holds the code for the main server that [Magic-Wormhole](http://magic-wormhole.io) clients connect to.

The server performs store-and-forward delivery for small key-exchange and control messages. Bulk data is sent over a direct TCP connection, or through a [transit-relay](https://github.com/warner/magic-wormhole-transit-relay).

Clients connect with WebSockets, for low-latency delivery in the happy case where both clients are attached at the same time. Messages are stored to enable non-simultaneous clients to make forward progress. The server uses a small SQLite database for persistence (and clients will reconnect automatically, allowing the server to be rebooted without losing state). An optional "usage DB" tracks historical activity for status monitoring and operational maintenance.

## Installation

```
pip install magic-wormhole-mailbox-server
```

You either want to do this into a "user" environment (putting the ``twist`` and ``twistd`` executables in ``~/.local/bin/``) like this:

```
pip install --user magic-wormhole-mailbox-server
```

or put it into a virtualenv, to avoid modifying the system python's libraries, like this:

```
virtualenv venv
source venv/bin/activate
pip install magic-wormhole-mailbox-server
```

You probably *don't* want to use ``sudo`` when you run ``pip``, since the dependencies that get installed may conflict with other python programs on your computer. ``pipsi`` is usually a good way to install into isolated environments, but unfortunately it doesn't work for magic-wormhole-mailbox-server, because we don't have a dedicated command to start the server (``twist``, described below, comes from the ``twisted`` package, and pipsi doesn't expose executables from dependencies).

For the installation from source, ``clone`` this repo, ``cd`` into the folder, create and activate a virtualenv, and run ``pip install .``.

## Running A Server

Note that the standard [Magic-Wormhole](http://magic-wormhole.io) command-line tool is preconfigured to use a mailbox server hosted by the project, so running your own server is only necessary for custom applications that use magic-wormhole as a library.

The mailbox server is deployed as a twist/twistd plugin.
Running a basic server looks like this:

```
twist wormhole-mailbox --usage-db=usage.sqlite
```

Use ``twist wormhole-mailbox --help`` for more details.

If you use the default ``--port=tcp:4000``, on a machine named ``example.com``, then clients can reach your server with the following option:

```
wormhole --relay-url=ws://example.com:4000/v1 send FILENAME
```

## License, Compatibility

This library is released under the MIT license, see LICENSE for details.

This library is compatible with python2.7, and python3 (3.5 and higher).

Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Environment :: Console
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Security :: Cryptography
Classifier: Topic :: System :: Networking
Classifier: Topic :: System :: Systems Administration
Classifier: Topic :: Utilities
Description-Content-Type: text/markdown
Provides-Extra: dev
magic-wormhole-mailbox-server-0.4.1/README.md000066400000000000000000000065721355461217500206630ustar00rootroot00000000000000# Magic Wormhole Mailbox Server

[![PyPI](http://img.shields.io/pypi/v/magic-wormhole-mailbox-server.svg)](https://pypi.python.org/pypi/magic-wormhole-mailbox-server)
[![Build Status](https://travis-ci.org/warner/magic-wormhole-mailbox-server.svg?branch=master)](https://travis-ci.org/warner/magic-wormhole-mailbox-server)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/mfnn5rsyfnrq576a/branch/master?svg=true)](https://ci.appveyor.com/project/warner/magic-wormhole-mailbox-server)
[![codecov.io](https://codecov.io/github/warner/magic-wormhole-mailbox-server/coverage.svg?branch=master)](https://codecov.io/github/warner/magic-wormhole-mailbox-server?branch=master)

This repository holds the code for the main server that [Magic-Wormhole](http://magic-wormhole.io) clients connect to.

The server performs store-and-forward delivery for small key-exchange and control messages. Bulk data is sent over a direct TCP connection, or through a [transit-relay](https://github.com/warner/magic-wormhole-transit-relay).

Clients connect with WebSockets, for low-latency delivery in the happy case where both clients are attached at the same time. Messages are stored to enable non-simultaneous clients to make forward progress. The server uses a small SQLite database for persistence (and clients will reconnect automatically, allowing the server to be rebooted without losing state). An optional "usage DB" tracks historical activity for status monitoring and operational maintenance.

## Installation

```
pip install magic-wormhole-mailbox-server
```

You either want to do this into a "user" environment (putting the ``twist`` and ``twistd`` executables in ``~/.local/bin/``) like this:

```
pip install --user magic-wormhole-mailbox-server
```

or put it into a virtualenv, to avoid modifying the system python's libraries, like this:

```
virtualenv venv
source venv/bin/activate
pip install magic-wormhole-mailbox-server
```

You probably *don't* want to use ``sudo`` when you run ``pip``, since the dependencies that get installed may conflict with other python programs on your computer. ``pipsi`` is usually a good way to install into isolated environments, but unfortunately it doesn't work for magic-wormhole-mailbox-server, because we don't have a dedicated command to start the server (``twist``, described below, comes from the ``twisted`` package, and pipsi doesn't expose executables from dependencies).
For the installation from source, ``clone`` this repo, ``cd`` into the folder, create and activate a virtualenv, and run ``pip install .``. ## Running A Server Note that the standard [Magic-Wormhole](http://magic-wormhole.io) command-line tool is preconfigured to use a mailbox server hosted by the project, so running your own server is only necessary for custom applications that use magic-wormhole as a library. The mailbox server is deployed as a twist/twistd plugin. Running a basic server looks like this: ``` twist wormhole-mailbox --usage-db=usage.sqlite ``` Use ``twist wormhole-mailbox --help`` for more details. If you use the default ``--port=tcp:4000``, on a machine named ``example.com``, then clients can reach your server with the following option: ``` wormhole --relay-url=ws://example.com:4000/v1 send FILENAME ``` ## License, Compatibility This library is released under the MIT license, see LICENSE for details. This library is compatible with python2.7, and python3 (3.5 and higher). magic-wormhole-mailbox-server-0.4.1/docs/000077500000000000000000000000001355461217500203245ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/docs/Makefile000066400000000000000000000011631355461217500217650ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = Magic-Wormhole-Mailbox-Server SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) magic-wormhole-mailbox-server-0.4.1/docs/conf.py000066400000000000000000000132771355461217500216350ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals # Magic-Wormhole documentation build configuration file, created by # sphinx-quickstart on Sun Nov 12 10:24:09 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) from recommonmark.parser import CommonMarkParser source_parsers = { ".md": CommonMarkParser, } # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] #source_suffix = '.md' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'Magic-Wormhole-Mailbox-Server' copyright = u'2018, Brian Warner' author = u'Brian Warner' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # def _get_versions(): import os.path, sys, subprocess here = os.path.dirname(os.path.abspath(__file__)) parent = os.path.dirname(here) v = subprocess.check_output([sys.executable, "setup.py", "--version"], cwd=parent) v = v.decode("ascii") short = ".".join(v.split(".")[:2]) long = v return short, long version, release = _get_versions() # The short X.Y version. #version = u'0.10' # The full version, including alpha/beta/rc tags. #release = u'0.10.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'Magic-Wormhole-Mailbox-Serverdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Magic-Wormhole-Mailbox-Server.tex', u'Magic-Wormhole-Mailbox-Server Documentation', u'Brian Warner', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ (master_doc, 'magic-wormhole-mailbox-server', u'Magic-Wormhole-Mailbox-Server Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Magic-Wormhole-Mailbox-Server', u'Magic-Wormhole-Mailbox-Server Documentation', author, 'Magic-Wormhole-Mailbox-Server', 'One line description of project.', 'Miscellaneous'), ] magic-wormhole-mailbox-server-0.4.1/docs/index.rst000066400000000000000000000010441355461217500221640ustar00rootroot00000000000000.. Magic-Wormhole-Mailbox-Server documentation master file, created by sphinx-quickstart on Sun Nov 12 10:24:09 2017. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Magic-Wormhole-Mailbox-Server: backend server for magic-wormhole ================================================================ .. toctree:: :maxdepth: 2 :caption: Contents: welcome server-protocol Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` magic-wormhole-mailbox-server-0.4.1/docs/server-protocol.md000066400000000000000000000277571355461217500240350ustar00rootroot00000000000000# Rendezvous Server Protocol ## Concepts The Rendezvous Server provides queued delivery of binary messages from one client to a second, and vice versa. Each message contains a "phase" (a string) and a body (bytestring). These messages are queued in a "Mailbox" until the other side connects and retrieves them, but are delivered immediately if both sides are connected to the server at the same time. Mailboxes are identified by a large random string. "Nameplates", in contrast, have short numeric identities: in a wormhole code like "4-purple-sausages", the "4" is the nameplate. Each client has a randomly-generated "side", a short hex string, used to differentiate between echoes of a client's own message, and real messages from the other client. ## Application IDs The server isolates each application from the others. Each client provides an "App Id" when it first connects (via the "BIND" message), and all subsequent commands are scoped to this application. This means that nameplates (described below) and mailboxes can be re-used between different apps. The AppID is a unicode string. Both sides of the wormhole must use the same AppID, of course, or they'll never see each other. The server keeps track of which applications are in use for maintenance purposes. Each application should use a unique AppID. Developers are encouraged to use "DNSNAME/APPNAME" to obtain a unique one: e.g. the `bin/wormhole` file-transfer tool uses `lothar.com/wormhole/text-or-file-xfer`. ## WebSocket Transport At the lowest level, each client establishes (and maintains) a WebSocket connection to the Rendezvous Server. If the connection is lost (which could happen because the server was rebooted for maintenance, or because the client's network connection migrated from one network to another, or because the resident network gremlins decided to mess with you today), clients should reconnect after waiting a random (and exponentially-growing) delay. The Python implementation waits about 1 second after the first connection loss, growing by 50% each time, capped at 1 minute. 
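As an illustration, here is a minimal sketch of that reconnect schedule. The 1-second starting point, 50% growth, and 1-minute cap come from the description above; the jitter range is an assumption, since the protocol only asks for "a random delay":

```python
import random

def reconnect_delays(initial=1.0, factor=1.5, maximum=60.0):
    """Yield an endless series of reconnect delays, in seconds."""
    delay = initial
    while True:
        # randomize each delay so a fleet of clients doesn't reconnect in lockstep
        yield delay * random.uniform(0.5, 1.5)
        delay = min(delay * factor, maximum)

delays = reconnect_delays()
print([round(next(delays), 1) for _ in range(5)])  # e.g. [0.9, 1.6, 2.1, 4.0, 3.9]
```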
Each message to the server is a dictionary, with at least a `type` key, and other keys that depend upon the particular message type. Messages from server to client follow the same format.

`misc/dump-timing.py` is a debug tool which renders timing data gathered from the server and both clients, to identify protocol slowdowns and guide optimization efforts. To support this, the client/server messages include additional keys. Client->Server messages include a random `id` key, which is copied into the `ack` that is immediately sent back to the client for all commands (logged for the timing tool but otherwise ignored). Some client->server messages (`list`, `allocate`, `claim`, `release`, `close`, `ping`) provoke a direct response by the server: for these, `id` is copied into the response. This helps the tool correlate the command and response. All server->client messages have a `server_tx` timestamp (seconds since epoch, as a float), which records when the message left the server. Direct responses include a `server_rx` timestamp, to record when the client's command was received. The tool combines these with local timestamps (recorded by the client and not shared with the server) to build a full picture of network delays and round-trip times.

All messages are serialized as JSON, encoded to UTF-8, and the resulting bytes sent as a single "binary-mode" WebSocket payload.

The server can signal `error` for any message type it does not recognize. Clients and Servers must ignore unrecognized keys in otherwise-recognized messages. Clients must ignore unrecognized message types from the Server.

## Connection-Specific (Client-to-Server) Messages

The first thing each client sends to the server, immediately after the WebSocket connection is established, is a `bind` message. This specifies the AppID and side (in keys `appid` and `side`, respectively) that all subsequent messages will be scoped to. While technically each message could be independent (with its own `appid` and `side`), I thought it would be less confusing to use exactly one WebSocket per logical wormhole connection.

The first thing the server sends to each client is the `welcome` message. This is intended to deliver important status information to the client that might influence its operation. The Python client currently reacts to the following keys (and ignores all others):

* `current_cli_version`: prompts the user to upgrade if the server's advertised version is greater than the client's version (as derived from the git tag)
* `motd`: prints this message, if present; intended to inform users about performance problems, scheduled downtime, or to beg for donations to keep the server running
* `error`: causes the client to print the message and then terminate. If a future version of the protocol requires a rate-limiting CAPTCHA ticket or other authorization record, the server can send `error` (explaining the requirement) if it does not see this ticket arrive before the `bind`.

A `ping` will provoke a `pong`: these are only used by unit tests for synchronization purposes (to detect when a batch of messages has been fully processed by the server). NAT-binding refresh messages are handled by the WebSocket layer (by asking Autobahn to send a keepalive message every 60 seconds), and do not use `ping`.

If any client->server command is invalid (e.g.
it lacks a necessary key, or was sent in the wrong order), an `error` response will be sent. This response will include the error string in the `error` key, and a full copy of the original message dictionary in `orig`.

## Nameplates

Wormhole codes look like `4-purple-sausages`, consisting of a number followed by some random words. This number is called a "Nameplate".

On the Rendezvous Server, the Nameplate contains a pointer to a Mailbox. Clients can "claim" a nameplate, and then later "release" it. Each claim is for a specific side (so one client claiming the same nameplate multiple times only counts as one claim). Nameplates are deleted once the last client has released them, or after some period of inactivity.

Clients can either make up nameplates themselves, or (more commonly) ask the server to allocate one for them. Allocating a nameplate automatically claims it (to avoid a race condition), but for simplicity, clients send a claim for all nameplates, even ones which they've allocated themselves.

Nameplates (on the server) must live until the second client has learned about the associated mailbox, after which point they can be reused by other clients. So if two clients connect quickly, but then maintain a long-lived wormhole connection, they do not need to consume the limited space of short nameplates for that whole time.

The `allocate` command allocates a nameplate (the server returns one that is as short as possible), and the `allocated` response provides the answer. Clients can also send a `list` command to get back a `nameplates` response with all allocated nameplates for the bound AppID: this helps the code-input tab-completion feature know which prefixes to offer. The `nameplates` response returns a list of dictionaries, one per claimed nameplate, with at least an `id` key in each one (with the nameplate string).

Future versions may record additional attributes in the nameplate records, specifically a wordlist identifier and a code length (again to help with code-completion on the receiver).

## Mailboxes

The server provides a single "Mailbox" to each pair of connecting Wormhole clients. This holds an unordered set of messages, delivered immediately to connected clients, and queued for delivery to clients which connect later. Messages from both clients are merged together: clients use the included `side` identifier to distinguish echoes of their own messages from those coming from the other client.

Each mailbox is "opened" by some number of clients at a time, until all clients have closed it. Mailboxes are kept alive by either an open client, or a Nameplate which points to the mailbox (so when a Nameplate is deleted from inactivity, the corresponding Mailbox will be too).

The `open` command both marks the mailbox as being opened by the bound side, and also adds the WebSocket as subscribed to that mailbox, so new messages are delivered immediately to the connected client. There is no explicit ack to the `open` command, but since all clients add a message to the mailbox as soon as they connect, there will always be a `message` response shortly after the `open` goes through. The `close` command provokes a `closed` response.

The `close` command accepts an optional "mood" string: this allows clients to tell the server (in general terms) about their experiences with the wormhole interaction. The server records the mood in its "usage" record, so the server operator can get a sense of how many connections are succeeding and failing.
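For example, a client that completed a transfer successfully might finish with a message like this (the mailbox string and `id` here are made-up values):

```json
{"type": "close", "mailbox": "mf2ir2qakfjfqbi2vqqq", "mood": "happy", "id": "a1b2c3d4"}
```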
The moods currently recognized by the Rendezvous Server are:

* `happy` (default): the PAKE key-establishment worked, and the client saw at least one valid encrypted message from its peer
* `lonely`: the client gave up without hearing anything from its peer
* `scary`: the client saw an invalid encrypted message from its peer, indicating that either the wormhole code was typed in wrong, or an attacker tried (and failed) to guess the code
* `errory`: the client encountered some other error: protocol problem or internal error

The server will also record `pruney` if it deleted the mailbox due to inactivity, or `crowded` if more than two sides tried to access the mailbox.

When clients use the `add` command to add a client-to-client message, they will put the body (a bytestring) into the command as a hex-encoded string in the `body` key. They will also put the message's "phase", as a string, into the `phase` key. See client-protocol.md for details about how different phases are used.

When a client sends `open`, it will get back a `message` response for every message in the mailbox. It will also get a real-time `message` for every `add` performed by clients later. These `message` responses include "side" and "phase" from the sending client, and "body" (as a hex string, encoding the binary message body). The decoded "body" will either be a random-looking cryptographic value (for the PAKE message), or a random-looking encrypted blob (for the VERSION message, as well as all application-provided payloads). The `message` response will also include `id`, copied from the `id` of the `add` message (and used only by the timing-diagram tool).

The Rendezvous Server does not de-duplicate messages, nor does it retain ordering: clients must do both if they need to.

## All Message Types

This lists all message types, along with the type-specific keys for each (if any), and which ones provoke direct responses:

* S->C welcome {welcome:}
* (C->S) bind {appid:, side:}
* (C->S) list {} -> nameplates
* S->C nameplates {nameplates: [{id: str},..]}
* (C->S) allocate {} -> allocated
* S->C allocated {nameplate:}
* (C->S) claim {nameplate:} -> claimed
* S->C claimed {mailbox:}
* (C->S) release {nameplate:?} -> released
* S->C released
* (C->S) open {mailbox:}
* (C->S) add {phase: str, body: hex} -> message (to all connected clients)
* S->C message {side:, phase:, body:, id:}
* (C->S) close {mailbox:?, mood:?} -> closed
* S->C closed
* S->C ack
* (C->S) ping {ping: int} -> pong
* S->C pong {pong: int}
* S->C error {error: str, orig:}

## Persistence

The server stores all messages in a database, so it should not lose any information when it is restarted. The server will not send a direct response until any side-effects (such as the message being added to the mailbox) have been safely committed to the database.

The client library knows how to resume the protocol after a reconnection event, assuming the client process itself continues to run.

Clients which terminate entirely between messages (e.g. a secure chat application, which requires multiple wormhole messages to exchange address-book entries, and which must function even if the two apps are never both running at the same time) can use "Journal Mode" to ensure forward progress is made: see "journal.md" for details.
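To tie the message types together, here is a sketch of the commands a sending client might emit, serialized exactly as the transport section describes (JSON, UTF-8, one binary WebSocket frame per message). The appid is the real file-transfer AppID mentioned earlier, but the side, nameplate, mailbox, and body values are illustrative, and the network layer itself is omitted:

```python
import json

side = "0f9d2b"  # random per-client hex string (illustrative)
commands = [
    {"type": "bind", "appid": "lothar.com/wormhole/text-or-file-xfer", "side": side},
    {"type": "allocate"},                                  # server replies: allocated
    {"type": "claim", "nameplate": "4"},                   # claim even self-allocated nameplates
    {"type": "open", "mailbox": "mf2ir2qakfjfqbi2vqqq"},   # mailbox comes from the claimed reply
    {"type": "add", "phase": "pake", "body": "deadbeef"},  # body is hex-encoded
    {"type": "release", "nameplate": "4"},
    {"type": "close", "mailbox": "mf2ir2qakfjfqbi2vqqq", "mood": "happy"},
]
frames = [json.dumps(cmd).encode("utf-8") for cmd in commands]
for frame in frames:
    print(frame)
```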
magic-wormhole-mailbox-server-0.4.1/docs/welcome.md000066400000000000000000000042721355461217500223060ustar00rootroot00000000000000# Magic Wormhole Mailbox Server

[![Build Status](https://travis-ci.org/warner/magic-wormhole-mailbox-server.svg?branch=master)](https://travis-ci.org/warner/magic-wormhole-mailbox-server)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/mfnn5rsyfnrq576a/branch/master?svg=true)](https://ci.appveyor.com/project/warner/magic-wormhole-mailbox-server)
[![codecov.io](https://codecov.io/github/warner/magic-wormhole-mailbox-server/coverage.svg?branch=master)](https://codecov.io/github/warner/magic-wormhole-mailbox-server?branch=master)

This repository holds the code for the main server that [Magic-Wormhole](http://magic-wormhole.io) clients connect to.

The server performs store-and-forward delivery for small key-exchange and control messages. Bulk data is sent over a direct TCP connection, or through a [transit-relay](https://github.com/warner/magic-wormhole-transit-relay).

Clients connect with WebSockets, for low-latency delivery in the happy case where both clients are attached at the same time. Messages are stored to enable non-simultaneous clients to make forward progress. The server uses a small SQLite database for persistence (and clients will reconnect automatically, allowing the server to be rebooted without losing state). An optional "usage DB" tracks historical activity for status monitoring and operational maintenance.

## Running A Server

Note that the standard [Magic-Wormhole](http://magic-wormhole.io) command-line tool is preconfigured to use a mailbox server hosted by the project, so running your own server is only necessary for custom applications that use magic-wormhole as a library.

The mailbox server is deployed as a twist/twistd plugin. Running a basic server looks like this:

```
twist wormhole-mailbox --usage-db=usage.sqlite
```

Use ``twist wormhole-mailbox --help`` for more details.

If you use the default ``--port=tcp:4000``, on a machine named ``example.com``, then clients can reach your server with the following option:

```
wormhole --relay-url=ws://example.com:4000/v1 send FILENAME
```

## License, Compatibility

This library is released under the MIT license, see LICENSE for details.

This library is compatible with python2.7, 3.4, 3.5, and 3.6.
magic-wormhole-mailbox-server-0.4.1/misc/000077500000000000000000000000001355461217500203275ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/misc/migrate_channel_db.py000066400000000000000000000067401355461217500244750ustar00rootroot00000000000000"""Migrate the channel data from the old bundled Mailbox Server database.

The magic-wormhole package used to include both servers (Rendezvous and
Transit). "wormhole server" started both of these, and used the
"relay.sqlite" database to store both immediate server state and long-term
usage data.

These were split out to their own packages: version 0.11 omitted the Transit
Relay, and 0.12 removed the Mailbox Server in favor of the new
"magic-wormhole-mailbox-server" distribution.

This script reads the short-term channel data from the pre-0.12
wormhole-server relay.sqlite, and copies it into a new "relay.sqlite"
database in the current directory. It will refuse to touch an existing
"relay.sqlite" file.

The resulting "relay.sqlite" should be passed into --channel-db=, e.g.
"twist wormhole-mailbox --channel-db=.../PATH/TO/relay.sqlite".
However in most cases you can just store it in the default location of
"./relay.sqlite" and omit the --channel-db= argument.

Note that an idle server will have no channel data, so you could instead
just wait for the server to be empty (sqlite3 relay.sqlite message |grep
INSERT).
"""

from __future__ import unicode_literals, print_function
import sys
from wormhole_mailbox_server.database import (open_existing_db,
                                              create_channel_db)

source_fn = sys.argv[1]
source_db = open_existing_db(source_fn)
target_db = create_channel_db("relay.sqlite")

num_rows = 0
for row in source_db.execute("SELECT * FROM `mailboxes`").fetchall():
    target_db.execute("INSERT INTO `mailboxes`"
                      " (`app_id`, `id`, `updated`, `for_nameplate`)"
                      " VALUES(?,?,?,?)",
                      (row["app_id"], row["id"], row["updated"],
                       row["for_nameplate"]))
    num_rows += 1
for row in source_db.execute("SELECT * FROM `mailbox_sides`").fetchall():
    target_db.execute("INSERT INTO `mailbox_sides`"
                      " (`mailbox_id`, `opened`, `side`, `added`, `mood`)"
                      " VALUES(?,?,?,?,?)",
                      (row["mailbox_id"], row["opened"], row["side"],
                       row["added"], row["mood"]))
    num_rows += 1
for row in source_db.execute("SELECT * FROM `nameplates`").fetchall():
    target_db.execute("INSERT INTO `nameplates`"
                      " (`id`, `app_id`, `name`, `mailbox_id`, `request_id`)"
                      " VALUES(?,?,?,?,?)",
                      (row["id"], row["app_id"], row["name"],
                       row["mailbox_id"], row["request_id"]))
    num_rows += 1
for row in source_db.execute("SELECT * FROM `nameplate_sides`").fetchall():
    target_db.execute("INSERT INTO `nameplate_sides`"
                      " (`nameplates_id`, `claimed`, `side`, `added`)"
                      " VALUES(?,?,?,?)",
                      (row["nameplates_id"], row["claimed"], row["side"],
                       row["added"]))
    num_rows += 1
for row in source_db.execute("SELECT * FROM `messages`").fetchall():
    target_db.execute("INSERT INTO `messages`"
                      " (`app_id`, `mailbox_id`, `side`, `phase`, `body`, "
                      "  `server_rx`, `msg_id`)"
                      " VALUES(?,?,?,?,?,?,?)",
                      (row["app_id"], row["mailbox_id"], row["side"],
                       row["phase"], row["body"], row["server_rx"],
                       row["msg_id"]))
    num_rows += 1
target_db.commit()

print("channel database migrated (%d rows) into 'relay.sqlite'" % num_rows)
sys.exit(0)
magic-wormhole-mailbox-server-0.4.1/misc/migrate_usage_db.py000066400000000000000000000050171355461217500241650ustar00rootroot00000000000000"""Migrate the usage data from the old bundled Mailbox Server database.

The magic-wormhole package used to include both servers (Rendezvous and
Transit). "wormhole server" started both of these, and used the
"relay.sqlite" database to store both immediate server state and long-term
usage data.

These were split out to their own packages: version 0.11 omitted the Transit
Relay, and 0.12 removed the Mailbox Server in favor of the new
"magic-wormhole-mailbox-server" distribution.

This script reads the long-term usage data from the pre-0.12 wormhole-server
relay.sqlite, and copies it into a new "usage.sqlite" database in the
current directory. It will refuse to touch an existing "usage.sqlite" file.

The resulting "usage.sqlite" should be passed into --usage-db=, e.g.
"twist wormhole-mailbox --usage-db=.../PATH/TO/usage.sqlite".
""" from __future__ import unicode_literals, print_function import sys from wormhole_mailbox_server.database import open_existing_db, create_usage_db source_fn = sys.argv[1] source_db = open_existing_db(source_fn) target_db = create_usage_db("usage.sqlite") num_nameplate_rows = 0 for row in source_db.execute("SELECT * FROM `nameplate_usage`" " ORDER BY `started`").fetchall(): target_db.execute("INSERT INTO `nameplates`" " (`app_id`, `started`, `waiting_time`," " `total_time`, `result`)" " VALUES(?,?,?,?,?)", (row["app_id"], row["started"], row["waiting_time"], row["total_time"], row["result"])) num_nameplate_rows += 1 num_mailbox_rows = 0 for row in source_db.execute("SELECT * FROM `mailbox_usage`" " ORDER BY `started`").fetchall(): target_db.execute("INSERT INTO `mailboxes`" " (`app_id`, `for_nameplate`," " `started`, `total_time`, `waiting_time`," " `result`)" " VALUES(?,?,?,?,?,?)", (row["app_id"], row["for_nameplate"], row["started"], row["total_time"], row["waiting_time"], row["result"])) num_mailbox_rows += 1 target_db.execute("INSERT INTO `current`" " (`rebooted`, `updated`, `blur_time`," " `connections_websocket`)" " VALUES(?,?,?,?)", (0, 0, 0, 0)) target_db.commit() print("usage database migrated (%d+%d rows) into 'usage.sqlite'" % (num_nameplate_rows, num_mailbox_rows)) sys.exit(0) magic-wormhole-mailbox-server-0.4.1/misc/munin/000077500000000000000000000000001355461217500214555ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/misc/munin/wormhole_active000077500000000000000000000026471355461217500246030ustar00rootroot00000000000000#! /usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.channeldb /path/to/your/wormhole/server/channel.sqlite env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Active Channels graph_vlabel Channels graph_category wormhole nameplates.label Nameplates nameplates.draw LINE2 nameplates.type GAUGE mailboxes.label Mailboxes mailboxes.draw LINE2 mailboxes.type GAUGE messages.label Messages messages.draw LINE1 messages.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) usagedbfile = os.environ["usagedb"] assert os.path.exists(usagedbfile) usage_db = sqlite3.connect(usagedbfile) channeldbfile = os.environ["channeldb"] assert os.path.exists(channeldbfile) channel_db = sqlite3.connect(channeldbfile) MINUTE = 60.0 updated,rebooted = usage_db.execute("SELECT `updated`,`rebooted` FROM `current`").fetchone() if time.time() > updated + 6*MINUTE: sys.exit(1) # expired nameplates = channel_db.execute("SELECT COUNT() FROM `nameplates`").fetchone()[0] mailboxes = channel_db.execute("SELECT COUNT() FROM `mailboxes`").fetchone()[0] messages = channel_db.execute("SELECT COUNT() FROM `messages`").fetchone()[0] print("nameplates.value", nameplates) print("mailboxes.value", mailboxes) print("messages.value", messages) magic-wormhole-mailbox-server-0.4.1/misc/munin/wormhole_errors000077500000000000000000000035411355461217500246360ustar00rootroot00000000000000#! 
/usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Server Errors graph_vlabel Events Since Reboot graph_category wormhole nameplates.label Nameplate Errors (total) nameplates.draw LINE1 nameplates.type GAUGE mailboxes.label Mailboxes (total) mailboxes.draw LINE1 mailboxes.type GAUGE mailboxes_scary.label Mailboxes (scary) mailboxes_scary.draw LINE1 mailboxes_scary.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) usagedbfile = os.environ["usagedb"] assert os.path.exists(usagedbfile) usage_db = sqlite3.connect(usagedbfile) MINUTE = 60.0 updated,rebooted = usage_db.execute("SELECT `updated`,`rebooted` FROM `current`").fetchone() if time.time() > updated + 6*MINUTE: sys.exit(1) # expired r1 = usage_db.execute("SELECT COUNT() FROM `nameplates` WHERE `started` >= ?", (rebooted,)).fetchone()[0] r2 = usage_db.execute("SELECT COUNT() FROM `nameplates`" " WHERE `started` >= ?" " AND `result` = 'happy'", (rebooted,)).fetchone()[0] print("nameplates.value", (r1 - r2)) r1 = usage_db.execute("SELECT COUNT() FROM `mailboxes` WHERE `started` >= ?", (rebooted,)).fetchone()[0] r2 = usage_db.execute("SELECT COUNT() FROM `mailboxes` WHERE `started` >= ?" " AND `result` = 'happy'", (rebooted,)).fetchone()[0] print("mailboxes.value", (r1 - r2)) r = usage_db.execute("SELECT COUNT() FROM `mailboxes` WHERE `started` >= ?" " AND `result` = 'scary'", (rebooted,)).fetchone()[0] print("mailboxes_scary.value", r) magic-wormhole-mailbox-server-0.4.1/misc/munin/wormhole_event_rate000077500000000000000000000026751355461217500254650ustar00rootroot00000000000000#! /usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 from collections import defaultdict CONFIG = """\ graph_title Magic-Wormhole Server Events graph_vlabel Events per Hour graph_category wormhole happy.label Happy happy.draw LINE happy.type DERIVE happy.min 0 happy.max 60 happy.cdef happy,3600,* incomplete.label Incomplete incomplete.draw LINE incomplete.type DERIVE incomplete.min 0 incomplete.max 60 incomplete.cdef incomplete,3600,* scary.label Scary scary.draw LINE scary.type DERIVE scary.min 0 scary.max 60 scary.cdef scary,3600,* """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) usagedbfile = os.environ["usagedb"] assert os.path.exists(usagedbfile) usage_db = sqlite3.connect(usagedbfile) MINUTE = 60.0 updated,rebooted = usage_db.execute("SELECT `updated`,`rebooted` FROM `current`").fetchone() if time.time() > updated + 6*MINUTE: sys.exit(1) # expired atm = defaultdict(int) for mood in ["happy", "scary", "lonely", "errory", "pruney", "crowded"]: atm[mood] = usage_db.execute("SELECT COUNT() FROM `mailboxes`" " WHERE `result` = ?", (mood,)).fetchone()[0] print("happy.value", atm["happy"]) print("incomplete.value", (atm["pruney"] + atm["lonely"])) print("scary.value", atm["scary"]) magic-wormhole-mailbox-server-0.4.1/misc/munin/wormhole_events000077500000000000000000000037651355461217500246360ustar00rootroot00000000000000#! 
/usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Mailbox Events (since reboot) graph_vlabel Events Since Reboot graph_category wormhole happy.label Happy happy.draw LINE2 happy.type GAUGE total.label Total total.draw LINE1 total.type GAUGE scary.label Scary scary.draw LINE2 scary.type GAUGE pruney.label Pruney pruney.draw LINE1 pruney.type GAUGE lonely.label Lonely lonely.draw LINE2 lonely.type GAUGE errory.label Errory errory.draw LINE1 errory.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) usagedbfile = os.environ["usagedb"] assert os.path.exists(usagedbfile) usage_db = sqlite3.connect(usagedbfile) MINUTE = 60.0 updated,rebooted,blur = usage_db.execute( "SELECT `updated`,`rebooted`,`blur_time` FROM `current`").fetchone() if time.time() > updated + 6*MINUTE: sys.exit(1) # expired if blur is not None: rebooted = blur * (rebooted // blur) # After a reboot, the operator will get to see events that happen during # the first blur window (without this adjustment, those events would be # hidden since they'd appear to start before the reboot). The downside is # that the counter won't drop down to zero at a reboot (if there are recent # events). #r = usage_db.execute("SELECT COUNT(`mood`) FROM `mailboxes` WHERE `started` > ?", # (rebooted,)).fetchone() for mood in ["happy", "scary", "lonely", "errory", "pruney", "crowded"]: r = usage_db.execute("SELECT COUNT() FROM `mailboxes` WHERE `started` >= ?" " AND `result` = ?", (rebooted, mood)).fetchone()[0] print("%s.value" % mood, r) r = usage_db.execute("SELECT COUNT() FROM `mailboxes` WHERE `started` >= ?", (rebooted,)).fetchone()[0] print("total.value", r) magic-wormhole-mailbox-server-0.4.1/misc/munin/wormhole_events_alltime000077500000000000000000000025541355461217500263400ustar00rootroot00000000000000#! 
/usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Mailbox Events (all time) graph_vlabel Events Since DB Creation graph_category wormhole happy.label Happy happy.draw LINE2 happy.type GAUGE total.label Total total.draw LINE1 total.type GAUGE scary.label Scary scary.draw LINE2 scary.type GAUGE pruney.label Pruney pruney.draw LINE1 pruney.type GAUGE lonely.label Lonely lonely.draw LINE2 lonely.type GAUGE errory.label Errory errory.draw LINE1 errory.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) usagedbfile = os.environ["usagedb"] assert os.path.exists(usagedbfile) usage_db = sqlite3.connect(usagedbfile) MINUTE = 60.0 updated,rebooted = usage_db.execute("SELECT `updated`,`rebooted` FROM `current`").fetchone() if time.time() > updated + 6*MINUTE: sys.exit(1) # expired for mood in ["happy", "scary", "lonely", "errory", "pruney", "crowded"]: r = usage_db.execute("SELECT COUNT() FROM `mailboxes` WHERE `result` = ?", (mood,)).fetchone()[0] print("%s.value" % mood, r) r = usage_db.execute("SELECT COUNT() FROM `mailboxes`").fetchone()[0] print("total.value", r) magic-wormhole-mailbox-server-0.4.1/misc/windows-build.cmd000066400000000000000000000015061355461217500236050ustar00rootroot00000000000000@echo off :: To build extensions for 64 bit Python 3, we need to configure environment :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 4 :: :: More details at: :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows IF "%DISTUTILS_USE_SDK%"=="1" ( ECHO Configuring environment to build with MSVC on a 64bit architecture ECHO Using Windows SDK 7.1 "C:\Program Files\Microsoft SDKs\Windows\v7.1\Setup\WindowsSdkVer.exe" -q -version:v7.1 CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64 /release SET MSSdk=1 REM Need the following to allow tox to see the SDK compiler SET TOX_TESTENV_PASSENV=DISTUTILS_USE_SDK MSSdk INCLUDE LIB ) ELSE ( ECHO Using default MSVC build environment ) CALL %* magic-wormhole-mailbox-server-0.4.1/setup.cfg000066400000000000000000000004111355461217500212110ustar00rootroot00000000000000[wheel] universal = 1 [versioneer] vcs = git versionfile_source = src/wormhole_mailbox_server/_version.py versionfile_build = wormhole_mailbox_server/_version.py tag_prefix = parentdir_prefix = magic-wormhole-mailbox-server [egg_info] tag_build = tag_date = 0 magic-wormhole-mailbox-server-0.4.1/setup.py000066400000000000000000000027361355461217500211160ustar00rootroot00000000000000from setuptools import setup import versioneer commands = versioneer.get_cmdclass() trove_classifiers = [ "Development Status :: 4 - Beta", "Environment :: Console", "License :: OSI Approved :: MIT License", "Topic :: Security :: Cryptography", "Topic :: System :: Networking", "Topic :: System :: Systems Administration", "Topic :: Utilities", ] setup(name="magic-wormhole-mailbox-server", version=versioneer.get_version(), description="Securely transfer data between computers", long_description=open('README.md', 'rU').read(), long_description_content_type='text/markdown', author="Brian Warner", author_email="warner-magic-wormhole@lothar.com", license="MIT", url="https://github.com/warner/magic-wormhole-mailbox-server", classifiers=trove_classifiers, 
package_dir={"": "src"},
      packages=["wormhole_mailbox_server",
                "wormhole_mailbox_server.test",
                "twisted.plugins",
                ],
      package_data={"wormhole_mailbox_server": ["db-schemas/*.sql"]},
      install_requires=[
          "six",
          "attrs >= 16.3.0", # 16.3.0 adds __attrs_post_init__
          "twisted[tls] >= 17.5.0",
          "autobahn[twisted] >= 0.14.1",
      ],
      extras_require={
          ':sys_platform=="win32"': ["pywin32"],
          "dev": ["mock", "treq", "tox", "pyflakes"],
      },
      test_suite="wormhole_mailbox_server.test",
      cmdclass=commands,
      )
magic-wormhole-mailbox-server-0.4.1/src/000077500000000000000000000000001355461217500201635ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/magic_wormhole_mailbox_server.egg-info/000077500000000000000000000000001355461217500277525ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/magic_wormhole_mailbox_server.egg-info/PKG-INFO000066400000000000000000000112431355461217500310500ustar00rootroot00000000000000Metadata-Version: 2.1
Name: magic-wormhole-mailbox-server
Version: 0.4.1
Summary: Securely transfer data between computers
Home-page: https://github.com/warner/magic-wormhole-mailbox-server
Author: Brian Warner
Author-email: warner-magic-wormhole@lothar.com
License: MIT
Description: # Magic Wormhole Mailbox Server

[![PyPI](http://img.shields.io/pypi/v/magic-wormhole-mailbox-server.svg)](https://pypi.python.org/pypi/magic-wormhole-mailbox-server)
[![Build Status](https://travis-ci.org/warner/magic-wormhole-mailbox-server.svg?branch=master)](https://travis-ci.org/warner/magic-wormhole-mailbox-server)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/mfnn5rsyfnrq576a/branch/master?svg=true)](https://ci.appveyor.com/project/warner/magic-wormhole-mailbox-server)
[![codecov.io](https://codecov.io/github/warner/magic-wormhole-mailbox-server/coverage.svg?branch=master)](https://codecov.io/github/warner/magic-wormhole-mailbox-server?branch=master)

This repository holds the code for the main server that [Magic-Wormhole](http://magic-wormhole.io) clients connect to.

The server performs store-and-forward delivery for small key-exchange and control messages. Bulk data is sent over a direct TCP connection, or through a [transit-relay](https://github.com/warner/magic-wormhole-transit-relay).

Clients connect with WebSockets, for low-latency delivery in the happy case where both clients are attached at the same time. Messages are stored to enable non-simultaneous clients to make forward progress. The server uses a small SQLite database for persistence (and clients will reconnect automatically, allowing the server to be rebooted without losing state). An optional "usage DB" tracks historical activity for status monitoring and operational maintenance.

## Installation

```
pip install magic-wormhole-mailbox-server
```

You either want to do this into a "user" environment (putting the ``twist`` and ``twistd`` executables in ``~/.local/bin/``) like this:

```
pip install --user magic-wormhole-mailbox-server
```

or put it into a virtualenv, to avoid modifying the system python's libraries, like this:

```
virtualenv venv
source venv/bin/activate
pip install magic-wormhole-mailbox-server
```

You probably *don't* want to use ``sudo`` when you run ``pip``, since the dependencies that get installed may conflict with other python programs on your computer.
``pipsi`` is usually a good way to install into isolated environments, but unfortunately it doesn't work for magic-wormhole-mailbox-server, because we don't have a dedicated command to start the server (``twist``, described below, comes from the ``twisted`` package, and pipsi doesn't expose executables from dependencies). For the installation from source, ``clone`` this repo, ``cd`` into the folder, create and activate a virtualenv, and run ``pip install .``. ## Running A Server Note that the standard [Magic-Wormhole](http://magic-wormhole.io) command-line tool is preconfigured to use a mailbox server hosted by the project, so running your own server is only necessary for custom applications that use magic-wormhole as a library. The mailbox server is deployed as a twist/twistd plugin. Running a basic server looks like this: ``` twist wormhole-mailbox --usage-db=usage.sqlite ``` Use ``twist wormhole-mailbox --help`` for more details. If you use the default ``--port=tcp:4000``, on a machine named ``example.com``, then clients can reach your server with the following option: ``` wormhole --relay-url=ws://example.com:4000/v1 send FILENAME ``` ## License, Compatibility This library is released under the MIT license, see LICENSE for details. This library is compatible with python2.7, and python3 (3.5 and higher). Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: License :: OSI Approved :: MIT License Classifier: Topic :: Security :: Cryptography Classifier: Topic :: System :: Networking Classifier: Topic :: System :: Systems Administration Classifier: Topic :: Utilities Description-Content-Type: text/markdown Provides-Extra: dev magic-wormhole-mailbox-server-0.4.1/src/magic_wormhole_mailbox_server.egg-info/SOURCES.txt000066400000000000000000000035271355461217500316450ustar00rootroot00000000000000.coveragerc LICENSE MANIFEST.in NEWS.md README.md setup.cfg setup.py tox.ini versioneer.py docs/Makefile docs/conf.py docs/index.rst docs/server-protocol.md docs/welcome.md misc/migrate_channel_db.py misc/migrate_usage_db.py misc/windows-build.cmd misc/munin/wormhole_active misc/munin/wormhole_errors misc/munin/wormhole_event_rate misc/munin/wormhole_events misc/munin/wormhole_events_alltime src/magic_wormhole_mailbox_server.egg-info/PKG-INFO src/magic_wormhole_mailbox_server.egg-info/SOURCES.txt src/magic_wormhole_mailbox_server.egg-info/dependency_links.txt src/magic_wormhole_mailbox_server.egg-info/requires.txt src/magic_wormhole_mailbox_server.egg-info/top_level.txt src/twisted/plugins/magic_wormhole_mailbox.py src/wormhole_mailbox_server/__init__.py src/wormhole_mailbox_server/_version.py src/wormhole_mailbox_server/database.py src/wormhole_mailbox_server/increase_rlimits.py src/wormhole_mailbox_server/server.py src/wormhole_mailbox_server/server_tap.py src/wormhole_mailbox_server/server_websocket.py src/wormhole_mailbox_server/util.py src/wormhole_mailbox_server/web.py src/wormhole_mailbox_server/db-schemas/channel-v1.sql src/wormhole_mailbox_server/db-schemas/upgrade-usage-to-v2.sql src/wormhole_mailbox_server/db-schemas/usage-v1.sql src/wormhole_mailbox_server/db-schemas/usage-v2.sql src/wormhole_mailbox_server/test/__init__.py src/wormhole_mailbox_server/test/common.py src/wormhole_mailbox_server/test/test_config.py src/wormhole_mailbox_server/test/test_database.py src/wormhole_mailbox_server/test/test_rlimits.py src/wormhole_mailbox_server/test/test_server.py src/wormhole_mailbox_server/test/test_service.py 
src/wormhole_mailbox_server/test/test_stats.py src/wormhole_mailbox_server/test/test_util.py src/wormhole_mailbox_server/test/test_web.py src/wormhole_mailbox_server/test/test_ws_client.py src/wormhole_mailbox_server/test/ws_client.pymagic-wormhole-mailbox-server-0.4.1/src/magic_wormhole_mailbox_server.egg-info/dependency_links.txt000066400000000000000000000000011355461217500340200ustar00rootroot00000000000000 magic-wormhole-mailbox-server-0.4.1/src/magic_wormhole_mailbox_server.egg-info/requires.txt000066400000000000000000000002011355461217500323430ustar00rootroot00000000000000six attrs>=16.3.0 twisted[tls]>=17.5.0 autobahn[twisted]>=0.14.1 [:sys_platform=="win32"] pywin32 [dev] mock treq tox pyflakes magic-wormhole-mailbox-server-0.4.1/src/magic_wormhole_mailbox_server.egg-info/top_level.txt000066400000000000000000000000401355461217500324760ustar00rootroot00000000000000twisted wormhole_mailbox_server magic-wormhole-mailbox-server-0.4.1/src/twisted/000077500000000000000000000000001355461217500216465ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/twisted/plugins/000077500000000000000000000000001355461217500233275ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/twisted/plugins/magic_wormhole_mailbox.py000066400000000000000000000004321355461217500304070ustar00rootroot00000000000000from twisted.application.service import ServiceMaker Mailbox = ServiceMaker( "Magic-Wormhole Mailbox Server", # name "wormhole_mailbox_server.server_tap", # module "Provide the Mailbox server for Magic-Wormhole clients.", # desc "wormhole-mailbox", # tapname ) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/000077500000000000000000000000001355461217500251205ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/__init__.py000066400000000000000000000001351355461217500272300ustar00rootroot00000000000000 from ._version import get_versions __version__ = get_versions()['version'] del get_versions magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/_version.py000066400000000000000000000007611355461217500273220ustar00rootroot00000000000000 # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' { "date": "2019-09-11T00:16:54-0700", "dirty": false, "error": null, "full-revisionid": "b24af3af32ebb4ea0e28d92d956588764b8e47dd", "version": "0.4.1" } ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/database.py000066400000000000000000000140301355461217500272340ustar00rootroot00000000000000from __future__ import unicode_literals import os, shutil import sqlite3 import tempfile from pkg_resources import resource_string from twisted.python import log class DBError(Exception): pass def get_schema(name, version): schema_bytes = resource_string("wormhole_mailbox_server", "db-schemas/%s-v%d.sql" % (name, version)) return schema_bytes.decode("utf-8") def get_upgrader(name, new_version): try: schema_bytes = resource_string("wormhole_mailbox_server", "db-schemas/upgrade-%s-to-v%d.sql" % (name, new_version)) except EnvironmentError: # includes FileNotFoundError on py3 raise ValueError("no upgrader for %d" % new_version) return schema_bytes.decode("utf-8") CHANNELDB_TARGET_VERSION = 1 USAGEDB_TARGET_VERSION = 2 def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d def _initialize_db_schema(db, name, target_version): """Creates the application schema in the given database. """ log.msg("populating new database with schema %s v%s" % (name, target_version)) schema = get_schema(name, target_version) db.executescript(schema) db.execute("INSERT INTO version (version) VALUES (?)", (target_version,)) db.commit() def _initialize_db_connection(db): """Sets up the db connection object with a row factory and with necessary foreign key settings. """ db.row_factory = dict_factory db.execute("PRAGMA foreign_keys = ON") problems = db.execute("PRAGMA foreign_key_check").fetchall() if problems: raise DBError("failed foreign key check: %s" % (problems,)) def _open_db_connection(dbfile): """Open a new connection to the SQLite3 database at the given path. """ try: db = sqlite3.connect(dbfile) _initialize_db_connection(db) except (EnvironmentError, sqlite3.OperationalError, sqlite3.DatabaseError) as e: # this indicates that the file is not a compatible database format. # Perhaps it was created with an old version, or it might be junk. raise DBError("Unable to create/open db file %s: %s" % (dbfile, e)) return db def _get_temporary_dbfile(dbfile): """Get a temporary filename near the given path. """ fd, name = tempfile.mkstemp( prefix=os.path.basename(dbfile) + ".", dir=os.path.dirname(dbfile) ) os.close(fd) return name def _atomic_create_and_initialize_db(dbfile, name, target_version): """Create and return a new database, initialized with the application schema. If anything goes wrong, nothing is left at the ``dbfile`` path. """ temp_dbfile = _get_temporary_dbfile(dbfile) db = _open_db_connection(temp_dbfile) _initialize_db_schema(db, name, target_version) db.close() os.rename(temp_dbfile, dbfile) return _open_db_connection(dbfile) def _get_db(dbfile, name, target_version): """Open or create the given db file. The parent directory must exist. Returns the db connection object, or raises DBError. 
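    The normal entry points are create_or_upgrade_channel_db() and
    create_or_upgrade_usage_db() below; a direct call looks like, for
    example:

        db = _get_db("relay.sqlite", "channel", CHANNELDB_TARGET_VERSION)
        version = db.execute("SELECT version FROM version").fetchone()["version"]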
""" if dbfile == ":memory:": db = _open_db_connection(dbfile) _initialize_db_schema(db, name, target_version) elif os.path.exists(dbfile): db = _open_db_connection(dbfile) else: db = _atomic_create_and_initialize_db(dbfile, name, target_version) version = db.execute("SELECT version FROM version").fetchone()["version"] if version < target_version and dbfile != ":memory:": backup_fn = "%s-backup-v%d" % (dbfile, version) log.msg(" storing backup of v%d db in %s" % (version, backup_fn)) shutil.copy(dbfile, backup_fn) while version < target_version: log.msg(" need to upgrade from %s to %s" % (version, target_version)) try: upgrader = get_upgrader(name, version+1) except ValueError: log.msg(" unable to upgrade %s to %s" % (version, version+1)) raise DBError("Unable to upgrade %s to version %s, left at %s" % (dbfile, version+1, version)) log.msg(" executing upgrader v%s->v%s" % (version, version+1)) db.executescript(upgrader) db.commit() version = version+1 if version != target_version: raise DBError("Unable to handle db version %s" % version) return db def create_or_upgrade_channel_db(dbfile): return _get_db(dbfile, "channel", CHANNELDB_TARGET_VERSION) def create_or_upgrade_usage_db(dbfile): if dbfile is None: return None return _get_db(dbfile, "usage", USAGEDB_TARGET_VERSION) class DBDoesntExist(Exception): pass def open_existing_db(dbfile): assert dbfile != ":memory:" if not os.path.exists(dbfile): raise DBDoesntExist() return _open_db_connection(dbfile) class DBAlreadyExists(Exception): pass def create_channel_db(dbfile): """Create the given db file. Refuse to touch a pre-existing file. This is meant for use by migration tools, to create the output target""" if dbfile == ":memory:": db = _open_db_connection(dbfile) _initialize_db_schema(db, "channel", CHANNELDB_TARGET_VERSION) elif os.path.exists(dbfile): raise DBAlreadyExists() else: db = _atomic_create_and_initialize_db(dbfile, "channel", CHANNELDB_TARGET_VERSION) return db def create_usage_db(dbfile): if dbfile == ":memory:": db = _open_db_connection(dbfile) _initialize_db_schema(db, "usage", USAGEDB_TARGET_VERSION) elif os.path.exists(dbfile): raise DBAlreadyExists() else: db = _atomic_create_and_initialize_db(dbfile, "usage", USAGEDB_TARGET_VERSION) return db def dump_db(db): # to let _iterdump work, we need to restore the original row factory orig = db.row_factory try: db.row_factory = sqlite3.Row return "".join(db.iterdump()) finally: db.row_factory = orig magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/db-schemas/000077500000000000000000000000001355461217500271265ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/db-schemas/channel-v1.sql000066400000000000000000000040351355461217500316050ustar00rootroot00000000000000 -- note: anything which isn't an boolean, integer, or human-readable unicode -- string, (i.e. binary strings) will be stored as hex CREATE TABLE `version` ( `version` INTEGER -- contains one row, set to 1 ); -- Wormhole codes use a "nameplate": a short name which is only used to -- reference a specific (long-named) mailbox. The codes only use numeric -- nameplates, but the protocol and server allow can use arbitrary strings. 
CREATE TABLE `nameplates` ( `id` INTEGER PRIMARY KEY AUTOINCREMENT, `app_id` VARCHAR, `name` VARCHAR, `mailbox_id` VARCHAR REFERENCES `mailboxes`(`id`), `request_id` VARCHAR -- from 'allocate' message, for future deduplication ); CREATE INDEX `nameplates_idx` ON `nameplates` (`app_id`, `name`); CREATE INDEX `nameplates_mailbox_idx` ON `nameplates` (`app_id`, `mailbox_id`); CREATE INDEX `nameplates_request_idx` ON `nameplates` (`app_id`, `request_id`); CREATE TABLE `nameplate_sides` ( `nameplates_id` REFERENCES `nameplates`(`id`), `claimed` BOOLEAN, -- True after claim(), False after release() `side` VARCHAR, `added` INTEGER -- time when this side first claimed the nameplate ); -- Clients exchange messages through a "mailbox", which has a long (randomly -- unique) identifier and a queue of messages. -- `id` is randomly-generated and unique across all apps. CREATE TABLE `mailboxes` ( `app_id` VARCHAR, `id` VARCHAR PRIMARY KEY, `updated` INTEGER, -- time of last activity, used for pruning `for_nameplate` BOOLEAN -- allocated for a nameplate, not standalone ); CREATE INDEX `mailboxes_idx` ON `mailboxes` (`app_id`, `id`); CREATE TABLE `mailbox_sides` ( `mailbox_id` REFERENCES `mailboxes`(`id`), `opened` BOOLEAN, -- True after open(), False after close() `side` VARCHAR, `added` INTEGER, -- time when this side first opened the mailbox `mood` VARCHAR ); CREATE TABLE `messages` ( `app_id` VARCHAR, `mailbox_id` VARCHAR, `side` VARCHAR, `phase` VARCHAR, -- numeric or string `body` VARCHAR, `server_rx` INTEGER, `msg_id` VARCHAR ); CREATE INDEX `messages_idx` ON `messages` (`app_id`, `mailbox_id`); magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/db-schemas/upgrade-usage-to-v2.sql000066400000000000000000000011311355461217500333410ustar00rootroot00000000000000CREATE TABLE `client_versions` ( `app_id` VARCHAR, `side` VARCHAR, -- for deduplication of reconnects `connect_time` INTEGER, -- seconds since epoch, rounded to "blur time" -- the client sends us a 'client_version' tuple of (implementation, version) -- the Python client sends e.g. 
("python", "0.11.0") `implementation` VARCHAR, `version` VARCHAR ); CREATE INDEX `client_versions_time_idx` on `client_versions` (`connect_time`); CREATE INDEX `client_versions_appid_time_idx` on `client_versions` (`app_id`, `connect_time`); DELETE FROM `version`; INSERT INTO `version` (`version`) VALUES (2); magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/db-schemas/usage-v1.sql000066400000000000000000000036721355461217500313070ustar00rootroot00000000000000CREATE TABLE `version` ( `version` INTEGER -- contains one row ); CREATE TABLE `current` ( `rebooted` INTEGER, -- seconds since epoch of most recent reboot `updated` INTEGER, -- when `current` was last updated `blur_time` INTEGER, -- `started` is rounded to this, or None `connections_websocket` INTEGER -- number of live clients via websocket ); -- one row is created each time a nameplate is retired CREATE TABLE `nameplates` ( `app_id` VARCHAR, `started` INTEGER, -- seconds since epoch, rounded to "blur time" `waiting_time` INTEGER, -- seconds from start to 2nd side appearing, or None `total_time` INTEGER, -- seconds from open to last close/prune `result` VARCHAR -- happy, lonely, pruney, crowded -- nameplate moods: -- "happy": two sides open and close -- "lonely": one side opens and closes (no response from 2nd side) -- "pruney": channels which get pruned for inactivity -- "crowded": three or more sides were involved ); CREATE INDEX `nameplates_idx` ON `nameplates` (`app_id`, `started`); -- one row is created each time a mailbox is retired CREATE TABLE `mailboxes` ( `app_id` VARCHAR, `for_nameplate` BOOLEAN, -- allocated for a nameplate, not standalone `started` INTEGER, -- seconds since epoch, rounded to "blur time" `total_time` INTEGER, -- seconds from open to last close `waiting_time` INTEGER, -- seconds from start to 2nd side appearing, or None `result` VARCHAR -- happy, scary, lonely, errory, pruney -- rendezvous moods: -- "happy": both sides close with mood=happy -- "scary": any side closes with mood=scary (bad MAC, probably wrong pw) -- "lonely": any side closes with mood=lonely (no response from 2nd side) -- "errory": any side closes with mood=errory (other errors) -- "pruney": channels which get pruned for inactivity -- "crowded": three or more sides were involved ); CREATE INDEX `mailboxes_idx` ON `mailboxes` (`app_id`, `started`); CREATE INDEX `mailboxes_result_idx` ON `mailboxes` (`result`); magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/db-schemas/usage-v2.sql000066400000000000000000000047161355461217500313100ustar00rootroot00000000000000CREATE TABLE `version` ( `version` INTEGER -- contains one row ); CREATE TABLE `current` ( `rebooted` INTEGER, -- seconds since epoch of most recent reboot `updated` INTEGER, -- when `current` was last updated `blur_time` INTEGER, -- `started` is rounded to this, or None `connections_websocket` INTEGER -- number of live clients via websocket ); -- one row is created each time a nameplate is retired CREATE TABLE `nameplates` ( `app_id` VARCHAR, `started` INTEGER, -- seconds since epoch, rounded to "blur time" `waiting_time` INTEGER, -- seconds from start to 2nd side appearing, or None `total_time` INTEGER, -- seconds from open to last close/prune `result` VARCHAR -- happy, lonely, pruney, crowded -- nameplate moods: -- "happy": two sides open and close -- "lonely": one side opens and closes (no response from 2nd side) -- "pruney": channels which get pruned for inactivity -- "crowded": three or more sides were involved ); CREATE INDEX `nameplates_idx` ON 
`nameplates` (`app_id`, `started`); -- one row is created each time a mailbox is retired CREATE TABLE `mailboxes` ( `app_id` VARCHAR, `for_nameplate` BOOLEAN, -- allocated for a nameplate, not standalone `started` INTEGER, -- seconds since epoch, rounded to "blur time" `total_time` INTEGER, -- seconds from open to last close `waiting_time` INTEGER, -- seconds from start to 2nd side appearing, or None `result` VARCHAR -- happy, scary, lonely, errory, pruney -- rendezvous moods: -- "happy": both sides close with mood=happy -- "scary": any side closes with mood=scary (bad MAC, probably wrong pw) -- "lonely": any side closes with mood=lonely (no response from 2nd side) -- "errory": any side closes with mood=errory (other errors) -- "pruney": channels which get pruned for inactivity -- "crowded": three or more sides were involved ); CREATE INDEX `mailboxes_idx` ON `mailboxes` (`app_id`, `started`); CREATE INDEX `mailboxes_result_idx` ON `mailboxes` (`result`); CREATE TABLE `client_versions` ( `app_id` VARCHAR, `side` VARCHAR, -- for deduplication of reconnects `connect_time` INTEGER, -- seconds since epoch, rounded to "blur time" -- the client sends us a 'client_version' tuple of (implementation, version) -- the Python client sends e.g. ("python", "0.11.0") `implementation` VARCHAR, `version` VARCHAR ); CREATE INDEX `client_versions_time_idx` on `client_versions` (`connect_time`); CREATE INDEX `client_versions_appid_time_idx` on `client_versions` (`app_id`, `connect_time`); magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/increase_rlimits.py000066400000000000000000000026621355461217500310340ustar00rootroot00000000000000try: # 'resource' is unix-only from resource import getrlimit, setrlimit, RLIMIT_NOFILE except ImportError: # pragma: nocover getrlimit, setrlimit, RLIMIT_NOFILE = None, None, None # pragma: nocover from twisted.python import log def increase_rlimits(): if getrlimit is None: log.msg("unable to import 'resource', leaving rlimit alone") return soft, hard = getrlimit(RLIMIT_NOFILE) if soft >= 10000: log.msg("RLIMIT_NOFILE.soft was %d, leaving it alone" % soft) return # OS-X defaults to soft=7168, and reports a huge number for 'hard', # but won't accept anything more than soft=10240, so we can't just # set soft=hard. Linux returns (1024, 1048576) and is fine with # soft=hard. Cygwin is reported to return (256,-1) and accepts up to # soft=3200. So we try multiple values until something works. 
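    # Try the candidates from most to least ambitious: the first value the
    # OS accepts wins, and if none are accepted the limit is left unchanged.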
for newlimit in [hard, 10000, 3200, 1024]: log.msg("changing RLIMIT_NOFILE from (%s,%s) to (%s,%s)" % (soft, hard, newlimit, hard)) try: setrlimit(RLIMIT_NOFILE, (newlimit, hard)) log.msg("setrlimit successful") return except ValueError as e: log.msg("error during setrlimit: %s" % e) continue except: log.msg("other error during setrlimit, leaving it alone") log.err() return log.msg("unable to change rlimit, leaving it alone") magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/server.py000066400000000000000000000727601355461217500270140ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import os, random, base64 from collections import namedtuple from twisted.python import log from twisted.application import service def generate_mailbox_id(): return base64.b32encode(os.urandom(8)).lower().strip(b"=").decode("ascii") class CrowdedError(Exception): pass class ReclaimedError(Exception): pass Usage = namedtuple("Usage", ["started", "waiting_time", "total_time", "result"]) TransitUsage = namedtuple("TransitUsage", ["started", "waiting_time", "total_time", "total_bytes", "result"]) SidedMessage = namedtuple("SidedMessage", ["side", "phase", "body", "server_rx", "msg_id"]) class Mailbox: def __init__(self, app, db, usage_db, app_id, mailbox_id): self._app = app self._db = db self._usage_db = usage_db self._app_id = app_id self._mailbox_id = mailbox_id self._listeners = {} # handle -> (send_f, stop_f) # "handle" is a hashable object, for deregistration # send_f() takes a JSONable object, stop_f() has no args def open(self, side, when): # requires caller to db.commit() assert isinstance(side, type("")), type(side) db = self._db already = db.execute("SELECT * FROM `mailbox_sides`" " WHERE `mailbox_id`=? AND `side`=?", (self._mailbox_id, side)).fetchone() if not already: db.execute("INSERT INTO `mailbox_sides`" " (`mailbox_id`, `opened`, `side`, `added`)" " VALUES(?,?,?,?)", (self._mailbox_id, True, side, when)) # We accept re-opening a mailbox which a side previously closed, # unlike claim_nameplate(), which forbids any side from re-claiming a # nameplate which they previously released. (Nameplates forbid this # because the act of claiming a nameplate for the first time causes a # new mailbox to be created, which should only happen once). # Mailboxes have their own distinct objects (to manage # subscriptions), so closing one which was already closed requires # making a new object, which works by calling open() just before # close(). We really do want to support re-closing closed mailboxes, # because this enables intermittently-connected clients, who remember # sending a 'close' but aren't sure whether it was received or not, # then get shut down. Those clients will wake up and re-send the # 'close', until they receive the 'closed' ack message. self._touch(when) db.commit() # XXX: reconcile the need for this with the comment above def _touch(self, when): self._db.execute("UPDATE `mailboxes` SET `updated`=? WHERE `id`=?", (when, self._mailbox_id)) def get_messages(self): messages = [] db = self._db for row in db.execute("SELECT * FROM `messages`" " WHERE `app_id`=? AND `mailbox_id`=?" 
" ORDER BY `server_rx` ASC", (self._app_id, self._mailbox_id)).fetchall(): sm = SidedMessage(side=row["side"], phase=row["phase"], body=row["body"], server_rx=row["server_rx"], msg_id=row["msg_id"]) messages.append(sm) return messages def add_listener(self, handle, send_f, stop_f): #log.msg("add_listener", self._mailbox_id, handle) self._listeners[handle] = (send_f, stop_f) #log.msg(" added", len(self._listeners)) return self.get_messages() def remove_listener(self, handle): #log.msg("remove_listener", self._mailbox_id, handle) self._listeners.pop(handle, None) #log.msg(" removed", len(self._listeners)) def has_listeners(self): return bool(self._listeners) def count_listeners(self): return len(self._listeners) def broadcast_message(self, sm): for (send_f, stop_f) in self._listeners.values(): send_f(sm) def _add_message(self, sm): self._db.execute("INSERT INTO `messages`" " (`app_id`, `mailbox_id`, `side`, `phase`, `body`," " `server_rx`, `msg_id`)" " VALUES (?,?,?,?,?, ?,?)", (self._app_id, self._mailbox_id, sm.side, sm.phase, sm.body, sm.server_rx, sm.msg_id)) self._touch(sm.server_rx) self._db.commit() def add_message(self, sm): assert isinstance(sm, SidedMessage) self._add_message(sm) self.broadcast_message(sm) def close(self, side, mood, when): assert isinstance(side, type("")), type(side) db = self._db row = db.execute("SELECT * FROM `mailboxes`" " WHERE `app_id`=? AND `id`=?", (self._app_id, self._mailbox_id)).fetchone() if not row: return for_nameplate = row["for_nameplate"] row = db.execute("SELECT * FROM `mailbox_sides`" " WHERE `mailbox_id`=? AND `side`=?", (self._mailbox_id, side)).fetchone() if not row: return db.execute("UPDATE `mailbox_sides` SET `opened`=?, `mood`=?" " WHERE `mailbox_id`=? AND `side`=?", (False, mood, self._mailbox_id, side)) db.commit() # are any sides still open? side_rows = db.execute("SELECT * FROM `mailbox_sides`" " WHERE `mailbox_id`=?", (self._mailbox_id,)).fetchall() if any([sr["opened"] for sr in side_rows]): return # nope. delete and summarize db.execute("DELETE FROM `messages` WHERE `mailbox_id`=?", (self._mailbox_id,)) db.execute("DELETE FROM `mailbox_sides` WHERE `mailbox_id`=?", (self._mailbox_id,)) db.execute("DELETE FROM `mailboxes` WHERE `id`=?", (self._mailbox_id,)) if self._usage_db: self._app._summarize_mailbox_and_store(for_nameplate, side_rows, when, pruned=False) self._usage_db.commit() db.commit() # Shut down any listeners, just in case they're still lingering # around. 
for (send_f, stop_f) in self._listeners.values(): stop_f() self._listeners = {} self._app.free_mailbox(self._mailbox_id) def _shutdown(self): # used at test shutdown to accelerate client disconnects for (send_f, stop_f) in self._listeners.values(): stop_f() self._listeners = {} class AppNamespace(object): def __init__(self, db, usage_db, blur_usage, log_requests, app_id, allow_list): self._db = db self._usage_db = usage_db self._blur_usage = blur_usage self._log_requests = log_requests self._app_id = app_id self._mailboxes = {} self._allow_list = allow_list def log_client_version(self, server_rx, side, client_version): if self._blur_usage: server_rx = self._blur_usage * (server_rx // self._blur_usage) implementation = client_version[0] version = client_version[1] if self._usage_db: self._usage_db.execute("INSERT INTO `client_versions`" " (`app_id`, `side`, `connect_time`," " `implementation`, `version`)" " VALUES(?,?,?,?,?)", (self._app_id, side, server_rx, implementation, version)) self._usage_db.commit() def get_nameplate_ids(self): if not self._allow_list: return [] return self._get_nameplate_ids() def _get_nameplate_ids(self): db = self._db # TODO: filter this to numeric ids? c = db.execute("SELECT DISTINCT `name` FROM `nameplates`" " WHERE `app_id`=?", (self._app_id,)) return set([row["name"] for row in c.fetchall()]) def _find_available_nameplate_id(self): claimed = self._get_nameplate_ids() for size in range(1,4): # stick to 1-999 for now available = set() for id_int in range(10**(size-1), 10**size): id = "%d" % id_int if id not in claimed: available.add(id) if available: return random.choice(list(available)) # ouch, 999 currently claimed. Try random ones for a while. for tries in range(1000): id_int = random.randrange(1000, 1000*1000) id = "%d" % id_int if id not in claimed: return id raise ValueError("unable to find a free nameplate-id") def allocate_nameplate(self, side, when): nameplate_id = self._find_available_nameplate_id() mailbox_id = self.claim_nameplate(nameplate_id, side, when) del mailbox_id # ignored, they'll learn it from claim() return nameplate_id def claim_nameplate(self, name, side, when): # when we're done: # * there will be one row for the nameplate # * there will be one 'side' attached to it, with claimed=True # * a mailbox id and mailbox row will be created # * a mailbox 'side' will be attached, with opened=True assert isinstance(name, type("")), type(name) assert isinstance(side, type("")), type(side) db = self._db row = db.execute("SELECT * FROM `nameplates`" " WHERE `app_id`=? AND `name`=?", (self._app_id, name)).fetchone() if not row: if self._log_requests: log.msg("creating nameplate#%s for app_id %s" % (name, self._app_id)) mailbox_id = generate_mailbox_id() self._add_mailbox(mailbox_id, True, side, when) # ensure row exists sql = ("INSERT INTO `nameplates`" " (`app_id`, `name`, `mailbox_id`)" " VALUES(?,?,?)") npid = db.execute(sql, (self._app_id, name, mailbox_id) ).lastrowid else: npid = row["id"] mailbox_id = row["mailbox_id"] row = db.execute("SELECT * FROM `nameplate_sides`" " WHERE `nameplates_id`=? 
AND `side`=?", (npid, side)).fetchone() if not row: db.execute("INSERT INTO `nameplate_sides`" " (`nameplates_id`, `claimed`, `side`, `added`)" " VALUES(?,?,?,?)", (npid, True, side, when)) else: if not row["claimed"]: raise ReclaimedError("you cannot re-claim a nameplate that your side previously released") # since that might cause a new mailbox to be allocated db.commit() self.open_mailbox(mailbox_id, side, when) # may raise CrowdedError rows = db.execute("SELECT * FROM `nameplate_sides`" " WHERE `nameplates_id`=?", (npid,)).fetchall() if len(rows) > 2: # this line will probably never get hit: any crowding is noticed # on mailbox_sides first, inside open_mailbox() raise CrowdedError("too many sides have claimed this nameplate") return mailbox_id def release_nameplate(self, name, side, when): # when we're done: # * the 'claimed' flag will be cleared on the nameplate_sides row # * if the nameplate is now unused (no claimed sides): # * a usage record will be added # * the nameplate row will be removed # * the nameplate sides will be removed assert isinstance(name, type("")), type(name) assert isinstance(side, type("")), type(side) db = self._db np_row = db.execute("SELECT * FROM `nameplates`" " WHERE `app_id`=? AND `name`=?", (self._app_id, name)).fetchone() if not np_row: return npid = np_row["id"] row = db.execute("SELECT * FROM `nameplate_sides`" " WHERE `nameplates_id`=? AND `side`=?", (npid, side)).fetchone() if not row: return db.execute("UPDATE `nameplate_sides` SET `claimed`=?" " WHERE `nameplates_id`=? AND `side`=?", (False, npid, side)) db.commit() # now, are there any remaining claims? side_rows = db.execute("SELECT * FROM `nameplate_sides`" " WHERE `nameplates_id`=?", (npid,)).fetchall() claims = [1 for sr in side_rows if sr["claimed"]] if claims: return # delete and summarize db.execute("DELETE FROM `nameplate_sides` WHERE `nameplates_id`=?", (npid,)) db.execute("DELETE FROM `nameplates` WHERE `id`=?", (npid,)) if self._usage_db: self._summarize_nameplate_and_store(side_rows, when, pruned=False) self._usage_db.commit() db.commit() def _summarize_nameplate_and_store(self, side_rows, delete_time, pruned): # requires caller to self._usage_db.commit() u = self._summarize_nameplate_usage(side_rows, delete_time, pruned) self._usage_db.execute("INSERT INTO `nameplates`" " (`app_id`," " `started`, `total_time`, `waiting_time`, `result`)" " VALUES (?, ?,?,?,?)", (self._app_id, u.started, u.total_time, u.waiting_time, u.result)) def _summarize_nameplate_usage(self, side_rows, delete_time, pruned): times = sorted([row["added"] for row in side_rows]) started = times[0] if self._blur_usage: started = self._blur_usage * (started // self._blur_usage) waiting_time = None if len(times) > 1: waiting_time = times[1] - times[0] total_time = delete_time - times[0] result = "lonely" if len(times) == 2: result = "happy" if pruned: result = "pruney" if len(times) > 2: result = "crowded" return Usage(started=started, waiting_time=waiting_time, total_time=total_time, result=result) def _add_mailbox(self, mailbox_id, for_nameplate, side, when): assert isinstance(mailbox_id, type("")), type(mailbox_id) db = self._db row = db.execute("SELECT * FROM `mailboxes`" " WHERE `app_id`=? 
AND `id`=?", (self._app_id, mailbox_id)).fetchone() if not row: self._db.execute("INSERT INTO `mailboxes`" " (`app_id`, `id`, `for_nameplate`, `updated`)" " VALUES(?,?,?,?)", (self._app_id, mailbox_id, for_nameplate, when)) # we don't need a commit here, because mailbox.open() only # does SELECT FROM `mailbox_sides`, not from `mailboxes` def open_mailbox(self, mailbox_id, side, when): assert isinstance(mailbox_id, type("")), type(mailbox_id) self._add_mailbox(mailbox_id, False, side, when) # ensure row exists db = self._db if not mailbox_id in self._mailboxes: # ensure Mailbox object exists if self._log_requests: log.msg("spawning #%s for app_id %s" % (mailbox_id, self._app_id)) self._mailboxes[mailbox_id] = Mailbox(self, self._db, self._usage_db, self._app_id, mailbox_id) mailbox = self._mailboxes[mailbox_id] # delegate to mailbox.open() to add a row to mailbox_sides, and # update the mailbox.updated timestamp mailbox.open(side, when) db.commit() rows = db.execute("SELECT * FROM `mailbox_sides`" " WHERE `mailbox_id`=?", (mailbox_id,)).fetchall() if len(rows) > 2: raise CrowdedError("too many sides have opened this mailbox") return mailbox def free_mailbox(self, mailbox_id): # called from Mailbox.close(), which deletes any # messages if mailbox_id in self._mailboxes: self._mailboxes.pop(mailbox_id) #if self._log_requests: # log.msg("freed+killed #%s, now have %d DB mailboxes, %d live" % # (mailbox_id, len(self.get_claimed()), len(self._mailboxes))) def _summarize_mailbox_and_store(self, for_nameplate, side_rows, delete_time, pruned): db = self._usage_db u = self._summarize_mailbox(side_rows, delete_time, pruned) db.execute("INSERT INTO `mailboxes`" " (`app_id`, `for_nameplate`," " `started`, `total_time`, `waiting_time`, `result`)" " VALUES (?,?, ?,?,?,?)", (self._app_id, for_nameplate, u.started, u.total_time, u.waiting_time, u.result)) def _summarize_mailbox(self, side_rows, delete_time, pruned): times = sorted([row["added"] for row in side_rows]) started = times[0] if self._blur_usage: started = self._blur_usage * (started // self._blur_usage) waiting_time = None if len(times) > 1: waiting_time = times[1] - times[0] total_time = delete_time - times[0] num_sides = len(times) if num_sides == 0: result = "quiet" elif num_sides == 1: result = "lonely" else: result = "happy" # "mood" is only recorded at close() moods = [row["mood"] for row in side_rows if row.get("mood")] if "lonely" in moods: result = "lonely" if "errory" in moods: result = "errory" if "scary" in moods: result = "scary" if pruned: result = "pruney" if num_sides > 2: result = "crowded" return Usage(started=started, waiting_time=waiting_time, total_time=total_time, result=result) def prune(self, now, old): # The pruning check runs every 5 minutes (EXPIRATION_CHECK_PERIOD in # server_tap.py), and "old" is defined to be 11 minutes ago # (CHANNEL_EXPIRATION_TIME; unit tests can use different values). The # client is therefore allowed to disconnect for just under 11 minutes # without losing the channel (nameplate, mailbox, and messages). # Each time a client does something, the mailbox.updated field is # updated with the current timestamp. If a client is subscribed to # the mailbox when the pruning check runs, the "updated" field is also # updated. After that check, if the "updated" field is "old", the # channel is deleted. # For now, pruning is logged even if log_requests is False, to debug # the pruning process, and since pruning is triggered by a timer # instead of by user action.
It does reveal which mailboxes were # present when the pruning process began, though, so in the long run # it should do less logging. log.msg(" prune begins (%s)" % self._app_id) db = self._db modified = False for mailbox in self._mailboxes.values(): if mailbox.has_listeners(): log.msg("touch %s because listeners" % mailbox._mailbox_id) mailbox._touch(now) db.commit() # make sure the updates are visible below new_mailboxes = set() old_mailboxes = set() for row in db.execute("SELECT * FROM `mailboxes` WHERE `app_id`=?", (self._app_id,)).fetchall(): mailbox_id = row["id"] log.msg(" 1: age=%s, old=%s, %s" % (now - row["updated"], now - old, mailbox_id)) if row["updated"] > old: new_mailboxes.add(mailbox_id) else: old_mailboxes.add(mailbox_id) log.msg(" 2: mailboxes:", new_mailboxes, old_mailboxes) old_nameplates = set() for row in db.execute("SELECT * FROM `nameplates` WHERE `app_id`=?", (self._app_id,)).fetchall(): npid = row["id"] mailbox_id = row["mailbox_id"] if mailbox_id in old_mailboxes: old_nameplates.add(npid) log.msg(" 3: old_nameplates dbids", old_nameplates) for npid in old_nameplates: log.msg(" deleting nameplate with dbid", npid) side_rows = db.execute("SELECT * FROM `nameplate_sides`" " WHERE `nameplates_id`=?", (npid,)).fetchall() db.execute("DELETE FROM `nameplate_sides` WHERE `nameplates_id`=?", (npid,)) db.execute("DELETE FROM `nameplates` WHERE `id`=?", (npid,)) if self._usage_db: self._summarize_nameplate_and_store(side_rows, now, pruned=True) modified = True # delete all messages for old mailboxes # delete all old mailboxes for mailbox_id in old_mailboxes: log.msg(" deleting mailbox", mailbox_id) row = db.execute("SELECT * FROM `mailboxes`" " WHERE `id`=?", (mailbox_id,)).fetchone() for_nameplate = row["for_nameplate"] side_rows = db.execute("SELECT * FROM `mailbox_sides`" " WHERE `mailbox_id`=?", (mailbox_id,)).fetchall() db.execute("DELETE FROM `messages` WHERE `mailbox_id`=?", (mailbox_id,)) db.execute("DELETE FROM `mailbox_sides` WHERE `mailbox_id`=?", (mailbox_id,)) db.execute("DELETE FROM `mailboxes` WHERE `id`=?", (mailbox_id,)) if self._usage_db: self._summarize_mailbox_and_store(for_nameplate, side_rows, now, pruned=True) modified = True if modified: db.commit() if self._usage_db: self._usage_db.commit() in_use = bool(self._mailboxes) log.msg(" prune complete, modified=%s, in_use=%s" % (modified, in_use)) return in_use def count_listeners(self): return sum(mailbox.count_listeners() for mailbox in self._mailboxes.values()) def _shutdown(self): for channel in self._mailboxes.values(): channel._shutdown() class Server(service.MultiService): def __init__(self, db, allow_list, welcome, blur_usage, usage_db=None, log_file=None): service.MultiService.__init__(self) self._db = db self._allow_list = allow_list self._welcome = welcome self._blur_usage = blur_usage self._log_requests = blur_usage is None self._usage_db = usage_db self._log_file = log_file self._apps = {} def get_welcome(self): return self._welcome def get_log_requests(self): return self._log_requests def get_app(self, app_id): assert isinstance(app_id, type("")) if not app_id in self._apps: if self._log_requests: log.msg("spawning app_id %s" % (app_id,)) self._apps[app_id] = AppNamespace( self._db, self._usage_db, self._blur_usage, self._log_requests, app_id, self._allow_list, ) return self._apps[app_id] def get_all_apps(self): apps = set() for row in self._db.execute("SELECT DISTINCT `app_id`" " FROM `nameplates`").fetchall(): apps.add(row["app_id"]) for row in self._db.execute("SELECT DISTINCT `app_id`" "
FROM `mailboxes`").fetchall(): apps.add(row["app_id"]) for row in self._db.execute("SELECT DISTINCT `app_id`" " FROM `messages`").fetchall(): apps.add(row["app_id"]) return apps def prune_all_apps(self, now, old): # As with AppNamespace.prune_old_mailboxes, we log for now. log.msg("beginning app prune") for app_id in sorted(self.get_all_apps()): log.msg(" app prune checking %r" % (app_id,)) app = self.get_app(app_id) in_use = app.prune(now, old) if not in_use: del self._apps[app_id] log.msg("app prune ends, %d apps" % len(self._apps)) def dump_stats(self, now, rebooted): if not self._usage_db: return # write everything to self._usage_db # Most of our current-status state is recorded in the channel_db, and # our historical state goes into the usage_db. Both are updated each # time something changes, so stats monitors can just read things out # from there. The one bit of runtime state that isn't recorded each # time is the number of connected clients, which will differ from the # number of live "sides" briefly after they disconnect but before the # mailbox is closed. connections = sum(app.count_listeners() for app in self._apps.values()) # TODO: this is all connections, not just the websocket ones. We don't # have non-websocket connections yet, but when we add them, this needs # to be updated. Probably by asking the WebSocketServerFactory to count # them. self._usage_db.execute("DELETE FROM `current`") self._usage_db.execute("INSERT INTO `current`" " (`rebooted`, `updated`, `blur_time`," " `connections_websocket`)" " VALUES(?,?,?,?)", (rebooted, now, self._blur_usage, connections)) self._usage_db.commit() # current status: expected to be zero most of the time #c["nameplates_total"] = q("SELECT COUNT() FROM `nameplates`") # TODO: nameplates with only one side (most of them) # TODO: nameplates with two sides (very fleeting) # TODO: nameplates with three or more sides (crowded, unlikely) #c["mailboxes_total"] = q("SELECT COUNT() FROM `mailboxes`") # TODO: mailboxes with only one side (most of them) # TODO: mailboxes with two sides (somewhat fleeting, in-transit) # TODO: mailboxes with three or more sides (unlikely) #c["messages_total"] = q("SELECT COUNT() FROM `messages`") # recent timings (last 100 operations) # TODO: median/etc of nameplate.total_time # TODO: median/etc of mailbox.waiting_time (should be the same) # TODO: median/etc of mailbox.total_time # other # TODO: mailboxes without nameplates (needs new DB schema) def startService(self): service.MultiService.startService(self) log.msg("Wormhole relay server running") if self._blur_usage: log.msg("blurring access times to %d seconds" % self._blur_usage) #log.msg("not logging HTTP requests") else: log.msg("not blurring access times") if not self._allow_list: log.msg("listing of allocated nameplates disallowed") def stopService(self): # This forcibly boots any clients that are still connected, which # helps with unit tests that use threads for both clients. One client # hits an exception, which terminates the test (and .tearDown calls # stopService on the relay), but the other client (in its thread) is # still waiting for a message. By killing off all connections, that # other client gets an error, and exits promptly. 
for app in self._apps.values(): app._shutdown() return service.MultiService.stopService(self) def make_server(db, allow_list=True, advertise_version=None, signal_error=None, blur_usage=None, usage_db=None, log_file=None, ): if blur_usage: log.msg("blurring access times to %d seconds" % blur_usage) else: log.msg("not blurring access times") welcome = { # adding .motd will cause all clients to display the message, # then keep running normally #"motd": "Welcome to the public relay.\nPlease enjoy this service.", # adding .error will cause all clients to fail, with this message #"error": "This server has been disabled, see URL for details.", } if advertise_version: # The primary (python CLI) implementation will emit a message if # its version does not match this key. If/when we have # distributions which include older version, but we still expect # them to be compatible, stop sending this key. welcome["current_cli_version"] = advertise_version if signal_error: welcome["error"] = signal_error return Server(db, allow_list=allow_list, welcome=welcome, blur_usage=blur_usage, usage_db=usage_db, log_file=log_file) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/server_tap.py000066400000000000000000000104701355461217500276460ustar00rootroot00000000000000import os, json, time from twisted.internet import reactor from twisted.python import usage, log from twisted.application.service import MultiService from twisted.application.internet import (TimerService, StreamServerEndpointService) from twisted.internet import endpoints from .increase_rlimits import increase_rlimits from .server import make_server from .web import make_web_server from .database import create_or_upgrade_channel_db, create_or_upgrade_usage_db LONGDESC = """This plugin sets up a 'Mailbox' server for magic-wormhole. This service forwards short messages between clients, to perform key exchange and connection setup.""" class Options(usage.Options): synopsis = "[--port=] [--log-fd] [--blur-usage=] [--usage-db=]" longdesc = LONGDESC optParameters = [ ("port", "p", "tcp:4000:interface=\:\:", "endpoint to listen on"), ("blur-usage", None, None, "round logged access times to improve privacy"), ("log-fd", None, None, "write JSON usage logs to this file descriptor"), ("channel-db", None, "relay.sqlite", "location for the state database"), ("usage-db", None, None, "record usage data (SQLite)"), ("advertise-version", None, None, "version to recommend to clients"), ("signal-error", None, None, "force all clients to fail with a message"), ] optFlags = [ ("disallow-list", None, "refuse to send list of allocated nameplates"), ] def __init__(self): super(Options, self).__init__() self["websocket-protocol-options"] = [] self["allow-list"] = True def opt_disallow_list(self): self["allow-list"] = False def opt_log_fd(self, arg): self["log-fd"] = int(arg) def opt_blur_usage(self, arg): # --blur-usage= is in seconds. If the option isn't provided, we'll keep # the default of None self["blur-usage"] = int(arg) def opt_websocket_protocol_option(self, arg): """A websocket server protocol option to configure: OPTION=VALUE. 
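        The VALUE is parsed as JSON, so plain strings need an extra layer
        of quoting, e.g. --websocket-protocol-option autoPingInterval=30
        or --websocket-protocol-option 'foo="bar"'.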
This option can be provided multiple times.""" try: key, value = arg.split("=", 1) except ValueError: raise usage.UsageError("format options as OPTION=VALUE") try: value = json.loads(value) except: raise usage.UsageError("could not parse JSON value for {}".format(key)) self["websocket-protocol-options"].append((key, value)) SECONDS = 1.0 MINUTE = 60*SECONDS # CHANNEL_EXPIRATION_TIME should be longer than EXPIRATION_CHECK_PERIOD CHANNEL_EXPIRATION_TIME = 11*MINUTE EXPIRATION_CHECK_PERIOD = 5*MINUTE def makeService(config, channel_db="relay.sqlite", reactor=reactor): increase_rlimits() parent = MultiService() channel_db = create_or_upgrade_channel_db(config["channel-db"]) usage_db = create_or_upgrade_usage_db(config["usage-db"]) log_file = (os.fdopen(int(config["log-fd"]), "w") if config["log-fd"] is not None else None) server = make_server(channel_db, allow_list=config["allow-list"], advertise_version=config["advertise-version"], signal_error=config["signal-error"], blur_usage=config["blur-usage"], usage_db=usage_db, log_file=log_file, ) server.setServiceParent(parent) rebooted = time.time() def expire(): now = time.time() old = now - CHANNEL_EXPIRATION_TIME try: server.prune_all_apps(now, old) except Exception as e: # catch-and-log exceptions during prune, so a single error won't # kill the loop. See #13 for details. log.msg("error during prune_all_apps") log.err(e) server.dump_stats(now, rebooted=rebooted) TimerService(EXPIRATION_CHECK_PERIOD, expire).setServiceParent(parent) log_requests = config["blur-usage"] is None site = make_web_server(server, log_requests, config["websocket-protocol-options"]) ep = endpoints.serverFromString(reactor, config["port"]) # to listen StreamServerEndpointService(ep, site).setServiceParent(parent) log.msg("websocket listening on ws://HOSTNAME:PORT/v1") return parent magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/server_websocket.py000066400000000000000000000315351355461217500310550ustar00rootroot00000000000000from __future__ import unicode_literals import time from twisted.internet import reactor from twisted.python import log from autobahn.twisted import websocket from .server import CrowdedError, ReclaimedError, SidedMessage from .util import dict_to_bytes, bytes_to_dict # The WebSocket allows the client to send "commands" to the server, and the # server to send "responses" to the client. Note that commands and responses # are not necessarily one-to-one. All commands provoke an "ack" response # (with a copy of the original message) for timing, testing, and # synchronization purposes. All commands and responses are JSON-encoded. # Each WebSocket connection is bound to one "appid" and one "side", which are # set by the "bind" command (which must be the first command on the # connection), and must be set before any other command will be accepted. # Each connection can be bound to a single "mailbox" (a two-sided # store-and-forward queue, identified by the "mailbox id": a long, randomly # unique string identifier) by using the "open" command. This protects the # mailbox from idle closure, enables the "add" command (to put new messages # in the queue), and triggers delivery of past and future messages via the # "message" response. The "close" command removes the binding (but note that # it does not enable the subsequent binding of a second mailbox). When the # last side closes a mailbox, its contents are deleted. 
# Additionally, the connection can be bound to a single "nameplate", which is # a short identifier that makes up the first component of a wormhole code. Each # nameplate points to a single long-id "mailbox". The "allocate" message # determines the shortest available numeric nameplate, reserves it, and # returns the nameplate id. "list" returns a list of all numeric nameplates # which currently have only one side active (i.e. they are waiting for a # partner). The "claim" message reserves an arbitrary nameplate id (perhaps # the receiver of a wormhole connection typed in a code they got from the # sender, or perhaps the two sides agreed upon a code offline and are both # typing it in), and the "release" message releases it. When every side that # has claimed the nameplate has also released it, the nameplate is # deallocated (but they will probably keep the underlying mailbox open). # "claim" and "release" may only be called once per connection; however, calls # across connections (assuming a consistent "side") are idempotent. [connect, # claim, disconnect, connect, claim] is legal, but not useful, as is a # "release" for a nameplate that nobody is currently claiming. # "open" and "close" may only be called once per connection. They are # basically idempotent; however, "open" doubles as a subscribe action. So # [connect, open, disconnect, connect, open] is legal *and* useful (without # the second "open", the second connection would not be subscribed to hear # about new messages).
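# For orientation, a minimal happy-path session looks roughly like this on
# the wire (a sketch: "ack" responses and real message bodies are elided,
# and the identifiers here are made up):
#   -> {"type": "bind", "appid": "...", "side": "abc"}
#   -> {"type": "claim", "nameplate": "4"}
#   <- {"type": "claimed", "mailbox": "mb123"}
#   -> {"type": "open", "mailbox": "mb123"}
#   -> {"type": "add", "phase": "pake", "body": "<hex>"}
#   <- {"type": "message", ...}  (the echoed copy, plus the peer's messages)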
# connection -> welcome # <- {type: "welcome", welcome: {}} # .welcome keys are all optional: # current_cli_version: out-of-date clients display a warning # motd: all clients display message, then continue normally # error: all clients display message, then terminate with error # -> {type: "bind", appid:, side:} # # -> {type: "list"} -> nameplates # <- {type: "nameplates", nameplates: [{id: str,..},..]} # -> {type: "allocate"} -> nameplate, mailbox # <- {type: "allocated", nameplate: str} # -> {type: "claim", nameplate: str} -> mailbox # <- {type: "claimed", mailbox: str} # -> {type: "release"} # .nameplate is optional, but must match previous claim() # <- {type: "released"} # # -> {type: "open", mailbox: str} -> message # sends old messages now, and subscribes to deliver future messages # <- {type: "message", side:, phase:, body:, msg_id:} # body is hex # -> {type: "add", phase: str, body: hex} # will send echo in a "message" # # -> {type: "close", mood: str} -> closed # .mailbox is optional, but must match previous open() # <- {type: "closed"} # # <- {type: "error", error: str, orig: {}} # in response to malformed msgs # for tests that need to know when a message has been processed: # -> {type: "ping", ping: int} -> pong (does not require bind/claim) # <- {type: "pong", pong: int} class Error(Exception): def __init__(self, explain): self._explain = explain class WebSocketServer(websocket.WebSocketServerProtocol): def __init__(self): websocket.WebSocketServerProtocol.__init__(self) self._app = None self._side = None self._did_allocate = False # only one allocate() per websocket self._listening = False self._did_claim = False self._nameplate_id = None self._did_release = False self._did_open = False self._mailbox = None self._mailbox_id = None self._did_close = False def onConnect(self, request): rv = self.factory.server if rv.get_log_requests(): log.msg("ws client connecting: %s" % (request.peer,)) self._reactor = self.factory.reactor def onOpen(self): rv = self.factory.server self.send("welcome", welcome=rv.get_welcome()) def onMessage(self, payload, isBinary): server_rx = time.time() msg = bytes_to_dict(payload) try: if "type" not in msg: raise Error("missing 'type'") self.send("ack", id=msg.get("id")) mtype = msg["type"] if mtype == "ping": return self.handle_ping(msg) if mtype == "bind": return self.handle_bind(msg, server_rx) if not self._app: raise Error("must bind first") if mtype == "list": return self.handle_list() if mtype == "allocate": return self.handle_allocate(server_rx) if mtype == "claim": return self.handle_claim(msg, server_rx) if mtype == "release": return self.handle_release(msg, server_rx) if mtype == "open": return self.handle_open(msg, server_rx) if mtype == "add": return self.handle_add(msg, server_rx) if mtype == "close": return self.handle_close(msg, server_rx) raise Error("unknown type") except Error as e: self.send("error", error=e._explain, orig=msg) def handle_ping(self, msg): if "ping" not in msg: raise Error("ping requires 'ping'") self.send("pong", pong=msg["ping"]) def handle_bind(self, msg, server_rx): if self._app or self._side: raise Error("already bound") if "appid" not in msg: raise Error("bind requires 'appid'") if "side" not in msg: raise Error("bind requires 'side'") self._app = self.factory.server.get_app(msg["appid"]) self._side = msg["side"] client_version = msg.get("client_version", (None, None)) # e.g. ("python", "0.xyz").
<=0.10.5 did not send client_version self._app.log_client_version(server_rx, self._side, client_version) def handle_list(self): nameplate_ids = sorted(self._app.get_nameplate_ids()) # provide room to add nameplate attributes later (like which wordlist # is used for each, maybe how many words) nameplates = [{"id": nid} for nid in nameplate_ids] self.send("nameplates", nameplates=nameplates) def handle_allocate(self, server_rx): if self._did_allocate: raise Error("you already allocated one, don't be greedy") nameplate_id = self._app.allocate_nameplate(self._side, server_rx) assert isinstance(nameplate_id, type("")) self._did_allocate = True self.send("allocated", nameplate=nameplate_id) def handle_claim(self, msg, server_rx): if "nameplate" not in msg: raise Error("claim requires 'nameplate'") if self._did_claim: raise Error("only one claim per connection") self._did_claim = True nameplate_id = msg["nameplate"] assert isinstance(nameplate_id, type("")), type(nameplate_id) self._nameplate_id = nameplate_id try: mailbox_id = self._app.claim_nameplate(nameplate_id, self._side, server_rx) except CrowdedError: raise Error("crowded") except ReclaimedError: raise Error("reclaimed") self.send("claimed", mailbox=mailbox_id) def handle_release(self, msg, server_rx): if self._did_release: raise Error("only one release per connection") if "nameplate" in msg: if self._nameplate_id is not None: if msg["nameplate"] != self._nameplate_id: raise Error("release and claim must use same nameplate") nameplate_id = msg["nameplate"] else: if self._nameplate_id is None: raise Error("release without nameplate must follow claim") nameplate_id = self._nameplate_id assert nameplate_id is not None self._did_release = True self._app.release_nameplate(nameplate_id, self._side, server_rx) self.send("released") def handle_open(self, msg, server_rx): if self._mailbox: raise Error("only one open per connection") if "mailbox" not in msg: raise Error("open requires 'mailbox'") mailbox_id = msg["mailbox"] assert isinstance(mailbox_id, type("")) self._mailbox_id = mailbox_id try: self._mailbox = self._app.open_mailbox(mailbox_id, self._side, server_rx) except CrowdedError: raise Error("crowded") def _send(sm): self.send("message", side=sm.side, phase=sm.phase, body=sm.body, server_rx=sm.server_rx, id=sm.msg_id) def _stop(): pass self._listening = True for old_sm in self._mailbox.add_listener(self, _send, _stop): _send(old_sm) def handle_add(self, msg, server_rx): if not self._mailbox: raise Error("must open mailbox before adding") if "phase" not in msg: raise Error("missing 'phase'") if "body" not in msg: raise Error("missing 'body'") msg_id = msg.get("id") # optional sm = SidedMessage(side=self._side, phase=msg["phase"], body=msg["body"], server_rx=server_rx, msg_id=msg_id) self._mailbox.add_message(sm) def handle_close(self, msg, server_rx): if self._did_close: raise Error("only one close per connection") if "mailbox" in msg: if self._mailbox_id is not None: if msg["mailbox"] != self._mailbox_id: raise Error("open and close must use same mailbox") mailbox_id = msg["mailbox"] else: if self._mailbox_id is None: raise Error("close without mailbox must follow open") mailbox_id = self._mailbox_id if not self._mailbox: try: self._mailbox = self._app.open_mailbox(mailbox_id, self._side, server_rx) except CrowdedError: raise Error("crowded") if self._listening: self._mailbox.remove_listener(self) self._listening = False self._did_close = True self._mailbox.close(self._side, msg.get("mood"), server_rx) self._mailbox = None 
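        # the "closed" ack below is what lets an intermittently-connected
        # client stop re-sending its "close" (see the comment in
        # Mailbox.open() in server.py)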
self.send("closed") def send(self, mtype, **kwargs): kwargs["type"] = mtype kwargs["server_tx"] = time.time() payload = dict_to_bytes(kwargs) self.sendMessage(payload, False) def onClose(self, wasClean, code, reason): #log.msg("onClose", self, self._mailbox, self._listening) if self._mailbox and self._listening: self._mailbox.remove_listener(self) class WebSocketServerFactory(websocket.WebSocketServerFactory): protocol = WebSocketServer def __init__(self, url, server): websocket.WebSocketServerFactory.__init__(self, url) self.setProtocolOptions(autoPingInterval=60, autoPingTimeout=600) self.server = server self.reactor = reactor # for tests to control magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/000077500000000000000000000000001355461217500260775ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/__init__.py000066400000000000000000000000001355461217500301760ustar00rootroot00000000000000magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/common.py000066400000000000000000000050651355461217500277470ustar00rootroot00000000000000#from __future__ import unicode_literals from twisted.internet import reactor, endpoints from twisted.internet.defer import inlineCallbacks from ..database import create_or_upgrade_channel_db, create_or_upgrade_usage_db from ..server import make_server from ..web import make_web_server class ServerBase: log_requests = False @inlineCallbacks def setUp(self): self._lp = None if self.log_requests: blur_usage = None else: blur_usage = 60.0 usage_db = create_or_upgrade_usage_db(":memory:") yield self._setup_relay(blur_usage=blur_usage, usage_db=usage_db) @inlineCallbacks def _setup_relay(self, do_listen=False, web_log_requests=False, **kwargs): channel_db = create_or_upgrade_channel_db(":memory:") self._server = make_server(channel_db, **kwargs) if do_listen: ep = endpoints.TCP4ServerEndpoint(reactor, 0, interface="127.0.0.1") self._site = make_web_server(self._server, log_requests=web_log_requests) self._lp = yield ep.listen(self._site) addr = self._lp.getHost() self.relayurl = "ws://127.0.0.1:%d/v1" % addr.port self.rdv_ws_port = addr.port def tearDown(self): if self._lp: return self._lp.stopListening() class _Util: def _nameplate(self, app, name): np_row = app._db.execute("SELECT * FROM `nameplates`" " WHERE `app_id`='appid' AND `name`=?", (name,)).fetchone() if not np_row: return None, None npid = np_row["id"] side_rows = app._db.execute("SELECT * FROM `nameplate_sides`" " WHERE `nameplates_id`=?", (npid,)).fetchall() return np_row, side_rows def _mailbox(self, app, mailbox_id): mb_row = app._db.execute("SELECT * FROM `mailboxes`" " WHERE `app_id`='appid' AND `id`=?", (mailbox_id,)).fetchone() if not mb_row: return None, None side_rows = app._db.execute("SELECT * FROM `mailbox_sides`" " WHERE `mailbox_id`=?", (mailbox_id,)).fetchall() return mb_row, side_rows def _messages(self, app): c = app._db.execute("SELECT * FROM `messages`" " WHERE `app_id`='appid' AND `mailbox_id`='mid'") return c.fetchall() magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_config.py000066400000000000000000000220111355461217500307510ustar00rootroot00000000000000from __future__ import unicode_literals, print_function from twisted.python.usage import UsageError from twisted.trial import unittest from .. 
import server_tap PORT = "tcp:4000:interface=\:\:" class Config(unittest.TestCase): def test_defaults(self): o = server_tap.Options() o.parseOptions([]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_advertise_version(self): o = server_tap.Options() o.parseOptions(["--advertise-version=1.0"]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": "1.0", "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_blur(self): o = server_tap.Options() o.parseOptions(["--blur-usage=60"]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": 60, "log-fd": None, "websocket-protocol-options": [], }) def test_channel_db(self): o = server_tap.Options() o.parseOptions(["--channel-db=other.sqlite"]) self.assertEqual(o, {"port": PORT, "channel-db": "other.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_disallow_list(self): o = server_tap.Options() o.parseOptions(["--disallow-list"]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": False, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_log_fd(self): o = server_tap.Options() o.parseOptions(["--log-fd=5"]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": 5, "websocket-protocol-options": [], }) def test_port(self): o = server_tap.Options() o.parseOptions(["-p", "tcp:5555"]) self.assertEqual(o, {"port": "tcp:5555", "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) o = server_tap.Options() o.parseOptions(["--port=tcp:5555"]) self.assertEqual(o, {"port": "tcp:5555", "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_signal_error(self): o = server_tap.Options() o.parseOptions(["--signal-error=ohnoes"]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": "ohnoes", "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_usage_db(self): o = server_tap.Options() o.parseOptions(["--usage-db=usage.sqlite"]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": "usage.sqlite", "blur-usage": None, "log-fd": None, "websocket-protocol-options": [], }) def test_websocket_protocol_option_1(self): o = server_tap.Options() o.parseOptions(["--websocket-protocol-option", 'foo="bar"']) 
self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [("foo", "bar")], }) def test_websocket_protocol_option_2(self): o = server_tap.Options() o.parseOptions(["--websocket-protocol-option", 'foo="bar"', "--websocket-protocol-option", 'baz=[1,"buz"]', ]) self.assertEqual(o, {"port": PORT, "channel-db": "relay.sqlite", "disallow-list": 0, "allow-list": True, "advertise-version": None, "signal-error": None, "usage-db": None, "blur-usage": None, "log-fd": None, "websocket-protocol-options": [("foo", "bar"), ("baz", [1, "buz"]), ], }) def test_websocket_protocol_option_errors(self): o = server_tap.Options() with self.assertRaises(UsageError): o.parseOptions(["--websocket-protocol-option", 'foo']) with self.assertRaises(UsageError): # It would be nice if this worked, but the 'bar' isn't JSON. To # enable passing lists and more complicated things as values, # simple string values must be passed with additional quotes # (e.g. '"bar"') o.parseOptions(["--websocket-protocol-option", 'foo=bar']) def test_string(self): o = server_tap.Options() s = str(o) self.assertIn("This plugin sets up a 'Mailbox' server", s) self.assertIn("--blur-usage=", s) self.assertIn("round logged access times to improve privacy", s) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_database.py000066400000000000000000000174351355461217500312660ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import os from twisted.python import filepath from twisted.trial import unittest from .. import database from ..database import (CHANNELDB_TARGET_VERSION, USAGEDB_TARGET_VERSION, _get_db, dump_db, DBError) class Get(unittest.TestCase): def test_create_default(self): db_url = ":memory:" db = _get_db(db_url, "channel", CHANNELDB_TARGET_VERSION) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], CHANNELDB_TARGET_VERSION) def test_open_existing_file(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "normal.db") db = _get_db(fn, "channel", CHANNELDB_TARGET_VERSION) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], CHANNELDB_TARGET_VERSION) db2 = _get_db(fn, "channel", CHANNELDB_TARGET_VERSION) rows = db2.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], CHANNELDB_TARGET_VERSION) def test_open_bad_version(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "old.db") db = _get_db(fn, "channel", CHANNELDB_TARGET_VERSION) db.execute("UPDATE version SET version=999") db.commit() with self.assertRaises(DBError) as e: _get_db(fn, "channel", CHANNELDB_TARGET_VERSION) self.assertIn("Unable to handle db version 999", str(e.exception)) def test_open_corrupt(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "corrupt.db") with open(fn, "wb") as f: f.write(b"I am not a database") with self.assertRaises(DBError) as e: _get_db(fn, "channel", CHANNELDB_TARGET_VERSION) self.assertIn("not a database", str(e.exception)) def test_failed_create_allows_subsequent_create(self): patch = self.patch(database, "get_schema", lambda version: b"this is a broken schema") dbfile = filepath.FilePath(self.mktemp()) self.assertRaises(Exception, lambda:
_get_db(dbfile.path)) patch.restore() _get_db(dbfile.path, "channel", CHANNELDB_TARGET_VERSION) def test_upgrade(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "upgrade.db") self.assertNotEqual(USAGEDB_TARGET_VERSION, 1) # create an old-version DB in a file db = _get_db(fn, "usage", 1) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], 1) del db # then upgrade the file to the latest version dbA = _get_db(fn, "usage", USAGEDB_TARGET_VERSION) rows = dbA.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], USAGEDB_TARGET_VERSION) dbA_text = dump_db(dbA) del dbA # make sure the upgrades got committed to disk dbB = _get_db(fn, "usage", USAGEDB_TARGET_VERSION) dbB_text = dump_db(dbB) del dbB self.assertEqual(dbA_text, dbB_text) # The upgraded schema should be equivalent to that of a new DB. latest_db = _get_db(":memory:", "usage", USAGEDB_TARGET_VERSION) latest_text = dump_db(latest_db) with open("up.sql","w") as f: f.write(dbA_text) with open("new.sql","w") as f: f.write(latest_text) # debug with "diff -u _trial_temp/up.sql _trial_temp/new.sql" self.assertEqual(dbA_text, latest_text) def test_upgrade_fails(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "upgrade.db") self.assertNotEqual(USAGEDB_TARGET_VERSION, 1) # create an old-version DB in a file db = _get_db(fn, "usage", 1) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], 1) del db # then upgrade the file to a too-new version, for which we have no # upgrader with self.assertRaises(DBError): _get_db(fn, "usage", USAGEDB_TARGET_VERSION+1) class CreateChannel(unittest.TestCase): def test_memory(self): db = database.create_channel_db(":memory:") latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) def test_preexisting(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "preexisting.db") with open(fn, "w"): pass with self.assertRaises(database.DBAlreadyExists): database.create_channel_db(fn) def test_create(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db = database.create_channel_db(fn) latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) def test_create_or_upgrade(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db = database.create_or_upgrade_channel_db(fn) latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) class CreateUsage(unittest.TestCase): def test_memory(self): db = database.create_usage_db(":memory:") latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) def test_preexisting(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "preexisting.db") with open(fn, "w"): pass with self.assertRaises(database.DBAlreadyExists): database.create_usage_db(fn) def test_create(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db = database.create_usage_db(fn) latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) def test_create_or_upgrade(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db = database.create_or_upgrade_usage_db(fn) latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) def test_create_or_upgrade_disabled(self): db = database.create_or_upgrade_usage_db(None) self.assertIs(db, 
None) class OpenChannel(unittest.TestCase): def test_open(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db1 = database.create_channel_db(fn) latest_text = dump_db(db1) self.assertIn("CREATE TABLE", latest_text) db2 = database.open_existing_db(fn) self.assertIn("CREATE TABLE", dump_db(db2)) def test_doesnt_exist(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") with self.assertRaises(database.DBDoesntExist): database.open_existing_db(fn) class OpenUsage(unittest.TestCase): def test_open(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db1 = database.create_usage_db(fn) latest_text = dump_db(db1) self.assertIn("CREATE TABLE", latest_text) db2 = database.open_existing_db(fn) self.assertIn("CREATE TABLE", dump_db(db2)) def test_doesnt_exist(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") with self.assertRaises(database.DBDoesntExist): database.open_existing_db(fn) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_rlimits.py000066400000000000000000000053421355461217500311770ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import mock from twisted.trial import unittest from ..increase_rlimits import increase_rlimits class RLimits(unittest.TestCase): def test_rlimit(self): def patch_r(name, *args, **kwargs): return mock.patch("wormhole_mailbox_server.increase_rlimits." + name, *args, **kwargs) fakelog = [] def checklog(*expected): self.assertEqual(fakelog, list(expected)) fakelog[:] = [] NF = "NOFILE" mock_NF = patch_r("RLIMIT_NOFILE", NF) with patch_r("log.msg", fakelog.append): with patch_r("getrlimit", None): increase_rlimits() checklog("unable to import 'resource', leaving rlimit alone") with mock_NF: with patch_r("getrlimit", return_value=(20000, 30000)) as gr: increase_rlimits() self.assertEqual(gr.mock_calls, [mock.call(NF)]) checklog("RLIMIT_NOFILE.soft was 20000, leaving it alone") with patch_r("getrlimit", return_value=(10, 30000)) as gr: with patch_r("setrlimit", side_effect=TypeError("other")): with patch_r("log.err") as err: increase_rlimits() self.assertEqual(err.mock_calls, [mock.call()]) checklog("changing RLIMIT_NOFILE from (10,30000) to (30000,30000)", "other error during setrlimit, leaving it alone") for maxlimit in [40000, 20000, 9000, 2000, 1000]: def setrlimit(which, newlimit): if newlimit[0] > maxlimit: raise ValueError("nope") return None calls = [] expected = [] for tries in [30000, 10000, 3200, 1024]: calls.append(mock.call(NF, (tries, 30000))) expected.append("changing RLIMIT_NOFILE from (10,30000) to (%d,30000)" % tries) if tries > maxlimit: expected.append("error during setrlimit: nope") else: expected.append("setrlimit successful") break else: expected.append("unable to change rlimit, leaving it alone") with patch_r("setrlimit", side_effect=setrlimit) as sr: increase_rlimits() self.assertEqual(sr.mock_calls, calls) checklog(*expected) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_server.py000066400000000000000000000620761355461217500310310ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import mock from twisted.trial import unittest from twisted.python import log from .common import ServerBase, _Util from ..server import (make_server, Usage, SidedMessage, CrowdedError, AppNamespace) from ..database import create_channel_db, create_usage_db class Server(_Util, ServerBase, 
unittest.TestCase): def test_apps(self): app1 = self._server.get_app("appid1") self.assertIdentical(app1, self._server.get_app("appid1")) app2 = self._server.get_app("appid2") self.assertNotIdentical(app1, app2) def test_nameplate_allocation(self): app = self._server.get_app("appid") nids = set() # this takes a second, and claims all the short-numbered nameplates def add(): nameplate_id = app.allocate_nameplate("side1", 0) self.assertEqual(type(nameplate_id), type("")) nid = int(nameplate_id) nids.add(nid) for i in range(9): add() self.assertNotIn(0, nids) self.assertEqual(set(range(1,10)), nids) for i in range(100-10): add() self.assertEqual(len(nids), 99) self.assertEqual(set(range(1,100)), nids) for i in range(1000-100): add() self.assertEqual(len(nids), 999) self.assertEqual(set(range(1,1000)), nids) add() self.assertEqual(len(nids), 1000) biggest = max(nids) self.assert_(1000 <= biggest < 1000000, biggest) def test_nameplate_allocation_failure(self): app = self._server.get_app("appid") # pretend to fill all 1M <7-digit nameplates; it should give up # eventually def _get_nameplate_ids(): return set(("%d" % id_int for id_int in range(1, 1000*1000))) app._get_nameplate_ids = _get_nameplate_ids with self.assertRaises(ValueError) as e: app.allocate_nameplate("side1", 0) self.assertIn("unable to find a free nameplate-id", str(e.exception)) def test_nameplate(self): app = self._server.get_app("appid") name = app.allocate_nameplate("side1", 0) self.assertEqual(type(name), type("")) nid = int(name) self.assert_(0 < nid < 10, nid) self.assertEqual(app.get_nameplate_ids(), set([name])) # allocate also does a claim np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["side"], "side1") self.assertEqual(side_rows[0]["added"], 0) # duplicate claims by the same side are combined mailbox_id = app.claim_nameplate(name, "side1", 1) self.assertEqual(type(mailbox_id), type("")) self.assertEqual(mailbox_id, np_row["mailbox_id"]) np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["added"], 0) self.assertEqual(mailbox_id, np_row["mailbox_id"]) # and they don't update the 'added' time mailbox_id2 = app.claim_nameplate(name, "side1", 2) self.assertEqual(mailbox_id, mailbox_id2) np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["added"], 0) # claim by the second side is new mailbox_id3 = app.claim_nameplate(name, "side2", 3) self.assertEqual(mailbox_id, mailbox_id3) np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 2) self.assertEqual(sorted([row["side"] for row in side_rows]), sorted(["side1", "side2"])) self.assertIn(("side2", 3), [(row["side"], row["added"]) for row in side_rows]) # a third claim marks the nameplate as "crowded", and adds a third # claim (which must be released later), but leaves the two existing # claims alone self.assertRaises(CrowdedError, app.claim_nameplate, name, "side3", 4) np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 3) # releasing a non-existent nameplate is ignored app.release_nameplate(name+"not", "side4", 0) # releasing a side that never claimed the nameplate is ignored app.release_nameplate(name, "side4", 0) np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 3) # releasing one side leaves the second claim app.release_nameplate(name, "side1", 5) np_row, side_rows = self._nameplate(app, name) claims =
[(row["side"], row["claimed"]) for row in side_rows] self.assertIn(("side1", False), claims) self.assertIn(("side2", True), claims) self.assertIn(("side3", True), claims) # releasing one side multiple times is ignored app.release_nameplate(name, "side1", 5) np_row, side_rows = self._nameplate(app, name) claims = [(row["side"], row["claimed"]) for row in side_rows] self.assertIn(("side1", False), claims) self.assertIn(("side2", True), claims) self.assertIn(("side3", True), claims) # release the second side app.release_nameplate(name, "side2", 6) np_row, side_rows = self._nameplate(app, name) claims = [(row["side"], row["claimed"]) for row in side_rows] self.assertIn(("side1", False), claims) self.assertIn(("side2", False), claims) self.assertIn(("side3", True), claims) # releasing the third side frees the nameplate, and adds usage app.release_nameplate(name, "side3", 7) np_row, side_rows = self._nameplate(app, name) self.assertEqual(np_row, None) usage = app._usage_db.execute("SELECT * FROM `nameplates`").fetchone() self.assertEqual(usage["app_id"], "appid") self.assertEqual(usage["started"], 0) self.assertEqual(usage["waiting_time"], 3) self.assertEqual(usage["total_time"], 7) self.assertEqual(usage["result"], "crowded") def test_mailbox(self): app = self._server.get_app("appid") mailbox_id = "mid" m1 = app.open_mailbox(mailbox_id, "side1", 0) mb_row, side_rows = self._mailbox(app, mailbox_id) self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["side"], "side1") self.assertEqual(side_rows[0]["added"], 0) # opening the same mailbox twice, by the same side, gets the same # object, and does not update the "added" timestamp self.assertIdentical(m1, app.open_mailbox(mailbox_id, "side1", 1)) mb_row, side_rows = self._mailbox(app, mailbox_id) self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["side"], "side1") self.assertEqual(side_rows[0]["added"], 0) # opening a second side gets the same object, and adds a new claim self.assertIdentical(m1, app.open_mailbox(mailbox_id, "side2", 2)) mb_row, side_rows = self._mailbox(app, mailbox_id) self.assertEqual(len(side_rows), 2) adds = [(row["side"], row["added"]) for row in side_rows] self.assertIn(("side1", 0), adds) self.assertIn(("side2", 2), adds) # a third open marks it as crowded self.assertRaises(CrowdedError, app.open_mailbox, mailbox_id, "side3", 3) mb_row, side_rows = self._mailbox(app, mailbox_id) self.assertEqual(len(side_rows), 3) m1.close("side3", "company", 4) # closing a side that never claimed the mailbox is ignored m1.close("side4", "mood", 4) mb_row, side_rows = self._mailbox(app, mailbox_id) self.assertEqual(len(side_rows), 3) # closing one side leaves the second claim m1.close("side1", "mood", 5) mb_row, side_rows = self._mailbox(app, mailbox_id) sides = [(row["side"], row["opened"], row["mood"]) for row in side_rows] self.assertIn(("side1", False, "mood"), sides) self.assertIn(("side2", True, None), sides) self.assertIn(("side3", False, "company"), sides) # closing one side multiple times is ignored m1.close("side1", "mood", 6) mb_row, side_rows = self._mailbox(app, mailbox_id) sides = [(row["side"], row["opened"], row["mood"]) for row in side_rows] self.assertIn(("side1", False, "mood"), sides) self.assertIn(("side2", True, None), sides) self.assertIn(("side3", False, "company"), sides) l1 = []; stop1 = []; stop1_f = lambda: stop1.append(True) m1.add_listener("handle1", l1.append, stop1_f) # closing the second side frees the mailbox, and adds usage m1.close("side2", "mood", 7) self.assertEqual(stop1, 
[True]) mb_row, side_rows = self._mailbox(app, mailbox_id) self.assertEqual(mb_row, None) usage = app._usage_db.execute("SELECT * FROM `mailboxes`").fetchone() self.assertEqual(usage["app_id"], "appid") self.assertEqual(usage["started"], 0) self.assertEqual(usage["waiting_time"], 2) self.assertEqual(usage["total_time"], 7) self.assertEqual(usage["result"], "crowded") def test_messages(self): app = self._server.get_app("appid") mailbox_id = "mid" m1 = app.open_mailbox(mailbox_id, "side1", 0) m1.add_message(SidedMessage(side="side1", phase="phase", body="body", server_rx=1, msg_id="msgid")) msgs = self._messages(app) self.assertEqual(len(msgs), 1) self.assertEqual(msgs[0]["body"], "body") l1 = []; stop1 = []; stop1_f = lambda: stop1.append(True) l2 = []; stop2 = []; stop2_f = lambda: stop2.append(True) old = m1.add_listener("handle1", l1.append, stop1_f) self.assertEqual(len(old), 1) self.assertEqual(old[0].side, "side1") self.assertEqual(old[0].body, "body") m1.add_message(SidedMessage(side="side1", phase="phase2", body="body2", server_rx=1, msg_id="msgid")) self.assertEqual(len(l1), 1) self.assertEqual(l1[0].body, "body2") old = m1.add_listener("handle2", l2.append, stop2_f) self.assertEqual(len(old), 2) m1.add_message(SidedMessage(side="side1", phase="phase3", body="body3", server_rx=1, msg_id="msgid")) self.assertEqual(len(l1), 2) self.assertEqual(l1[-1].body, "body3") self.assertEqual(len(l2), 1) self.assertEqual(l2[-1].body, "body3") m1.remove_listener("handle1") m1.add_message(SidedMessage(side="side1", phase="phase4", body="body4", server_rx=1, msg_id="msgid")) self.assertEqual(len(l1), 2) self.assertEqual(l1[-1].body, "body3") self.assertEqual(len(l2), 2) self.assertEqual(l2[-1].body, "body4") m1._shutdown() self.assertEqual(stop1, []) self.assertEqual(stop2, [True]) # message adds are not idempotent: clients filter duplicates m1.add_message(SidedMessage(side="side1", phase="phase", body="body", server_rx=1, msg_id="msgid")) msgs = self._messages(app) self.assertEqual(len(msgs), 5) self.assertEqual(msgs[-1]["body"], "body") class Prune(unittest.TestCase): def _get_mailbox_updated(self, app, mbox_id): row = app._db.execute("SELECT * FROM `mailboxes` WHERE" " `app_id`=? 
AND `id`=?", (app._app_id, mbox_id)).fetchone() return row["updated"] def test_update(self): rv = make_server(create_channel_db(":memory:")) app = rv.get_app("appid") mbox_id = "mbox1" app.open_mailbox(mbox_id, "side1", 1) self.assertEqual(self._get_mailbox_updated(app, mbox_id), 1) mb = app.open_mailbox(mbox_id, "side2", 2) self.assertEqual(self._get_mailbox_updated(app, mbox_id), 2) sm = SidedMessage("side1", "phase", "body", 3, "msgid") mb.add_message(sm) self.assertEqual(self._get_mailbox_updated(app, mbox_id), 3) def test_apps(self): rv = make_server(create_channel_db(":memory:")) app = rv.get_app("appid") app.allocate_nameplate("side", 121) app.prune = mock.Mock() rv.prune_all_apps(now=123, old=122) self.assertEqual(app.prune.mock_calls, [mock.call(123, 122)]) def test_nameplates(self): db = create_channel_db(":memory:") rv = make_server(db, blur_usage=3600) # timestamps <=50 are "old", >=51 are "new" #OLD = "old"; NEW = "new" #when = {OLD: 1, NEW: 60} new_nameplates = set() APPID = "appid" app = rv.get_app(APPID) # Exercise the first-vs-second newness tests app.claim_nameplate("np-1", "side1", 1) app.claim_nameplate("np-2", "side1", 1) app.claim_nameplate("np-2", "side2", 2) app.claim_nameplate("np-3", "side1", 60) new_nameplates.add("np-3") app.claim_nameplate("np-4", "side1", 1) app.claim_nameplate("np-4", "side2", 60) new_nameplates.add("np-4") app.claim_nameplate("np-5", "side1", 60) app.claim_nameplate("np-5", "side2", 61) new_nameplates.add("np-5") rv.prune_all_apps(now=123, old=50) nameplates = set([row["name"] for row in db.execute("SELECT * FROM `nameplates`").fetchall()]) self.assertEqual(new_nameplates, nameplates) mailboxes = set([row["id"] for row in db.execute("SELECT * FROM `mailboxes`").fetchall()]) self.assertEqual(len(new_nameplates), len(mailboxes)) def test_mailboxes(self): db = create_channel_db(":memory:") rv = make_server(db, blur_usage=3600) # timestamps <=50 are "old", >=51 are "new" #OLD = "old"; NEW = "new" #when = {OLD: 1, NEW: 60} new_mailboxes = set() APPID = "appid" app = rv.get_app(APPID) # Exercise the first-vs-second newness tests app.open_mailbox("mb-11", "side1", 1) app.open_mailbox("mb-12", "side1", 1) app.open_mailbox("mb-12", "side2", 2) app.open_mailbox("mb-13", "side1", 60) new_mailboxes.add("mb-13") app.open_mailbox("mb-14", "side1", 1) app.open_mailbox("mb-14", "side2", 60) new_mailboxes.add("mb-14") app.open_mailbox("mb-15", "side1", 60) app.open_mailbox("mb-15", "side2", 61) new_mailboxes.add("mb-15") rv.prune_all_apps(now=123, old=50) mailboxes = set([row["id"] for row in db.execute("SELECT * FROM `mailboxes`").fetchall()]) self.assertEqual(new_mailboxes, mailboxes) def test_lots(self): OLD = "old"; NEW = "new" for nameplate in [False, True]: for mailbox in [OLD, NEW]: for has_listeners in [False, True]: self.one(nameplate, mailbox, has_listeners) def test_one(self): # to debug specific problems found by test_lots self.one(None, "new", False) def one(self, nameplate, mailbox, has_listeners): desc = ("nameplate=%s, mailbox=%s, has_listeners=%s" % (nameplate, mailbox, has_listeners)) log.msg(desc) db = create_channel_db(":memory:") rv = make_server(db, blur_usage=3600) APPID = "appid" app = rv.get_app(APPID) # timestamps <=50 are "old", >=51 are "new" OLD = "old"; NEW = "new" when = {OLD: 1, NEW: 60} nameplate_survives = False mailbox_survives = False mbid = "mbid" if nameplate: mbid = app.claim_nameplate("npid", "side1", when[mailbox]) mb = app.open_mailbox(mbid, "side1", when[mailbox]) # the pruning algorithm doesn't care about the age 
of messages, # because mailbox.updated is always updated each time we add a # message sm = SidedMessage("side1", "phase", "body", when[mailbox], "msgid") mb.add_message(sm) if has_listeners: mb.add_listener("handle", None, None) if (mailbox == NEW or has_listeners): if nameplate: nameplate_survives = True mailbox_survives = True messages_survive = mailbox_survives rv.prune_all_apps(now=123, old=50) nameplates = set([row["name"] for row in db.execute("SELECT * FROM `nameplates`").fetchall()]) self.assertEqual(nameplate_survives, bool(nameplates), ("nameplate", nameplate_survives, nameplates, desc)) mailboxes = set([row["id"] for row in db.execute("SELECT * FROM `mailboxes`").fetchall()]) self.assertEqual(mailbox_survives, bool(mailboxes), ("mailbox", mailbox_survives, mailboxes, desc)) messages = set([row["msg_id"] for row in db.execute("SELECT * FROM `messages`").fetchall()]) self.assertEqual(messages_survive, bool(messages), ("messages", messages_survive, messages, desc)) class Summary(unittest.TestCase): def test_mailbox(self): app = AppNamespace(None, None, None, False, None, True) # starts at time 1, maybe gets second open at time 3, closes at 5 def s(rows, pruned=False): return app._summarize_mailbox(rows, 5, pruned) rows = [dict(added=1)] self.assertEqual(s(rows), Usage(1, None, 4, "lonely")) rows = [dict(added=1, mood="lonely")] self.assertEqual(s(rows), Usage(1, None, 4, "lonely")) rows = [dict(added=1, mood="errory")] self.assertEqual(s(rows), Usage(1, None, 4, "errory")) rows = [dict(added=1, mood=None)] self.assertEqual(s(rows, pruned=True), Usage(1, None, 4, "pruney")) rows = [dict(added=1, mood="happy")] self.assertEqual(s(rows, pruned=True), Usage(1, None, 4, "pruney")) rows = [dict(added=1, mood="happy"), dict(added=3, mood="happy")] self.assertEqual(s(rows), Usage(1, 2, 4, "happy")) rows = [dict(added=1, mood="errory"), dict(added=3, mood="happy")] self.assertEqual(s(rows), Usage(1, 2, 4, "errory")) rows = [dict(added=1, mood="happy"), dict(added=3, mood="errory")] self.assertEqual(s(rows), Usage(1, 2, 4, "errory")) rows = [dict(added=1, mood="scary"), dict(added=3, mood="happy")] self.assertEqual(s(rows), Usage(1, 2, 4, "scary")) rows = [dict(added=1, mood="scary"), dict(added=3, mood="errory")] self.assertEqual(s(rows), Usage(1, 2, 4, "scary")) rows = [dict(added=1, mood="happy"), dict(added=3, mood=None)] self.assertEqual(s(rows, pruned=True), Usage(1, 2, 4, "pruney")) rows = [dict(added=1, mood="happy"), dict(added=3, mood="happy")] self.assertEqual(s(rows, pruned=True), Usage(1, 2, 4, "pruney")) rows = [dict(added=1), dict(added=3), dict(added=4)] self.assertEqual(s(rows), Usage(1, 2, 4, "crowded")) rows = [dict(added=1), dict(added=3), dict(added=4)] self.assertEqual(s(rows, pruned=True), Usage(1, 2, 4, "crowded")) def test_nameplate(self): a = AppNamespace(None, None, None, False, None, True) # starts at time 1, maybe gets second open at time 3, closes at 5 def s(rows, pruned=False): return a._summarize_nameplate_usage(rows, 5, pruned) rows = [dict(added=1)] self.assertEqual(s(rows), Usage(1, None, 4, "lonely")) rows = [dict(added=1), dict(added=3)] self.assertEqual(s(rows), Usage(1, 2, 4, "happy")) rows = [dict(added=1), dict(added=3)] self.assertEqual(s(rows, pruned=True), Usage(1, 2, 4, "pruney")) rows = [dict(added=1), dict(added=3), dict(added=4)] self.assertEqual(s(rows), Usage(1, 2, 4, "crowded")) def test_nameplate_disallowed(self): db = create_channel_db(":memory:") a = AppNamespace(db, None, None, False, "some_app_id", False) a.allocate_nameplate("side1", 
"123") self.assertEqual([], a.get_nameplate_ids()) def test_nameplate_allowed(self): db = create_channel_db(":memory:") a = AppNamespace(db, None, None, False, "some_app_id", True) np = a.allocate_nameplate("side1", "321") self.assertEqual(set([np]), a.get_nameplate_ids()) def test_blur(self): db = create_channel_db(":memory:") usage_db = create_usage_db(":memory:") rv = make_server(db, blur_usage=3600, usage_db=usage_db) APPID = "appid" app = rv.get_app(APPID) app.claim_nameplate("npid", "side1", 10) # start time is 10 rv.prune_all_apps(now=123, old=50) # start time should be rounded to top of the hour (blur_usage=3600) row = usage_db.execute("SELECT * FROM `nameplates`").fetchone() self.assertEqual(row["started"], 0) app = rv.get_app(APPID) app.open_mailbox("mbid", "side1", 20) # start time is 20 rv.prune_all_apps(now=123, old=50) row = usage_db.execute("SELECT * FROM `mailboxes`").fetchone() self.assertEqual(row["started"], 0) def test_no_blur(self): db = create_channel_db(":memory:") usage_db = create_usage_db(":memory:") rv = make_server(db, blur_usage=None, usage_db=usage_db) APPID = "appid" app = rv.get_app(APPID) app.claim_nameplate("npid", "side1", 10) # start time is 10 rv.prune_all_apps(now=123, old=50) row = usage_db.execute("SELECT * FROM `nameplates`").fetchone() self.assertEqual(row["started"], 10) usage_db.execute("DELETE FROM `mailboxes`") usage_db.commit() app = rv.get_app(APPID) app.open_mailbox("mbid", "side1", 20) # start time is 20 rv.prune_all_apps(now=123, old=50) row = usage_db.execute("SELECT * FROM `mailboxes`").fetchone() self.assertEqual(row["started"], 20) ## class DumpStats(unittest.TestCase): ## def test_nostats(self): ## rs = easy_relay() ## # with no ._stats_file, this should do nothing ## rs.dump_stats(1, 1) ## def test_empty(self): ## basedir = self.mktemp() ## os.mkdir(basedir) ## fn = os.path.join(basedir, "stats.json") ## rs = easy_relay(stats_file=fn) ## now = 1234 ## validity = 500 ## rs.dump_stats(now, validity) ## with open(fn, "rb") as f: ## data_bytes = f.read() ## data = json.loads(data_bytes.decode("utf-8")) ## self.assertEqual(data["created"], now) ## self.assertEqual(data["valid_until"], now+validity) ## self.assertEqual(data["rendezvous"]["all_time"]["mailboxes_total"], 0) class Startup(unittest.TestCase): @mock.patch('wormhole_mailbox_server.server.log') def test_empty(self, fake_log): db = create_channel_db(":memory:") s = make_server(db, allow_list=False) s.startService() try: logs = '\n'.join([call[1][0] for call in fake_log.mock_calls]) self.assertIn('listing of allocated nameplates disallowed', logs) finally: s.stopService() @mock.patch('wormhole_mailbox_server.server.log') def test_allow_list(self, fake_log): db = create_channel_db(":memory:") s = make_server(db, allow_list=True) s.startService() try: logs = '\n'.join([call[1][0] for call in fake_log.mock_calls]) self.assertNotIn('listing of allocated nameplates disallowed', logs) finally: s.stopService() @mock.patch('wormhole_mailbox_server.server.log') def test_blur_usage(self, fake_log): db = create_channel_db(":memory:") s = make_server(db, blur_usage=60, allow_list=True) s.startService() try: logs = '\n'.join([call[1][0] for call in fake_log.mock_calls]) self.assertNotIn('listing of allocated nameplates disallowed', logs) self.assertIn('blurring access times to 60 seconds', logs) finally: s.stopService() class MakeServer(unittest.TestCase): def test_welcome_empty(self): db = create_channel_db(":memory:") s = make_server(db) self.assertEqual(s.get_welcome(), {}) def 
test_welcome_error(self): db = create_channel_db(":memory:") s = make_server(db, signal_error="error!") self.assertEqual(s.get_welcome(), {"error": "error!"}) def test_welcome_advertise_version(self): db = create_channel_db(":memory:") s = make_server(db, advertise_version="version") self.assertEqual(s.get_welcome(), {"current_cli_version": "version"}) # exercise _find_available_nameplate_id failing # exercise CrowdedError # exercise double free_mailbox # exercise _summarize_mailbox = quiet (0 sides) # exercise AppNamespace._shutdown # so Server.stopService ## test blur_usage/not on Server ## test make_server(signal_error=) ## exercise dump_stats (with/without usagedb) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_service.py000066400000000000000000000056361355461217500311620ustar00rootroot00000000000000from __future__ import unicode_literals, print_function from twisted.trial import unittest import mock from twisted.application.service import MultiService from .. import server_tap class Service(unittest.TestCase): def test_defaults(self): o = server_tap.Options() o.parseOptions([]) cdb = object() udb = object() r = mock.Mock() ws = object() with mock.patch("wormhole_mailbox_server.server_tap.create_or_upgrade_channel_db", return_value=cdb) as ccdb: with mock.patch("wormhole_mailbox_server.server_tap.create_or_upgrade_usage_db", return_value=udb) as ccub: with mock.patch("wormhole_mailbox_server.server_tap.make_server", return_value=r) as ms: with mock.patch("wormhole_mailbox_server.server_tap.make_web_server", return_value=ws) as mws: s = server_tap.makeService(o) self.assertEqual(ccdb.mock_calls, [mock.call("relay.sqlite")]) self.assertEqual(ccub.mock_calls, [mock.call(None)]) self.assertEqual(ms.mock_calls, [mock.call(cdb, allow_list=True, advertise_version=None, signal_error=None, blur_usage=None, usage_db=udb, log_file=None)]) self.assertEqual(mws.mock_calls, [mock.call(r, True, [])]) self.assertIsInstance(s, MultiService) self.assertEqual(len(r.mock_calls), 1) # setServiceParent def test_log_fd(self): o = server_tap.Options() o.parseOptions(["--log-fd=99"]) fd = object() cdb = object() udb = object() r = mock.Mock() ws = object() with mock.patch("wormhole_mailbox_server.server_tap.create_or_upgrade_channel_db", return_value=cdb): with mock.patch("wormhole_mailbox_server.server_tap.create_or_upgrade_usage_db", return_value=udb): with mock.patch("wormhole_mailbox_server.server_tap.make_server", return_value=r) as ms: with mock.patch("wormhole_mailbox_server.server_tap.make_web_server", return_value=ws): with mock.patch("wormhole_mailbox_server.server_tap.os.fdopen", return_value=fd) as f: server_tap.makeService(o) self.assertEqual(f.mock_calls, [mock.call(99, "w")]) self.assertEqual(ms.mock_calls, [mock.call(cdb, allow_list=True, advertise_version=None, signal_error=None, blur_usage=None, usage_db=udb, log_file=fd)]) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_stats.py000066400000000000000000000331651355461217500306560ustar00rootroot00000000000000from __future__ import print_function, unicode_literals #import io, json from twisted.trial import unittest from ..database import create_channel_db, create_usage_db from ..server import make_server, CrowdedError class _Make: def make(self, blur_usage=None, with_usage_db=True): self._cdb = create_channel_db(":memory:") db = create_usage_db(":memory:") if with_usage_db else None s = make_server(self._cdb, usage_db=db, blur_usage=blur_usage) app = s.get_app("appid") return s, db, app class 
Current(_Make, unittest.TestCase): def test_current_no_mailboxes(self): s, db, app = self.make() s.dump_stats(456, rebooted=451) self.assertEqual(db.execute("SELECT * FROM `current`").fetchall(), [dict(rebooted=451, updated=456, blur_time=None, connections_websocket=0), ]) def test_current_no_listeners(self): s, db, app = self.make() app.open_mailbox("m1", "s1", 1) s.dump_stats(456, rebooted=451) self.assertEqual(db.execute("SELECT * FROM `current`").fetchall(), [dict(rebooted=451, updated=456, blur_time=None, connections_websocket=0), ]) def test_current_one_listener(self): s, db, app = self.make() mbox = app.open_mailbox("m1", "s1", 1) mbox.add_listener("h1", lambda sm: None, lambda: None) s.dump_stats(456, rebooted=451) self.assertEqual(db.execute("SELECT * FROM `current`").fetchall(), [dict(rebooted=451, updated=456, blur_time=None, connections_websocket=1), ]) class ClientVersion(_Make, unittest.TestCase): def test_add_version(self): s, db, app = self.make() app.log_client_version(451, "side1", ("python", "1.2.3")) self.assertEqual(db.execute("SELECT * FROM `client_versions`").fetchall(), [dict(app_id="appid", connect_time=451, side="side1", implementation="python", version="1.2.3")]) def test_add_version_extra_fields(self): s, db, app = self.make() app.log_client_version(451, "side1", ("python", "1.2.3", "extra")) self.assertEqual(db.execute("SELECT * FROM `client_versions`").fetchall(), [dict(app_id="appid", connect_time=451, side="side1", implementation="python", version="1.2.3")]) def test_blur(self): s, db, app = self.make(blur_usage=100) app.log_client_version(451, "side1", ("python", "1.2.3")) self.assertEqual(db.execute("SELECT * FROM `client_versions`").fetchall(), [dict(app_id="appid", connect_time=400, side="side1", implementation="python", version="1.2.3")]) def test_no_usage_db(self): s, db, app = self.make(with_usage_db=False) app.log_client_version(451, "side1", ("python", "1.2.3")) class Nameplate(_Make, unittest.TestCase): def test_nameplate_happy(self): s, db, app = self.make() app.claim_nameplate("n1", "s1", 1) app.claim_nameplate("n1", "s2", 3) app.release_nameplate("n1", "s1", 6) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), []) app.release_nameplate("n1", "s2", 10) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), [dict(app_id="appid", result="happy", started=1, waiting_time=2, total_time=9)]) def test_nameplate_lonely(self): s, db, app = self.make() app.claim_nameplate("n1", "s1", 1) app.release_nameplate("n1", "s1", 6) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), [dict(app_id="appid", result="lonely", started=1, waiting_time=None, total_time=5)]) def test_nameplate_pruney(self): s, db, app = self.make() app.claim_nameplate("n1", "s1", 1) app.prune(10, 5) # prune at t=10, anything earlier than 5 is "old" self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), [dict(app_id="appid", result="pruney", started=1, waiting_time=None, total_time=9)]) def test_nameplate_crowded(self): s, db, app = self.make() app.claim_nameplate("n1", "s1", 1) app.claim_nameplate("n1", "s2", 2) with self.assertRaises(CrowdedError): app.claim_nameplate("n1", "s3", 3) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), []) app.release_nameplate("n1", "s1", 4) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), []) app.release_nameplate("n1", "s2", 5) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), []) #print(self._cdb.execute("SELECT * FROM 
`nameplates`").fetchall()) #print(self._cdb.execute("SELECT * FROM `nameplate_sides`").fetchall()) # TODO: to get "crowded", we need all three sides to release the # nameplate, even though the third side threw CrowdedError and thus # probably doesn't think it has a claim app.release_nameplate("n1", "s3", 6) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), [dict(app_id="appid", result="crowded", started=1, waiting_time=1, total_time=5)]) def test_nameplate_crowded_pruned(self): s, db, app = self.make() app.claim_nameplate("n1", "s1", 1) app.claim_nameplate("n1", "s2", 2) with self.assertRaises(CrowdedError): app.claim_nameplate("n1", "s3", 3) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), []) app.prune(10, 5) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), [dict(app_id="appid", result="crowded", started=1, waiting_time=1, total_time=9)]) def test_no_db(self): s, db, app = self.make(with_usage_db=False) app.claim_nameplate("n1", "s1", 1) app.release_nameplate("n1", "s1", 6) s.dump_stats(3, 1) def test_nameplate_happy_blur_usage(self): s, db, app = self.make(blur_usage=20) app.claim_nameplate("n1", "s1", 21) app.claim_nameplate("n1", "s2", 23) app.release_nameplate("n1", "s1", 26) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), []) app.release_nameplate("n1", "s2", 30) self.assertEqual(db.execute("SELECT * FROM `nameplates`").fetchall(), [dict(app_id="appid", result="happy", started=20, waiting_time=2, total_time=9)]) class Mailbox(_Make, unittest.TestCase): def test_mailbox_prune_quiet(self): s, db, app = self.make() app.claim_nameplate("n1", "s1", 1) app.release_nameplate("n1", "s1", 2) app.prune(10, 5) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="pruney", started=1, waiting_time=None, total_time=9)]) def test_mailbox_lonely(self): s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox.close("s1", "mood-ignored", 4) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="lonely", started=1, waiting_time=None, total_time=3)]) def test_mailbox_happy(self): s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox1 = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox2 = app.open_mailbox(mid, "s2", 4) mbox1.close("s1", "happy", 5) mbox2.close("s2", "happy", 6) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="happy", started=1, waiting_time=3, total_time=5)]) def test_mailbox_happy_blur_usage(self): s, db, app = self.make(blur_usage=20) mid = app.claim_nameplate("n1", "s1", 21) mbox1 = app.open_mailbox(mid, "s1", 22) app.release_nameplate("n1", "s1", 23) mbox2 = app.open_mailbox(mid, "s2", 24) mbox1.close("s1", "happy", 25) mbox2.close("s2", "happy", 26) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="happy", started=20, waiting_time=3, total_time=5)]) def test_mailbox_lonely_connected(self): # I don't think this could actually happen. It requires both sides to # connect, but then at least one side says they're lonely when they # close. 
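# Either way, the less-happy mood should win: the summary asserted
# below records "lonely" even though the other side closed "happy".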
s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox1 = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox2 = app.open_mailbox(mid, "s2", 4) mbox1.close("s1", "lonely", 5) mbox2.close("s2", "happy", 6) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="lonely", started=1, waiting_time=3, total_time=5)]) def test_mailbox_scary(self): s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox1 = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox2 = app.open_mailbox(mid, "s2", 4) mbox1.close("s1", "scary", 5) mbox2.close("s2", "happy", 6) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="scary", started=1, waiting_time=3, total_time=5)]) def test_mailbox_errory(self): s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox1 = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox2 = app.open_mailbox(mid, "s2", 4) mbox1.close("s1", "errory", 5) mbox2.close("s2", "happy", 6) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="errory", started=1, waiting_time=3, total_time=5)]) def test_mailbox_errory_scary(self): s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox1 = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox2 = app.open_mailbox(mid, "s2", 4) mbox1.close("s1", "errory", 5) mbox2.close("s2", "scary", 6) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="scary", started=1, waiting_time=3, total_time=5)]) def test_mailbox_crowded(self): s, db, app = self.make() mid = app.claim_nameplate("n1", "s1", 1) mbox1 = app.open_mailbox(mid, "s1", 2) app.release_nameplate("n1", "s1", 3) mbox2 = app.open_mailbox(mid, "s2", 4) with self.assertRaises(CrowdedError): app.open_mailbox(mid, "s3", 5) mbox1.close("s1", "happy", 6) mbox2.close("s2", "happy", 7) # again, not realistic mbox2.close("s3", "happy", 8) self.assertEqual(db.execute("SELECT * FROM `mailboxes`").fetchall(), [dict(app_id="appid", for_nameplate=1, result="crowded", started=1, waiting_time=3, total_time=7)]) ## class LogToStdout(unittest.TestCase): ## def test_log(self): ## # emit lines of JSON to log_file, if set ## log_file = io.StringIO() ## t = Transit(blur_usage=None, log_file=log_file, usage_db=None) ## t.recordUsage(started=123, result="happy", total_bytes=100, ## total_time=10, waiting_time=2) ## self.assertEqual(json.loads(log_file.getvalue()), ## {"started": 123, "total_time": 10, ## "waiting_time": 2, "total_bytes": 100, ## "mood": "happy"}) ## def test_log_blurred(self): ## # if blurring is enabled, timestamps should be rounded to the ## # requested amount, and sizes should be rounded up too ## log_file = io.StringIO() ## t = Transit(blur_usage=60, log_file=log_file, usage_db=None) ## t.recordUsage(started=123, result="happy", total_bytes=11999, ## total_time=10, waiting_time=2) ## self.assertEqual(json.loads(log_file.getvalue()), ## {"started": 120, "total_time": 10, ## "waiting_time": 2, "total_bytes": 20000, ## "mood": "happy"}) ## def test_do_not_log(self): ## t = Transit(blur_usage=60, log_file=None, usage_db=None) ## t.recordUsage(started=123, result="happy", total_bytes=11999, ## total_time=10, waiting_time=2) 
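## NOTE: the commented-out tests above refer to a Transit class that is
## neither defined nor imported in this module; they appear to be kept
## for reference only.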
magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_util.py000066400000000000000000000024411355461217500304660ustar00rootroot00000000000000from __future__ import unicode_literals import unicodedata from twisted.trial import unittest from .. import util class Utils(unittest.TestCase): def test_to_bytes(self): b = util.to_bytes("abc") self.assertIsInstance(b, type(b"")) self.assertEqual(b, b"abc") A = unicodedata.lookup("LATIN SMALL LETTER A WITH DIAERESIS") b = util.to_bytes(A + "bc") self.assertIsInstance(b, type(b"")) self.assertEqual(b, b"\xc3\xa4\x62\x63") def test_bytes_to_hexstr(self): b = b"\x00\x45\x91\xfe\xff" hexstr = util.bytes_to_hexstr(b) self.assertIsInstance(hexstr, type("")) self.assertEqual(hexstr, "004591feff") def test_hexstr_to_bytes(self): hexstr = "004591feff" b = util.hexstr_to_bytes(hexstr) hexstr = util.bytes_to_hexstr(b) self.assertIsInstance(b, type(b"")) self.assertEqual(b, b"\x00\x45\x91\xfe\xff") def test_dict_to_bytes(self): d = {"a": "b"} b = util.dict_to_bytes(d) self.assertIsInstance(b, type(b"")) self.assertEqual(b, b'{"a": "b"}') def test_bytes_to_dict(self): b = b'{"a": "b", "c": 2}' d = util.bytes_to_dict(b) self.assertIsInstance(d, dict) self.assertEqual(d, {"a": "b", "c": 2}) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_web.py000066400000000000000000000655301355461217500302760ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import io, time import mock import treq from twisted.trial import unittest from twisted.internet import defer, reactor from twisted.internet.defer import inlineCallbacks, returnValue from ..web import make_web_server from ..server import SidedMessage from ..database import create_or_upgrade_usage_db from .common import ServerBase, _Util from .ws_client import WSFactory class WebSocketProtocolOptions(unittest.TestCase): @mock.patch('wormhole_mailbox_server.web.WebSocketServerFactory') def test_set(self, fake_factory): make_web_server(None, False, websocket_protocol_options=[ ("foo", "bar"), ], ) self.assertEqual( mock.call().setProtocolOptions(foo="bar"), fake_factory.mock_calls[1], ) class LogRequests(ServerBase, unittest.TestCase): def setUp(self): self._clients = [] def tearDown(self): for c in self._clients: c.transport.loseConnection() return ServerBase.tearDown(self) @inlineCallbacks def make_client(self): f = WSFactory(self.relayurl) f.d = defer.Deferred() reactor.connectTCP("127.0.0.1", self.rdv_ws_port, f) c = yield f.d self._clients.append(c) returnValue(c) @inlineCallbacks def test_log_http(self): yield self._setup_relay(do_listen=True, web_log_requests=True) # check the HTTP log fakelog = io.BytesIO() self._site.logFile = fakelog yield treq.get("http://127.0.0.1:%d/" % self.rdv_ws_port, persistent=False) lines = fakelog.getvalue().splitlines() self.assertEqual(len(lines), 1, lines) @inlineCallbacks def test_log_websocket(self): yield self._setup_relay(do_listen=True, web_log_requests=True) # now check the twisted log for websocket connect messages with mock.patch("wormhole_mailbox_server.server_websocket.log.msg") as l: c1 = yield self.make_client() yield c1.next_non_ack() # the actual message includes the TCP port number of the client client_port = self._clients[0].transport.getHost().port expected = "ws client connecting: tcp4:127.0.0.1:%d" % client_port self.assertEqual(l.mock_calls, [mock.call(expected)]) @inlineCallbacks def test_no_log_http(self): yield self._setup_relay(do_listen=True, web_log_requests=False) # check the HTTP 
log fakelog = io.BytesIO() self._site.logFile = fakelog yield treq.get("http://127.0.0.1:%d/" % self.rdv_ws_port, persistent=False) lines = fakelog.getvalue().splitlines() self.assertEqual(len(lines), 0, lines) @inlineCallbacks def test_no_log_websocket(self): yield self._setup_relay(do_listen=True, blur_usage=60, web_log_requests=True) # now check the twisted log for websocket connect messages with mock.patch("wormhole_mailbox_server.server_websocket.log.msg") as l: c1 = yield self.make_client() yield c1.next_non_ack() self.assertEqual(l.mock_calls, []) class WebSocketAPI(_Util, ServerBase, unittest.TestCase): @inlineCallbacks def setUp(self): self._lp = None self._clients = [] self._usage_db = usage_db = create_or_upgrade_usage_db(":memory:") yield self._setup_relay(do_listen=True, advertise_version="advertised.version", usage_db=usage_db) def tearDown(self): for c in self._clients: c.transport.loseConnection() return ServerBase.tearDown(self) @inlineCallbacks def make_client(self): f = WSFactory(self.relayurl) f.d = defer.Deferred() reactor.connectTCP("127.0.0.1", self.rdv_ws_port, f) c = yield f.d self._clients.append(c) returnValue(c) def check_welcome(self, data): self.failUnlessIn("welcome", data) self.failUnlessEqual(data["welcome"], {"current_cli_version": "advertised.version"}) @inlineCallbacks def test_welcome(self): c1 = yield self.make_client() msg = yield c1.next_non_ack() self.check_welcome(msg) self.assertEqual(self._server._apps, {}) @inlineCallbacks def test_bind(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid") # missing side= err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "bind requires 'side'") c1.send("bind", side="side") # missing appid= err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "bind requires 'appid'") c1.send("bind", appid="appid", side="side") yield c1.sync() self.assertEqual(list(self._server._apps.keys()), ["appid"]) c1.send("bind", appid="appid", side="side") # duplicate err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "already bound") c1.send_notype(other="misc") # missing 'type' err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "missing 'type'") c1.send("___unknown") # unknown type err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "unknown type") c1.send("ping") # missing 'ping' err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "ping requires 'ping'") @inlineCallbacks def test_bind_with_client_version(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side", client_version=("python", "1.2.3")) yield c1.sync() self.assertEqual(list(self._server._apps.keys()), ["appid"]) v = self._usage_db.execute("SELECT * FROM `client_versions`").fetchall() self.assertEqual(v[0]["app_id"], "appid") self.assertEqual(v[0]["side"], "side") self.assertEqual(v[0]["implementation"], "python") self.assertEqual(v[0]["version"], "1.2.3") @inlineCallbacks def test_bind_without_client_version(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") yield c1.sync() self.assertEqual(list(self._server._apps.keys()), ["appid"]) v = self._usage_db.execute("SELECT * FROM `client_versions`").fetchall() self.assertEqual(v[0]["app_id"], "appid") 
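# when the client omits client_version, both fields should be recorded as None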
self.assertEqual(v[0]["side"], "side") self.assertEqual(v[0]["implementation"], None) self.assertEqual(v[0]["version"], None) @inlineCallbacks def test_bind_with_client_version_extra_junk(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side", client_version=("python", "1.2.3", "extra ignore me")) yield c1.sync() self.assertEqual(list(self._server._apps.keys()), ["appid"]) v = self._usage_db.execute("SELECT * FROM `client_versions`").fetchall() self.assertEqual(v[0]["app_id"], "appid") self.assertEqual(v[0]["side"], "side") self.assertEqual(v[0]["implementation"], "python") self.assertEqual(v[0]["version"], "1.2.3") @inlineCallbacks def test_list(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("list") # too early, must bind first err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "must bind first") c1.send("bind", appid="appid", side="side") c1.send("list") m = yield c1.next_non_ack() self.assertEqual(m["type"], "nameplates") self.assertEqual(m["nameplates"], []) app = self._server.get_app("appid") nameplate_id1 = app.allocate_nameplate("side", 0) app.claim_nameplate("np2", "side", 0) c1.send("list") m = yield c1.next_non_ack() self.assertEqual(m["type"], "nameplates") nids = set() for n in m["nameplates"]: self.assertEqual(type(n), dict) self.assertEqual(list(n.keys()), ["id"]) nids.add(n["id"]) self.assertEqual(nids, set([nameplate_id1, "np2"])) @inlineCallbacks def test_allocate(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("allocate") # too early, must bind first err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "must bind first") c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") c1.send("allocate") m = yield c1.next_non_ack() self.assertEqual(m["type"], "allocated") name = m["nameplate"] nids = app.get_nameplate_ids() self.assertEqual(len(nids), 1) self.assertEqual(name, list(nids)[0]) c1.send("allocate") err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "you already allocated one, don't be greedy") c1.send("claim", nameplate=name) # allocate+claim is ok yield c1.sync() np_row, side_rows = self._nameplate(app, name) self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["side"], "side") @inlineCallbacks def test_claim(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") c1.send("claim") # missing nameplate= err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "claim requires 'nameplate'") c1.send("claim", nameplate="np1") m = yield c1.next_non_ack() self.assertEqual(m["type"], "claimed") mailbox_id = m["mailbox"] self.assertEqual(type(mailbox_id), type("")) c1.send("claim", nameplate="np1") err = yield c1.next_non_ack() self.assertEqual(err["type"], "error", err) self.assertEqual(err["error"], "only one claim per connection") nids = app.get_nameplate_ids() self.assertEqual(len(nids), 1) self.assertEqual("np1", list(nids)[0]) np_row, side_rows = self._nameplate(app, "np1") self.assertEqual(len(side_rows), 1) self.assertEqual(side_rows[0]["side"], "side") # claiming a nameplate assigns a random mailbox id and creates the # mailbox row mailboxes = app._db.execute("SELECT * FROM `mailboxes`" " WHERE `app_id`='appid'").fetchall() self.assertEqual(len(mailboxes), 1) 
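# For reference, a minimal sketch of the happy-path sequence these tests
# exercise (identifiers are the ones used in this file, not a protocol spec):
#   c.send("bind", appid="appid", side="side")
#   c.send("claim", nameplate="np1")   # -> {"type": "claimed", "mailbox": mid}
#   c.send("open", mailbox=mid)        # then "add" messages flow both ways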
@inlineCallbacks def test_claim_crowded(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") app.claim_nameplate("np1", "side1", 0) app.claim_nameplate("np1", "side2", 0) # the third claim will signal crowding c1.send("claim", nameplate="np1") err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "crowded") @inlineCallbacks def test_release(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") app.claim_nameplate("np1", "side2", 0) c1.send("release") # didn't do claim first err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "release without nameplate must follow claim") c1.send("claim", nameplate="np1") yield c1.next_non_ack() c1.send("release") m = yield c1.next_non_ack() self.assertEqual(m["type"], "released", m) np_row, side_rows = self._nameplate(app, "np1") claims = [(row["side"], row["claimed"]) for row in side_rows] self.assertIn(("side", False), claims) self.assertIn(("side2", True), claims) c1.send("release") # no longer claimed err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "only one release per connection") @inlineCallbacks def test_release_named(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") c1.send("claim", nameplate="np1") yield c1.next_non_ack() c1.send("release", nameplate="np1") m = yield c1.next_non_ack() self.assertEqual(m["type"], "released", m) @inlineCallbacks def test_release_named_ignored(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") c1.send("release", nameplate="np1") # didn't do claim first, ignored m = yield c1.next_non_ack() self.assertEqual(m["type"], "released", m) @inlineCallbacks def test_release_named_mismatch(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") c1.send("claim", nameplate="np1") yield c1.next_non_ack() c1.send("release", nameplate="np2") # mismatching nameplate err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "release and claim must use same nameplate") @inlineCallbacks def test_open(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") c1.send("open") # missing mailbox= err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "open requires 'mailbox'") mb1 = app.open_mailbox("mb1", "side2", 0) mb1.add_message(SidedMessage(side="side2", phase="phase", body="body", server_rx=0, msg_id="msgid")) c1.send("open", mailbox="mb1") m = yield c1.next_non_ack() self.assertEqual(m["type"], "message") self.assertEqual(m["body"], "body") self.assertTrue(mb1.has_listeners()) mb1.add_message(SidedMessage(side="side2", phase="phase2", body="body2", server_rx=0, msg_id="msgid")) m = yield c1.next_non_ack() self.assertEqual(m["type"], "message") self.assertEqual(m["body"], "body2") c1.send("open", mailbox="mb1") err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "only one open per connection") # exercise the _stop() handler too, which is a nop mb1.close("side2", "happy", 1) mb1.close("side", "happy", 2) @inlineCallbacks def test_open_crowded(self): 
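# like test_claim_crowded above, but here the third side trips the
# "crowded" check via the mailbox open rather than the nameplate claim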
c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") mbid = app.claim_nameplate("np1", "side1", 0) app.claim_nameplate("np1", "side2", 0) # the third open will signal crowding c1.send("open", mailbox=mbid) err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "crowded") @inlineCallbacks def test_add(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") mb1 = app.open_mailbox("mb1", "side2", 0) l1 = []; stop1 = []; stop1_f = lambda: stop1.append(True) mb1.add_listener("handle1", l1.append, stop1_f) c1.send("add") # didn't open first err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "must open mailbox before adding") c1.send("open", mailbox="mb1") c1.send("add", body="body") # missing phase= err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "missing 'phase'") c1.send("add", phase="phase") # missing body= err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "missing 'body'") c1.send("add", phase="phase", body="body") m = yield c1.next_non_ack() # echoed back self.assertEqual(m["type"], "message") self.assertEqual(m["body"], "body") self.assertEqual(len(l1), 1) self.assertEqual(l1[0].body, "body") @inlineCallbacks def test_close(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") c1.send("close", mood="mood") # must open first err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "close without mailbox must follow open") c1.send("open", mailbox="mb1") yield c1.sync() mb1 = app._mailboxes["mb1"] self.assertTrue(mb1.has_listeners()) c1.send("close", mood="mood") m = yield c1.next_non_ack() self.assertEqual(m["type"], "closed") self.assertFalse(mb1.has_listeners()) c1.send("close", mood="mood") # already closed err = yield c1.next_non_ack() self.assertEqual(err["type"], "error", m) self.assertEqual(err["error"], "only one close per connection") @inlineCallbacks def test_close_named(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") c1.send("open", mailbox="mb1") yield c1.sync() c1.send("close", mailbox="mb1", mood="mood") m = yield c1.next_non_ack() self.assertEqual(m["type"], "closed") @inlineCallbacks def test_close_named_ignored(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") c1.send("close", mailbox="mb1", mood="mood") # no open first, ignored m = yield c1.next_non_ack() self.assertEqual(m["type"], "closed") @inlineCallbacks def test_close_named_mismatch(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") c1.send("open", mailbox="mb1") yield c1.sync() c1.send("close", mailbox="mb2", mood="mood") err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "open and close must use same mailbox") @inlineCallbacks def test_close_crowded(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") mbid = app.claim_nameplate("np1", "side1", 0) app.claim_nameplate("np1", "side2", 0) # a close that allocates a third side will signal 
crowding c1.send("close", mailbox=mbid) err = yield c1.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "crowded") @inlineCallbacks def test_disconnect(self): c1 = yield self.make_client() yield c1.next_non_ack() c1.send("bind", appid="appid", side="side") app = self._server.get_app("appid") c1.send("open", mailbox="mb1") yield c1.sync() mb1 = app._mailboxes["mb1"] self.assertTrue(mb1.has_listeners()) yield c1.close() # wait for the server to notice the socket has closed started = time.time() while mb1.has_listeners() and (time.time()-started < 5.0): d = defer.Deferred() reactor.callLater(0.01, d.callback, None) yield d self.assertFalse(mb1.has_listeners()) @inlineCallbacks def test_interrupted_client_nameplate(self): # a client's interactions with the server might be split over # multiple sequential WebSocket connections, e.g. when the server is # bounced and the client reconnects, or vice versa c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") app = self._server.get_app("appid") c.send("claim", nameplate="np1") m = yield c.next_non_ack() self.assertEqual(m["type"], "claimed") mailbox_id = m["mailbox"] self.assertEqual(type(mailbox_id), type("")) np_row, side_rows = self._nameplate(app, "np1") claims = [(row["side"], row["claimed"]) for row in side_rows] self.assertEqual(claims, [("side", True)]) c.close() yield c.d c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") c.send("claim", nameplate="np1") # idempotent m = yield c.next_non_ack() self.assertEqual(m["type"], "claimed") self.assertEqual(m["mailbox"], mailbox_id) # mailbox id is stable np_row, side_rows = self._nameplate(app, "np1") claims = [(row["side"], row["claimed"]) for row in side_rows] self.assertEqual(claims, [("side", True)]) c.close() yield c.d c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") # we haven't done a claim with this particular connection, but we can # still send a release as long as we include the nameplate c.send("release", nameplate="np1") # release-without-claim m = yield c.next_non_ack() self.assertEqual(m["type"], "released") np_row, side_rows = self._nameplate(app, "np1") self.assertEqual(np_row, None) c.close() yield c.d c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") # and the release is idempotent, when done on separate connections c.send("release", nameplate="np1") m = yield c.next_non_ack() self.assertEqual(m["type"], "released") np_row, side_rows = self._nameplate(app, "np1") self.assertEqual(np_row, None) c.close() yield c.d @inlineCallbacks def test_interrupted_client_nameplate_reclaimed(self): c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") app = self._server.get_app("appid") # a new claim on a previously-closed nameplate is forbidden. We make # a new nameplate here and manually open a second claim on it, so the # nameplate stays alive long enough for the code check to happen. 
c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") c.send("claim", nameplate="np2") m = yield c.next_non_ack() self.assertEqual(m["type"], "claimed") app.claim_nameplate("np2", "side2", 0) c.send("release", nameplate="np2") m = yield c.next_non_ack() self.assertEqual(m["type"], "released") np_row, side_rows = self._nameplate(app, "np2") claims = sorted([(row["side"], row["claimed"]) for row in side_rows]) self.assertEqual(claims, [("side", 0), ("side2", 1)]) c.close() yield c.d c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") c.send("claim", nameplate="np2") # new claim is forbidden err = yield c.next_non_ack() self.assertEqual(err["type"], "error") self.assertEqual(err["error"], "reclaimed") np_row, side_rows = self._nameplate(app, "np2") claims = sorted([(row["side"], row["claimed"]) for row in side_rows]) self.assertEqual(claims, [("side", 0), ("side2", 1)]) c.close() yield c.d @inlineCallbacks def test_interrupted_client_mailbox(self): # a client's interactions with the server might be split over # multiple sequential WebSocket connections, e.g. when the server is # bounced and the client reconnects, or vice versa c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") app = self._server.get_app("appid") mb1 = app.open_mailbox("mb1", "side2", 0) mb1.add_message(SidedMessage(side="side2", phase="phase", body="body", server_rx=0, msg_id="msgid")) c.send("open", mailbox="mb1") m = yield c.next_non_ack() self.assertEqual(m["type"], "message") self.assertEqual(m["body"], "body") self.assertTrue(mb1.has_listeners()) c.close() yield c.d c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") # open should be idempotent c.send("open", mailbox="mb1") m = yield c.next_non_ack() self.assertEqual(m["type"], "message") self.assertEqual(m["body"], "body") mb_row, side_rows = self._mailbox(app, "mb1") openeds = [(row["side"], row["opened"]) for row in side_rows] self.assertIn(("side", 1), openeds) # TODO: why 1, and not True? 
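# (a guess at the TODO above: SQLite has no native boolean column type, so the
# sqlite3 module hands back the stored INTEGER 0/1 for 'opened' rather than a bool)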
# close on the same connection as open is ok c.send("close", mailbox="mb1", mood="mood") m = yield c.next_non_ack() self.assertEqual(m["type"], "closed", m) mb_row, side_rows = self._mailbox(app, "mb1") openeds = [(row["side"], row["opened"]) for row in side_rows] self.assertIn(("side", 0), openeds) c.close() yield c.d # close (on a separate connection) is idempotent c = yield self.make_client() yield c.next_non_ack() c.send("bind", appid="appid", side="side") c.send("close", mailbox="mb1", mood="mood") m = yield c.next_non_ack() self.assertEqual(m["type"], "closed", m) mb_row, side_rows = self._mailbox(app, "mb1") openeds = [(row["side"], row["opened"]) for row in side_rows] self.assertIn(("side", 0), openeds) c.close() yield c.d magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/test_ws_client.py000066400000000000000000000034371355461217500315060ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import json from twisted.trial import unittest from twisted.internet.defer import inlineCallbacks from .ws_client import WSClient class WSClientSync(unittest.TestCase): # make sure my 'sync' method actually works @inlineCallbacks def test_sync(self): sent = [] c = WSClient() def _send(mtype, **kwargs): sent.append( (mtype, kwargs) ) c.send = _send def add(mtype, **kwargs): kwargs["type"] = mtype c.onMessage(json.dumps(kwargs).encode("utf-8"), False) # no queued messages d = c.sync() self.assertEqual(sent, [("ping", {"ping": 0})]) self.assertNoResult(d) add("pong", pong=0) yield d self.assertEqual(c.events, []) # one,two,ping,pong add("one") add("two", two=2) d = c.sync() add("pong", pong=1) yield d m = yield c.next_non_ack() self.assertEqual(m["type"], "one") m = yield c.next_non_ack() self.assertEqual(m["type"], "two") self.assertEqual(c.events, []) # one,ping,two,pong add("one") d = c.sync() add("two", two=2) add("pong", pong=2) yield d m = yield c.next_non_ack() self.assertEqual(m["type"], "one") m = yield c.next_non_ack() self.assertEqual(m["type"], "two") self.assertEqual(c.events, []) # ping,one,two,pong d = c.sync() add("one") add("two", two=2) add("pong", pong=3) yield d m = yield c.next_non_ack() self.assertEqual(m["type"], "one") m = yield c.next_non_ack() self.assertEqual(m["type"], "two") self.assertEqual(c.events, []) magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/test/ws_client.py000066400000000000000000000050321355461217500304400ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import json, itertools from twisted.internet import defer from twisted.internet.defer import inlineCallbacks, returnValue from autobahn.twisted import websocket class WSClient(websocket.WebSocketClientProtocol): def __init__(self): websocket.WebSocketClientProtocol.__init__(self) self.events = [] self.errors = [] self.d = None self.ping_counter = itertools.count(0) def onOpen(self): self.factory.d.callback(self) def onMessage(self, payload, isBinary): assert not isBinary event = json.loads(payload.decode("utf-8")) if event["type"] == "error": self.errors.append(event) if self.d: assert not self.events d,self.d = self.d,None d.callback(event) return self.events.append(event) def close(self): self.d = defer.Deferred() self.transport.loseConnection() return self.d def onClose(self, wasClean, code, reason): if self.d: self.d.callback((wasClean, code, reason)) def next_event(self): assert not self.d if self.events: event = self.events.pop(0) return defer.succeed(event) self.d = defer.Deferred() return self.d 
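# next_non_ack() below yields the next server event while skipping protocol "ack"
# messages; an unexpected onClose tuple is converted into a test failure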
@inlineCallbacks def next_non_ack(self): while True: m = yield self.next_event() if isinstance(m, tuple): print("unexpected onClose", m) raise AssertionError("unexpected onClose") if m["type"] != "ack": returnValue(m) def strip_acks(self): self.events = [e for e in self.events if e["type"] != "ack"] def send(self, mtype, **kwargs): kwargs["type"] = mtype payload = json.dumps(kwargs).encode("utf-8") self.sendMessage(payload, False) def send_notype(self, **kwargs): payload = json.dumps(kwargs).encode("utf-8") self.sendMessage(payload, False) @inlineCallbacks def sync(self): ping = next(self.ping_counter) self.send("ping", ping=ping) # queue all messages until the pong, then put them back old_events = [] while True: ev = yield self.next_event() if ev["type"] == "pong" and ev["pong"] == ping: self.events = old_events + self.events returnValue(None) old_events.append(ev) class WSFactory(websocket.WebSocketClientFactory): protocol = WSClient magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/util.py000066400000000000000000000013741355461217500264540ustar00rootroot00000000000000# No unicode_literals import json, unicodedata from binascii import hexlify, unhexlify def to_bytes(u): return unicodedata.normalize("NFC", u).encode("utf-8") def bytes_to_hexstr(b): assert isinstance(b, type(b"")) hexstr = hexlify(b).decode("ascii") assert isinstance(hexstr, type(u"")) return hexstr def hexstr_to_bytes(hexstr): assert isinstance(hexstr, type(u"")) b = unhexlify(hexstr.encode("ascii")) assert isinstance(b, type(b"")) return b def dict_to_bytes(d): assert isinstance(d, dict) b = json.dumps(d).encode("utf-8") assert isinstance(b, type(b"")) return b def bytes_to_dict(b): assert isinstance(b, type(b"")) d = json.loads(b.decode("utf-8")) assert isinstance(d, dict) return d magic-wormhole-mailbox-server-0.4.1/src/wormhole_mailbox_server/web.py000066400000000000000000000016421355461217500262520ustar00rootroot00000000000000from twisted.web import server, static from twisted.web.resource import Resource from .server_websocket import WebSocketServerFactory from autobahn.twisted.resource import WebSocketResource class Root(Resource): # child_FOO is a nevow thing, not a twisted.web.resource thing def __init__(self): Resource.__init__(self) self.putChild(b"", static.Data(b"Wormhole Relay\n", "text/plain")) class PrivacyEnhancedSite(server.Site): logRequests = True def log(self, request): if self.logRequests: return server.Site.log(self, request) def make_web_server(server, log_requests, websocket_protocol_options=()): root = Root() wsrf = WebSocketServerFactory(None, server) wsrf.setProtocolOptions(**dict(websocket_protocol_options)) root.putChild(b"v1", WebSocketResource(wsrf)) site = PrivacyEnhancedSite(root) site.logRequests = log_requests return site magic-wormhole-mailbox-server-0.4.1/tox.ini000066400000000000000000000024471355461217500207160ustar00rootroot00000000000000# Tox (http://tox.testrun.org/) is a tool for running tests # in multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, "pip install tox" # and then run "tox" from this directory. [tox] envlist = {py27,py35,py36,py37,pypy} skip_missing_interpreters = True minversion = 2.4.0 [testenv] usedevelop = True extras = dev deps = pyflakes >= 1.2.3 commands = pyflakes setup.py src python -m twisted.trial {posargs:wormhole_mailbox_server} # on windows, trial is installed as venv/bin/trial.py, not .exe, but (at # least appveyor) adds .PY to $PATHEXT. 
So "trial wormhole" might work on # windows, and certainly does on unix. But to get "coverage run" to work, we # need a script name (since "python -m twisted.scripts.trial" doesn't have a # 'if __name__ == "__main__": run()' -style clause), and the script name will # vary on the platform. So we added a small class (wormhole.test.run_trial) # that does the right import for us. [testenv:coverage] deps = pyflakes >= 1.2.3 coverage commands = pyflakes setup.py src coverage run --branch -m twisted.trial {posargs:wormhole_mailbox_server} coverage xml [testenv:flake8] deps = flake8 commands = flake8 *.py src --count --select=E901,E999,F821,F822,F823 --statistics magic-wormhole-mailbox-server-0.4.1/versioneer.py000066400000000000000000002060031355461217500221300ustar00rootroot00000000000000 # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. 
The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. 
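For example, with the default style a checkout that is two commits past a "0.11" tag might render as (hypothetical values, shown for illustration only):

    import versioneer
    versioneer.get_versions()
    # => {'version': '0.11+2.g1076c97',
    #     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
    #     'dirty': False, 'error': None, 'date': '2019-09-11T10:00:00-0400'}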
The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. 
These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install occurs with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when they write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easy to extend to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py .
""" root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords.
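# In short (a summary of the code below, not a new mechanism): try the
# expanded git-archive keywords first, then 'git describe' against the source
# tree, then the parent-directory name, and finally fall back to "0+unknown".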
cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". 
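# For example (hypothetical refs): {'HEAD', 'master', '0.4.1'} would be
# narrowed to {'0.4.1'} by the digit heuristic on the next line.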
tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags.


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}


class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
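

# Illustrative sketch, not part of upstream Versioneer: render() dispatches
# on the configured style string. All field values below are invented.
def _example_render_styles():
    pieces = {"closest-tag": "1.2.3", "distance": 2, "short": "abcdef0",
              "dirty": False, "error": None,
              "long": "abcdef0123456789abcdef0123456789abcdef01",
              "date": "2019-09-11T12:00:00+0000"}
    assert render(pieces, "pep440")["version"] == "1.2.3+2.gabcdef0"
    assert render(pieces, "git-describe")["version"] == "1.2.3-2-gabcdef0"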


def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with keys 'version', 'full-revisionid', 'dirty', 'error',
    and 'date'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.

    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}


def get_version():
    """Get the short version string for this project."""
    return get_versions()["version"]


def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?

    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0],  # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...

        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir,
                                              cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds
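

# Illustrative sketch, not part of upstream Versioneer: the shape of the
# dict returned by get_versions() and render() above. All concrete values
# below are invented for demonstration.
def _example_versions_dict():
    return {"version": "1.2.3+2.gabcdef0",  # rendered per the configured style
            "full-revisionid": "abcdef0123456789abcdef0123456789abcdef01",
            "dirty": False,  # True when the working tree had local changes
            "error": None,   # a string when no method could compute a version
            "date": "2019-09-11T12:34:56+0200"}  # ISO-8601-ish commit date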


CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:

 [versioneer]
 VCS = git
 style = pep440
 versionfile_source = src/myproject/_version.py
 versionfile_build = myproject/_version.py
 tag_prefix =
 parentdir_prefix = myproject-

You will also need to edit your setup.py to use the results:

 import versioneer
 setup(version=versioneer.get_version(),
       cmdclass=versioneer.get_cmdclass(), ...)

Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.

[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =

"""

INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""


def do_setup():
    """Main VCS-independent setup function for installing Versioneer."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })

    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0


def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f.readlines():
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print("        cmdclass=versioneer.get_cmdclass(),  ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors


if __name__ == "__main__":
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
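

# Illustrative usage sketch, not part of upstream Versioneer: a minimal
# setup.py for a hypothetical project wiring in the helpers above. The
# project name "myproject" is invented for demonstration.
#
#     import versioneer
#     from setuptools import setup
#
#     setup(name="myproject",
#           version=versioneer.get_version(),
#           cmdclass=versioneer.get_cmdclass())
#
# After that, a one-time "python versioneer.py setup" (handled by the
# __main__ block above) writes _version.py and updates MANIFEST.in and
# .gitattributes.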