pax_global_header00006660000000000000000000000064135546107110014515gustar00rootroot0000000000000052 comment=604cb786b27edc44a9b0345a6ce945b5adffcc26 magic-wormhole-transit-relay-0.2.1/000077500000000000000000000000001355461071100172235ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/.coveragerc000066400000000000000000000013611355461071100213450ustar00rootroot00000000000000# -*- mode: conf -*- [run] # only record trace data for wormhole_transit_relay.* source = wormhole_transit_relay # and don't trace the test files themselves, or Versioneer's stuff omit = src/wormhole_transit_relay/test/* src/wormhole_transit_relay/_version.py # This allows 'coverage combine' to correlate the tracing data built while # running tests in multiple tox virtualenvs. To take advantage of this # properly, use "coverage erase" before tox, "coverage run --parallel-mode" # inside tox to avoid overwriting the output data (by writing it into # .coverage-XYZ instead of just .coverage), and run "coverage combine" # afterwards. [paths] source = src/ .tox/*/lib/python*/site-packages/ .tox/pypy*/site-packages/ magic-wormhole-transit-relay-0.2.1/LICENSE000066400000000000000000000020551355461071100202320ustar00rootroot00000000000000MIT License Copyright (c) 2017 Brian Warner Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. magic-wormhole-transit-relay-0.2.1/MANIFEST.in000066400000000000000000000003431355461071100207610ustar00rootroot00000000000000include versioneer.py include src/wormhole_transit_relay/_version.py include LICENSE README.md NEWS.md recursive-include docs *.md *.rst *.dot include .coveragerc tox.ini include misc/*.py include misc/munin/wormhole_transit* magic-wormhole-transit-relay-0.2.1/NEWS.md000066400000000000000000000014321355461071100203210ustar00rootroot00000000000000User-visible changes in "magic-wormhole-transit-relay": ## Release 0.2.1 (11-Sep-2019) * listen on IPv4+IPv6 properly (#12) ## Release 0.2.0 (10-Sep-2019) * listen on IPv4+IPv6 socket by default (#12) * enable SO_KEEPALIVE on all connections (#9) * drop support for py3.3 and py3.4 * improve munin plugins ## Release 0.1.2 (19-Mar-2018) * Allow more simultaneous connections, by increasing the rlimits() ceiling at startup * Improve munin plugins * Get tests working on Windows ## Release 0.1.1 (14-Feb-2018) Improve logging and munin graphing tools: previous version would count bad handshakes twice (once as "errory", and again as "lonely"). The munin plugins have been renamed. ## Release 0.1.0 (12-Nov-2017) Initial release. Forked from magic-wormhole-0.10.3 (12-Sep-2017). 
magic-wormhole-transit-relay-0.2.1/PKG-INFO
Metadata-Version: 2.1
Name: magic-wormhole-transit-relay
Version: 0.2.1
Summary: Transit Relay server for Magic-Wormhole
Home-page: https://github.com/warner/magic-wormhole-transit-relay
Author: Brian Warner
Author-email: warner-magic-wormhole@lothar.com
License: MIT
Description: UNKNOWN
Platform: UNKNOWN
Provides-Extra: dev
magic-wormhole-transit-relay-0.2.1/README.md
# magic-wormhole-transit-relay

[![PyPI](http://img.shields.io/pypi/v/magic-wormhole-transit-relay.svg)](https://pypi.python.org/pypi/magic-wormhole-transit-relay)
[![Build Status](https://travis-ci.org/warner/magic-wormhole-transit-relay.svg?branch=master)](https://travis-ci.org/warner/magic-wormhole-transit-relay)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/61kgarqikolbvj1m/branch/master?svg=true)](https://ci.appveyor.com/project/warner/magic-wormhole-transit-relay)
[![codecov.io](https://codecov.io/github/warner/magic-wormhole-transit-relay/coverage.svg?branch=master)](https://codecov.io/github/warner/magic-wormhole-transit-relay?branch=master)

Transit Relay server for Magic-Wormhole

This repository implements the Magic-Wormhole "Transit Relay", a server that
helps clients establish bulk-data transit connections even when both are
behind NAT boxes. Each side makes a TCP connection to this server and
presents a handshake. Two connections with identical handshakes are glued
together, allowing them to pretend they have a direct connection.

This server used to be included in the magic-wormhole repository, but was
split out into a separate repo to aid deployment and development.

See docs/running.md for instructions to launch the server.
magic-wormhole-transit-relay-0.2.1/docs/logging.md
# Usage Logs

The transit relay does not emit or record any logging by default. By adding
option flags to the twist/twistd command line, you can enable one of two
different kinds of logs.

To avoid collecting information which could later be used to correlate
clients with external network traces, logged information can be "blurred".
This reduces the resolution of the data, retaining enough to answer
questions about how much the server is being used, but discarding
fine-grained timestamps or exact transfer sizes. The ``--blur-usage=``
option enables this, and it takes an integer value (in seconds) to specify
the desired time window.

## Logging JSON Upon Each Connection

If --log-fd is provided, a line will be written to the given (numeric) file
descriptor after each connection is done. These events could be delivered to
a comprehensive logging system like XXX for offline analysis.

Each line will be a complete JSON object (starting with ``{``, ending with
``}\n``, and containing no internal newlines). The keys will be:

* ``started``: number, seconds since epoch
* ``total_time``: number, seconds from open to last close
* ``waiting_time``: number, seconds from start to 2nd side appearing, or null
* ``total_bytes``: number, total bytes relayed (sum of both directions)
* ``mood``: string, one of: happy, lonely, errory, redundant

A mood of ``happy`` means both sides gave a correct handshake.
``lonely`` means a second matching side never appeared (and thus
``waiting_time`` will be null). ``errory`` means the first side gave an
invalid handshake. ``redundant`` means the handshake was good, but the
connection was abandoned in favor of a different connection for the same
side.

If --blur-usage= is provided, then ``started`` will be rounded to the given
time interval, and ``total_bytes`` will be rounded to a fixed set of
buckets:

* file sizes less than 1MB: rounded up to the next multiple of 10kB
* less than 1GB: multiple of 1MB
* 1GB or larger: multiple of 100MB

## Usage Database

If --usage-db= is provided, the server will maintain a SQLite database in
the given file. Current, recent, and historical usage data will be written
to the database, and external tools can query the DB for metrics: the munin
plugins in misc/ may be useful. Timestamps and sizes in this file will
respect --blur-usage. The four tables are:

``current`` contains a single row, with these columns:

* connected: number of paired connections
* waiting: number of not-yet-paired connections
* incomplete_bytes: bytes transmitted over not-yet-complete connections

``since_reboot`` contains a single row, with these columns:

* bytes: sum of ``total_bytes``
* connections: number of completed connections
* mood_happy: count of connections that finished "happy": both sides gave
  correct handshakes
* mood_lonely: one side gave a good handshake, but the other side never
  showed up
* mood_errory: one side gave a bad handshake

``all_time`` contains a single row, with the same columns as
``since_reboot``, but the counts cover every connection since the database
was created, not just those since the last reboot.

``usage`` contains one row per closed connection, with these columns:

* started: seconds since epoch, rounded to "blur time"
* total_time: seconds from first open to last close
* waiting_time: seconds from first open to second open, or None
* bytes: total bytes relayed (in both directions)
* result: (string) the mood: happy, lonely, errory, or redundant

All tables will be updated after each connection is finished. In addition,
the ``current`` table will be updated at least once every 5 minutes.

## Logfiles for twistd

If daemonized by twistd, the server will write ``twistd.pid`` and
``twistd.log`` files as usual. By default ``twistd.log`` will only contain
startup, shutdown, and exception messages. Setting ``--log-fd=1`` (file
descriptor 1 is always stdout) will cause the per-connection JSON lines to
be interleaved with any messages sent to Twisted's logging system. It may be
better to use a different file descriptor.
magic-wormhole-transit-relay-0.2.1/docs/running.md
# Running the Transit Relay

First off, you probably don't need to run a relay. The ``wormhole``
command, as shipped from magic-wormhole.io, is configured to use a default
Transit Relay operated by the author of Magic-Wormhole. This can be changed
with the ``--transit-helper=`` argument, and other applications that import
the Wormhole library might point elsewhere.

The only reasons to run a separate relay are:

* You are a kind-hearted server admin who wishes to support the project by
  paying the bandwidth costs incurred by your friends, whom you instruct in
  the use of ``--transit-helper=``.
* You publish a different application, and want to provide your users with
  a relay that fails at different times than the official one.

## Installation

To run a transit relay, first you need an environment to install it.

* create a virtualenv
* ``pip install magic-wormhole-transit-relay`` into this virtualenv

```
% virtualenv tr-venv
...
% tr-venv/bin/pip install magic-wormhole-transit-relay
...
```

## Running

The transit relay is not a standalone program: rather it is a plugin for
the Twisted application-running tools named ``twist`` (which only runs in
the foreground) and ``twistd`` (which daemonizes). To run the relay for
testing, use something like this:

```
% tr-venv/bin/twist transitrelay [ARGS]
2017-11-09T17:07:28-0800 [-] not blurring access times
2017-11-09T17:07:28-0800 [-] Transit starting on 4001
2017-11-09T17:07:28-0800 [wormhole_transit_relay.transit_server.Transit#info] Starting factory ...
```

The relevant arguments are:

* ``--port=``: the endpoint to listen on, like ``tcp:4001``
* ``--log-fd=``: writes JSON lines to the given file descriptor for each
  connection
* ``--usage-db=``: maintains a SQLite database with current and historical
  usage data
* ``--blur-usage=``: if provided, logs are rounded to the given number of
  seconds, and data sizes are rounded too

When you use ``twist``, the relay runs in the foreground, so it will
generally exit as soon as the controlling terminal exits. For persistent
environments, you should daemonize the server.

## Daemonization

A production installation will want to daemonize the server somehow. One
option is to use ``twistd`` (the daemonizing version of ``twist``). This
takes the same plugin name and arguments as ``twist``, but forks into the
background, detaches from the controlling terminal, and writes all output
into a logfile:

```
% tr-venv/bin/twistd transitrelay [ARGS]
% cat twistd.log
2017-11-09T17:07:28-0800 [-] not blurring access times
2017-11-09T17:07:28-0800 [-] Transit starting on 4001
2017-11-09T17:07:28-0800 [wormhole_transit_relay.transit_server.Transit#info] Starting factory ...
% cat twistd.pid; echo
18985
```

To shut down a ``twistd``-based server, you'll need to look in the
``twistd.pid`` file for the process id, and kill it:

```
% kill `cat twistd.pid`
```

To start the server each time the host reboots, you might use a crontab
"@reboot" job, or a systemd unit.

Another option is to run ``twist`` underneath a daemonization tool like
``daemontools`` or ``start-stop-daemon``. Since ``twist`` is just a regular
program, this leaves the daemonization tool in charge of issues like
restarting a process that exits unexpectedly, limiting the rate of
respawning, and switching to the correct user-id and base directory.

Packagers who create an installable transit-relay server package should
choose a suitable daemonization tool that matches the practices of the
target operating system. For example, Debian/Ubuntu packages should
probably include a systemd unit that runs ``twist transitrelay`` in some
``/var/run/magic-wormhole-transit-relay/`` directory.

Production environments that want to monitor the server for capacity
management can use the ``--log-fd=`` option to emit logs, then route those
logs into a suitable analysis tool. Other environments might be content to
use ``--usage-db=`` and run the included Munin plugins to monitor usage.

There is also a
[Dockerfile](https://github.com/ggeorgovassilis/magic-wormhole-transit-relay-docker),
written by George Georgovassilis, which you might find useful.

## Configuring Clients

The transit relay will listen on an "endpoint" (usually a TCP port, but it
could be a unix-domain socket or any other Endpoint that Twisted knows how
to listen on). By default this is ``tcp:4001``. The relay does not know
what hostname or IP address might point at it.
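Any server-endpoint string that Twisted can parse should be accepted by
``--port=``. As a purely illustrative sketch (these exact values are
examples, not recommendations or defaults):

```
% tr-venv/bin/twist transitrelay --port=tcp:4002
% tr-venv/bin/twist transitrelay --port=unix:/var/run/transit.sock
```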
Clients are configured with a "Transit Helper" setting that includes both
the hostname and the port number, like the default
``tcp:transit.magic-wormhole.io:4001``. The standard ``wormhole`` tool
takes a ``--transit-helper=`` argument to override this. Other applications
that use ``wormhole`` as a library will have internal means to configure
which transit relay they use.

If you run your own transit relay, you will need to provide the new
settings to your clients for it to be used.

The standard ``wormhole`` tool is used by two sides: the sender and the
receiver. Both sides exchange their configured transit relay with their
partner. So if the sender overrides ``--transit-helper=`` but the receiver
does not, they might wind up using either relay server, depending upon
which one gets an established connection first.
magic-wormhole-transit-relay-0.2.1/docs/transit.md
# Transit Protocol

The Transit protocol is responsible for establishing an encrypted
bidirectional record stream between two programs. It must be given a
"transit key" and a set of "hints" which help locate the other end (which
are both delivered by Wormhole).

The protocol tries hard to create a **direct** connection between the two
ends, but if that fails, it uses a centralized relay server to ferry data
between two separate TCP streams (one to each client). This repository
provides that centralized relay server.

For details of the protocol spoken by the clients, and the client-side API,
please see ``transit.md`` in the magic-wormhole repository.

## Relay

The **Transit Relay** is a host which offers TURN-like services for
magic-wormhole instances. It uses a TCP-based protocol with a handshake to
determine which connection wants to be connected to which.

When connecting to a relay, the Transit client first writes
RELAY-HANDSHAKE to the socket, which is `please relay %s\n`, where `%s` is
the hex-encoded 32-byte HKDF derivative of the transit key, using
`transit_relay_token` as the context. The client then waits for `ok\n`.

The relay waits for a second connection that uses the same token. When this
happens, the relay sends `ok\n` to both, then wires the connections
together, so that everything received after the token on one is written out
(after the ok) on the other. When either connection is lost, the other will
be closed (the relay does not support "half-close").

When clients use a relay connection, they perform the usual
sender/receiver handshake just after the `ok\n` is received: until that
point they pretend the connection doesn't even exist.

Direct connections are better, since they are faster and less expensive for
the relay operator. If there are any potentially-viable direct connection
hints available, the Transit instance will wait a few seconds before
attempting to use the relay. If it has no viable direct hints, it will
start using the relay right away. This prefers direct connections, but
doesn't introduce completely unnecessary stalls.
magic-wormhole-transit-relay-0.2.1/misc/migrate_usage_db.py
"""Migrate the usage data from the old bundled Transit Relay database.

The magic-wormhole package used to include both servers (Rendezvous and
Transit). "wormhole server" started both of these, and used the
"relay.sqlite" database to store both immediate server state and long-term
usage data.

These were split out to their own packages: version 0.11 omitted the
Transit Relay in favor of the new "magic-wormhole-transit-relay"
distribution.

This script reads the long-term Transit usage data from the pre-0.11
wormhole-server relay.sqlite, and copies it into a new "usage.sqlite"
database in the current directory.

It will refuse to touch an existing "usage.sqlite" file.

The resulting "usage.sqlite" should be passed into --usage-db=, e.g.
"twist transitrelay --usage-db=.../PATH/TO/usage.sqlite".
"""

from __future__ import unicode_literals, print_function
import sys
from wormhole_transit_relay.database import open_existing_db, create_db

source_fn = sys.argv[1]
source_db = open_existing_db(source_fn)
target_db = create_db("usage.sqlite")

num_rows = 0
for row in source_db.execute("SELECT * FROM `transit_usage`"
                             " ORDER BY `started`").fetchall():
    target_db.execute("INSERT INTO `usage`"
                      " (`started`, `total_time`, `waiting_time`,"
                      " `total_bytes`, `result`)"
                      " VALUES(?,?,?,?,?)",
                      (row["started"], row["total_time"],
                       row["waiting_time"], row["total_bytes"],
                       row["result"]))
    num_rows += 1
target_db.execute("INSERT INTO `current`"
                  " (`rebooted`, `updated`, `connected`, `waiting`,"
                  " `incomplete_bytes`)"
                  " VALUES(?,?,?,?,?)",
                  (0, 0, 0, 0, 0))
target_db.commit()

print("usage database migrated (%d rows) into 'usage.sqlite'" % num_rows)
sys.exit(0)
magic-wormhole-transit-relay-0.2.1/misc/munin/wormhole_transit_active
#! /usr/bin/env python
"""
Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.usagedb /path/to/your/wormhole/server/usage.sqlite
"""

from __future__ import print_function
import os, sys, time, sqlite3

CONFIG = """\
graph_title Magic-Wormhole Transit Active Channels
graph_vlabel Channels
graph_category wormhole
waiting.label Transit Waiting
waiting.draw LINE1
waiting.type GAUGE
connected.label Transit Connected
connected.draw LINE1
connected.type GAUGE
"""

if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

dbfile = os.environ["usagedb"]
assert os.path.exists(dbfile)
db = sqlite3.connect(dbfile)

MINUTE = 60.0
updated,waiting,connected = db.execute("SELECT `updated`,`waiting`,`connected`"
                                       " FROM `current`").fetchone()
if time.time() > updated + 5*MINUTE:
    sys.exit(1) # expired

print("waiting.value", waiting)
print("connected.value", connected)
magic-wormhole-transit-relay-0.2.1/misc/munin/wormhole_transit_bytes
#!
/usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Transit Usage (since reboot) graph_vlabel Bytes Since Reboot graph_category wormhole bytes.label Transit Bytes (complete) bytes.draw LINE1 bytes.type GAUGE incomplete.label Transit Bytes (incomplete) incomplete.draw LINE1 incomplete.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) dbfile = os.environ["usagedb"] assert os.path.exists(dbfile) db = sqlite3.connect(dbfile) MINUTE = 60.0 updated,rebooted,incomplete = db.execute("SELECT `updated`,`rebooted`,`incomplete_bytes` FROM `current`").fetchone() if time.time() > updated + 5*MINUTE: sys.exit(1) # expired complete = db.execute("SELECT SUM(`total_bytes`) FROM `usage`" " WHERE `started` > ?", (rebooted,)).fetchone()[0] or 0 print("bytes.value", complete) print("incomplete.value", complete+incomplete) magic-wormhole-transit-relay-0.2.1/misc/munin/wormhole_transit_bytes_alltime000077500000000000000000000021141355461071100275450ustar00rootroot00000000000000#! /usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Transit Usage (all time) graph_vlabel Bytes Since DB Creation graph_category wormhole bytes.label Transit Bytes (complete) bytes.draw LINE1 bytes.type GAUGE incomplete.label Transit Bytes (incomplete) incomplete.draw LINE1 incomplete.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) dbfile = os.environ["usagedb"] assert os.path.exists(dbfile) db = sqlite3.connect(dbfile) MINUTE = 60.0 updated,incomplete = db.execute("SELECT `updated`,`incomplete_bytes`" " FROM `current`").fetchone() if time.time() > updated + 5*MINUTE: sys.exit(1) # expired complete = db.execute("SELECT SUM(`total_bytes`)" " FROM `usage`").fetchone()[0] or 0 print("bytes.value", complete) print("incomplete.value", complete+incomplete) magic-wormhole-transit-relay-0.2.1/misc/munin/wormhole_transit_events000077500000000000000000000035161355461071100262230ustar00rootroot00000000000000#! /usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Transit Server Events (since reboot) graph_vlabel Events Since Reboot graph_category wormhole happy.label Happy happy.draw LINE1 happy.type GAUGE errory.label Errory errory.draw LINE1 errory.type GAUGE lonely.label Lonely lonely.draw LINE1 lonely.type GAUGE redundant.label Redundant redundant.draw LINE1 redundant.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) dbfile = os.environ["usagedb"] assert os.path.exists(dbfile) db = sqlite3.connect(dbfile) MINUTE = 60.0 rebooted,updated = db.execute("SELECT `rebooted`, `updated` FROM `current`").fetchone() if time.time() > updated + 5*MINUTE: sys.exit(1) # expired count = db.execute("SELECT COUNT() FROM `usage`" " WHERE" " `started` > ? 
AND" " `result` = 'happy'", (rebooted,)).fetchone()[0] print("happy.value", count) count = db.execute("SELECT COUNT() FROM `usage`" " WHERE" " `started` > ? AND" " `result` = 'errory'", (rebooted,)).fetchone()[0] print("errory.value", count) count = db.execute("SELECT COUNT() FROM `usage`" " WHERE" " `started` > ? AND" " `result` = 'lonely'", (rebooted,)).fetchone()[0] print("lonely.value", count) count = db.execute("SELECT COUNT() FROM `usage`" " WHERE" " `started` > ? AND" " `result` = 'redundant'", (rebooted,)).fetchone()[0] print("redundant.value", count) magic-wormhole-transit-relay-0.2.1/misc/munin/wormhole_transit_events_alltime000077500000000000000000000030651355461071100277310ustar00rootroot00000000000000#! /usr/bin/env python """ Use the following in /etc/munin/plugin-conf.d/wormhole : [wormhole_*] env.usagedb /path/to/your/wormhole/server/usage.sqlite """ from __future__ import print_function import os, sys, time, sqlite3 CONFIG = """\ graph_title Magic-Wormhole Transit Server Events (all time) graph_vlabel Events graph_category wormhole happy.label Happy happy.draw LINE1 happy.type GAUGE errory.label Errory errory.draw LINE1 errory.type GAUGE lonely.label Lonely lonely.draw LINE1 lonely.type GAUGE redundant.label Redundant redundant.draw LINE1 redundant.type GAUGE """ if len(sys.argv) > 1 and sys.argv[1] == "config": print(CONFIG.rstrip()) sys.exit(0) dbfile = os.environ["usagedb"] assert os.path.exists(dbfile) db = sqlite3.connect(dbfile) MINUTE = 60.0 rebooted,updated = db.execute("SELECT `rebooted`, `updated` FROM `current`").fetchone() if time.time() > updated + 5*MINUTE: sys.exit(1) # expired count = db.execute("SELECT COUNT() FROM `usage`" " WHERE `result` = 'happy'", ).fetchone()[0] print("happy.value", count) count = db.execute("SELECT COUNT() FROM `usage`" " WHERE `result` = 'errory'", ).fetchone()[0] print("errory.value", count) count = db.execute("SELECT COUNT() FROM `usage`" " WHERE `result` = 'lonely'", ).fetchone()[0] print("lonely.value", count) count = db.execute("SELECT COUNT() FROM `usage`" " WHERE `result` = 'redundant'", ).fetchone()[0] print("redundant.value", count) magic-wormhole-transit-relay-0.2.1/setup.cfg000066400000000000000000000004061355461071100210440ustar00rootroot00000000000000[wheel] universal = 1 [versioneer] vcs = git versionfile_source = src/wormhole_transit_relay/_version.py versionfile_build = wormhole_transit_relay/_version.py tag_prefix = parentdir_prefix = magic-wormhole-transit-relay [egg_info] tag_build = tag_date = 0 magic-wormhole-transit-relay-0.2.1/setup.py000066400000000000000000000016321355461071100207370ustar00rootroot00000000000000from setuptools import setup import versioneer commands = versioneer.get_cmdclass() setup(name="magic-wormhole-transit-relay", version=versioneer.get_version(), description="Transit Relay server for Magic-Wormhole", author="Brian Warner", author_email="warner-magic-wormhole@lothar.com", license="MIT", url="https://github.com/warner/magic-wormhole-transit-relay", package_dir={"": "src"}, packages=["wormhole_transit_relay", "wormhole_transit_relay.test", "twisted.plugins", ], package_data={"wormhole_transit_relay": ["db-schemas/*.sql"]}, install_requires=[ "twisted >= 17.5.0", ], extras_require={ ':sys_platform=="win32"': ["pypiwin32"], "dev": ["mock", "tox", "pyflakes"], }, test_suite="wormhole_transit_relay.test", cmdclass=commands, ) 
magic-wormhole-transit-relay-0.2.1/src/000077500000000000000000000000001355461071100200125ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/magic_wormhole_transit_relay.egg-info/000077500000000000000000000000001355461071100274405ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/magic_wormhole_transit_relay.egg-info/PKG-INFO000066400000000000000000000005071355461071100305370ustar00rootroot00000000000000Metadata-Version: 2.1 Name: magic-wormhole-transit-relay Version: 0.2.1 Summary: Transit Relay server for Magic-Wormhole Home-page: https://github.com/warner/magic-wormhole-transit-relay Author: Brian Warner Author-email: warner-magic-wormhole@lothar.com License: MIT Description: UNKNOWN Platform: UNKNOWN Provides-Extra: dev magic-wormhole-transit-relay-0.2.1/src/magic_wormhole_transit_relay.egg-info/SOURCES.txt000066400000000000000000000025151355461071100313270ustar00rootroot00000000000000.coveragerc LICENSE MANIFEST.in NEWS.md README.md setup.cfg setup.py tox.ini versioneer.py docs/logging.md docs/running.md docs/transit.md misc/migrate_usage_db.py misc/munin/wormhole_transit_active misc/munin/wormhole_transit_bytes misc/munin/wormhole_transit_bytes_alltime misc/munin/wormhole_transit_events misc/munin/wormhole_transit_events_alltime src/magic_wormhole_transit_relay.egg-info/PKG-INFO src/magic_wormhole_transit_relay.egg-info/SOURCES.txt src/magic_wormhole_transit_relay.egg-info/dependency_links.txt src/magic_wormhole_transit_relay.egg-info/requires.txt src/magic_wormhole_transit_relay.egg-info/top_level.txt src/twisted/plugins/magic_wormhole_transit_relay.py src/wormhole_transit_relay/__init__.py src/wormhole_transit_relay/_version.py src/wormhole_transit_relay/database.py src/wormhole_transit_relay/increase_rlimits.py src/wormhole_transit_relay/server_tap.py src/wormhole_transit_relay/transit_server.py src/wormhole_transit_relay/db-schemas/v1.sql src/wormhole_transit_relay/test/__init__.py src/wormhole_transit_relay/test/common.py src/wormhole_transit_relay/test/test_config.py src/wormhole_transit_relay/test/test_database.py src/wormhole_transit_relay/test/test_rlimits.py src/wormhole_transit_relay/test/test_service.py src/wormhole_transit_relay/test/test_stats.py src/wormhole_transit_relay/test/test_transit_server.pymagic-wormhole-transit-relay-0.2.1/src/magic_wormhole_transit_relay.egg-info/dependency_links.txt000066400000000000000000000000011355461071100335060ustar00rootroot00000000000000 magic-wormhole-transit-relay-0.2.1/src/magic_wormhole_transit_relay.egg-info/requires.txt000066400000000000000000000001151355461071100320350ustar00rootroot00000000000000twisted>=17.5.0 [:sys_platform=="win32"] pypiwin32 [dev] mock tox pyflakes magic-wormhole-transit-relay-0.2.1/src/magic_wormhole_transit_relay.egg-info/top_level.txt000066400000000000000000000000371355461071100321720ustar00rootroot00000000000000twisted wormhole_transit_relay magic-wormhole-transit-relay-0.2.1/src/twisted/000077500000000000000000000000001355461071100214755ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/twisted/plugins/000077500000000000000000000000001355461071100231565ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/twisted/plugins/magic_wormhole_transit_relay.py000066400000000000000000000004371355461071100314700ustar00rootroot00000000000000from twisted.application.service import ServiceMaker TransitRelay = ServiceMaker( "Magic-Wormhole Transit Relay", # name "wormhole_transit_relay.server_tap", # module "Provide the Transit Relay server for 
Magic-Wormhole clients.", # desc "transitrelay", # tapname ) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/000077500000000000000000000000001355461071100246065ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/__init__.py000066400000000000000000000001351355461071100267160ustar00rootroot00000000000000 from ._version import get_versions __version__ = get_versions()['version'] del get_versions magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/_version.py000066400000000000000000000007611355461071100270100ustar00rootroot00000000000000 # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = ''' { "date": "2019-09-11T00:25:26-0700", "dirty": false, "error": null, "full-revisionid": "c6445321d78bc7f3a795cdf7c71a45731dcffbee", "version": "0.2.1" } ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/database.py000066400000000000000000000115421355461071100267270ustar00rootroot00000000000000from __future__ import unicode_literals import os import sqlite3 import tempfile from pkg_resources import resource_string from twisted.python import log class DBError(Exception): pass def get_schema(version): schema_bytes = resource_string("wormhole_transit_relay", "db-schemas/v%d.sql" % version) return schema_bytes.decode("utf-8") ## def get_upgrader(new_version): ## schema_bytes = resource_string("wormhole_transit_relay", ## "db-schemas/upgrade-to-v%d.sql" % new_version) ## return schema_bytes.decode("utf-8") TARGET_VERSION = 1 def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d def _initialize_db_schema(db, target_version): """Creates the application schema in the given database. """ log.msg("populating new database with schema v%s" % target_version) schema = get_schema(target_version) db.executescript(schema) db.execute("INSERT INTO version (version) VALUES (?)", (target_version,)) db.commit() def _initialize_db_connection(db): """Sets up the db connection object with a row factory and with necessary foreign key settings. """ db.row_factory = dict_factory db.execute("PRAGMA foreign_keys = ON") problems = db.execute("PRAGMA foreign_key_check").fetchall() if problems: raise DBError("failed foreign key check: %s" % (problems,)) def _open_db_connection(dbfile): """Open a new connection to the SQLite3 database at the given path. """ try: db = sqlite3.connect(dbfile) _initialize_db_connection(db) except (EnvironmentError, sqlite3.OperationalError, sqlite3.DatabaseError) as e: # this indicates that the file is not a compatible database format. # Perhaps it was created with an old version, or it might be junk. raise DBError("Unable to create/open db file %s: %s" % (dbfile, e)) return db def _get_temporary_dbfile(dbfile): """Get a temporary filename near the given path. """ fd, name = tempfile.mkstemp( prefix=os.path.basename(dbfile) + ".", dir=os.path.dirname(dbfile) ) os.close(fd) return name def _atomic_create_and_initialize_db(dbfile, target_version): """Create and return a new database, initialized with the application schema. If anything goes wrong, nothing is left at the ``dbfile`` path. 
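    (It does this by initializing the schema under a temporary filename in
    the same directory, then renaming it into place, so a crash during
    initialization cannot leave a half-built database at ``dbfile``.)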
""" temp_dbfile = _get_temporary_dbfile(dbfile) db = _open_db_connection(temp_dbfile) _initialize_db_schema(db, target_version) db.close() os.rename(temp_dbfile, dbfile) return _open_db_connection(dbfile) def get_db(dbfile, target_version=TARGET_VERSION): """Open or create the given db file. The parent directory must exist. Returns the db connection object, or raises DBError. """ if dbfile == ":memory:": db = _open_db_connection(dbfile) _initialize_db_schema(db, target_version) elif os.path.exists(dbfile): db = _open_db_connection(dbfile) else: db = _atomic_create_and_initialize_db(dbfile, target_version) version = db.execute("SELECT version FROM version").fetchone()["version"] ## while version < target_version: ## log.msg(" need to upgrade from %s to %s" % (version, target_version)) ## try: ## upgrader = get_upgrader(version+1) ## except ValueError: # ResourceError?? ## log.msg(" unable to upgrade %s to %s" % (version, version+1)) ## raise DBError("Unable to upgrade %s to version %s, left at %s" ## % (dbfile, version+1, version)) ## log.msg(" executing upgrader v%s->v%s" % (version, version+1)) ## db.executescript(upgrader) ## db.commit() ## version = version+1 if version != target_version: raise DBError("Unable to handle db version %s" % version) return db class DBDoesntExist(Exception): pass def open_existing_db(dbfile): assert dbfile != ":memory:" if not os.path.exists(dbfile): raise DBDoesntExist() return _open_db_connection(dbfile) class DBAlreadyExists(Exception): pass def create_db(dbfile): """Create the given db file. Refuse to touch a pre-existing file. This is meant for use by migration tools, to create the output target""" if dbfile == ":memory:": db = _open_db_connection(dbfile) _initialize_db_schema(db, TARGET_VERSION) elif os.path.exists(dbfile): raise DBAlreadyExists() else: db = _atomic_create_and_initialize_db(dbfile, TARGET_VERSION) return db def dump_db(db): # to let _iterdump work, we need to restore the original row factory orig = db.row_factory try: db.row_factory = sqlite3.Row return "".join(db.iterdump()) finally: db.row_factory = orig magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/db-schemas/000077500000000000000000000000001355461071100266145ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/db-schemas/v1.sql000066400000000000000000000022341355461071100276640ustar00rootroot00000000000000 CREATE TABLE `version` -- contains one row ( `version` INTEGER -- set to 1 ); CREATE TABLE `current` -- contains one row ( `rebooted` INTEGER, -- seconds since epoch of most recent reboot `updated` INTEGER, -- when `current` was last updated `connected` INTEGER, -- number of current paired connections `waiting` INTEGER, -- number of not-yet-paired connections `incomplete_bytes` INTEGER -- bytes sent through not-yet-complete connections ); CREATE TABLE `usage` ( `started` INTEGER, -- seconds since epoch, rounded to "blur time" `total_time` INTEGER, -- seconds from open to last close `waiting_time` INTEGER, -- seconds from start to 2nd side appearing, or None `total_bytes` INTEGER, -- total bytes relayed (both directions) `result` VARCHAR -- happy, scary, lonely, errory, pruney -- transit moods: -- "errory": one side gave the wrong handshake -- "lonely": good handshake, but the other side never showed up -- "redundant": good handshake, abandoned in favor of different connection -- "happy": both sides gave correct handshake ); CREATE INDEX `usage_started_index` ON `usage` (`started`); CREATE INDEX `usage_result_index` ON `usage` 
(`result`); magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/increase_rlimits.py000066400000000000000000000026621355461071100305220ustar00rootroot00000000000000try: # 'resource' is unix-only from resource import getrlimit, setrlimit, RLIMIT_NOFILE except ImportError: # pragma: nocover getrlimit, setrlimit, RLIMIT_NOFILE = None, None, None # pragma: nocover from twisted.python import log def increase_rlimits(): if getrlimit is None: log.msg("unable to import 'resource', leaving rlimit alone") return soft, hard = getrlimit(RLIMIT_NOFILE) if soft >= 10000: log.msg("RLIMIT_NOFILE.soft was %d, leaving it alone" % soft) return # OS-X defaults to soft=7168, and reports a huge number for 'hard', # but won't accept anything more than soft=10240, so we can't just # set soft=hard. Linux returns (1024, 1048576) and is fine with # soft=hard. Cygwin is reported to return (256,-1) and accepts up to # soft=3200. So we try multiple values until something works. for newlimit in [hard, 10000, 3200, 1024]: log.msg("changing RLIMIT_NOFILE from (%s,%s) to (%s,%s)" % (soft, hard, newlimit, hard)) try: setrlimit(RLIMIT_NOFILE, (newlimit, hard)) log.msg("setrlimit successful") return except ValueError as e: log.msg("error during setrlimit: %s" % e) continue except: log.msg("other error during setrlimit, leaving it alone") log.err() return log.msg("unable to change rlimit, leaving it alone") magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/server_tap.py000066400000000000000000000033141355461071100273330ustar00rootroot00000000000000import os from twisted.internet import reactor from twisted.python import usage from twisted.application.service import MultiService from twisted.application.internet import (TimerService, StreamServerEndpointService) from twisted.internet import endpoints from . import transit_server from .increase_rlimits import increase_rlimits LONGDESC = """\ This plugin sets up a 'Transit Relay' server for magic-wormhole. This service listens for TCP connections, finds pairs which present the same handshake, and glues the two TCP sockets together. 
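By default it listens on TCP port 4001, on both IPv4 and IPv6; use --port=
to select a different endpoint.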
""" class Options(usage.Options): synopsis = "[--port=] [--log-fd] [--blur-usage=] [--usage-db=]" longdesc = LONGDESC optParameters = [ ("port", "p", "tcp:4001:interface=\:\:", "endpoint to listen on"), ("blur-usage", None, None, "blur timestamps and data sizes in logs"), ("log-fd", None, None, "write JSON usage logs to this file descriptor"), ("usage-db", None, None, "record usage data (SQLite)"), ] def opt_blur_usage(self, arg): self["blur-usage"] = int(arg) def makeService(config, reactor=reactor): increase_rlimits() ep = endpoints.serverFromString(reactor, config["port"]) # to listen log_file = (os.fdopen(int(config["log-fd"]), "w") if config["log-fd"] is not None else None) f = transit_server.Transit(blur_usage=config["blur-usage"], log_file=log_file, usage_db=config["usage-db"]) parent = MultiService() StreamServerEndpointService(ep, f).setServiceParent(parent) TimerService(5*60.0, f.timerUpdateStats).setServiceParent(parent) return parent magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/000077500000000000000000000000001355461071100255655ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/__init__.py000066400000000000000000000000001355461071100276640ustar00rootroot00000000000000magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/common.py000066400000000000000000000021161355461071100274270ustar00rootroot00000000000000#from __future__ import unicode_literals from twisted.internet import reactor, endpoints from twisted.internet.defer import inlineCallbacks from ..transit_server import Transit class ServerBase: log_requests = False @inlineCallbacks def setUp(self): self._lp = None if self.log_requests: blur_usage = None else: blur_usage = 60.0 yield self._setup_relay(blur_usage=blur_usage) self._transit_server._debug_log = self.log_requests @inlineCallbacks def _setup_relay(self, blur_usage=None, log_file=None, usage_db=None): ep = endpoints.TCP4ServerEndpoint(reactor, 0, interface="127.0.0.1") self._transit_server = Transit(blur_usage=blur_usage, log_file=log_file, usage_db=usage_db) self._lp = yield ep.listen(self._transit_server) addr = self._lp.getHost() # ws://127.0.0.1:%d/wormhole-relay/ws self.transit = u"tcp:127.0.0.1:%d" % addr.port def tearDown(self): if self._lp: return self._lp.stopListening() magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/test_config.py000066400000000000000000000015641355461071100304510ustar00rootroot00000000000000from __future__ import unicode_literals, print_function from twisted.trial import unittest from .. import server_tap PORT = "tcp:4001:interface=\:\:" class Config(unittest.TestCase): def test_defaults(self): o = server_tap.Options() o.parseOptions([]) self.assertEqual(o, {"blur-usage": None, "log-fd": None, "usage-db": None, "port": PORT}) def test_blur(self): o = server_tap.Options() o.parseOptions(["--blur-usage=60"]) self.assertEqual(o, {"blur-usage": 60, "log-fd": None, "usage-db": None, "port": PORT}) def test_string(self): o = server_tap.Options() s = str(o) self.assertIn("This plugin sets up a 'Transit Relay'", s) self.assertIn("--blur-usage=", s) self.assertIn("blur timestamps and data sizes in logs", s) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/test_database.py000066400000000000000000000117601355461071100307470ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import os from twisted.python import filepath from twisted.trial import unittest from .. 
import database from ..database import get_db, TARGET_VERSION, dump_db, DBError class Get(unittest.TestCase): def test_create_default(self): db_url = ":memory:" db = get_db(db_url) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], TARGET_VERSION) def test_open_existing_file(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "normal.db") db = get_db(fn) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], TARGET_VERSION) db2 = get_db(fn) rows = db2.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], TARGET_VERSION) def test_open_bad_version(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "old.db") db = get_db(fn) db.execute("UPDATE version SET version=999") db.commit() with self.assertRaises(DBError) as e: get_db(fn) self.assertIn("Unable to handle db version 999", str(e.exception)) def test_open_corrupt(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "corrupt.db") with open(fn, "wb") as f: f.write(b"I am not a database") with self.assertRaises(DBError) as e: get_db(fn) self.assertIn("not a database", str(e.exception)) def test_failed_create_allows_subsequent_create(self): patch = self.patch(database, "get_schema", lambda version: b"this is a broken schema") dbfile = filepath.FilePath(self.mktemp()) self.assertRaises(Exception, lambda: get_db(dbfile.path)) patch.restore() get_db(dbfile.path) def OFF_test_upgrade(self): # disabled until we add a v2 schema basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "upgrade.db") self.assertNotEqual(TARGET_VERSION, 2) # create an old-version DB in a file db = get_db(fn, 2) rows = db.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], 2) del db # then upgrade the file to the latest version dbA = get_db(fn, TARGET_VERSION) rows = dbA.execute("SELECT * FROM version").fetchall() self.assertEqual(len(rows), 1) self.assertEqual(rows[0]["version"], TARGET_VERSION) dbA_text = dump_db(dbA) del dbA # make sure the upgrades got committed to disk dbB = get_db(fn, TARGET_VERSION) dbB_text = dump_db(dbB) del dbB self.assertEqual(dbA_text, dbB_text) # The upgraded schema should be equivalent to that of a new DB. # However a text dump will differ because ALTER TABLE always appends # the new column to the end of a table, whereas our schema puts it # somewhere in the middle (wherever it fits naturally). Also ALTER # TABLE doesn't include comments. 
if False: latest_db = get_db(":memory:", TARGET_VERSION) latest_text = dump_db(latest_db) with open("up.sql","w") as f: f.write(dbA_text) with open("new.sql","w") as f: f.write(latest_text) # check with "diff -u _trial_temp/up.sql _trial_temp/new.sql" self.assertEqual(dbA_text, latest_text) class Create(unittest.TestCase): def test_memory(self): db = database.create_db(":memory:") latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) def test_preexisting(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "preexisting.db") with open(fn, "w"): pass with self.assertRaises(database.DBAlreadyExists): database.create_db(fn) def test_create(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db = database.create_db(fn) latest_text = dump_db(db) self.assertIn("CREATE TABLE", latest_text) class Open(unittest.TestCase): def test_open(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") db1 = database.create_db(fn) latest_text = dump_db(db1) self.assertIn("CREATE TABLE", latest_text) db2 = database.open_existing_db(fn) self.assertIn("CREATE TABLE", dump_db(db2)) def test_doesnt_exist(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "created.db") with self.assertRaises(database.DBDoesntExist): database.open_existing_db(fn) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/test_rlimits.py000066400000000000000000000053411355461071100306640ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import mock from twisted.trial import unittest from ..increase_rlimits import increase_rlimits class RLimits(unittest.TestCase): def test_rlimit(self): def patch_r(name, *args, **kwargs): return mock.patch("wormhole_transit_relay.increase_rlimits." 
+ name, *args, **kwargs) fakelog = [] def checklog(*expected): self.assertEqual(fakelog, list(expected)) fakelog[:] = [] NF = "NOFILE" mock_NF = patch_r("RLIMIT_NOFILE", NF) with patch_r("log.msg", fakelog.append): with patch_r("getrlimit", None): increase_rlimits() checklog("unable to import 'resource', leaving rlimit alone") with mock_NF: with patch_r("getrlimit", return_value=(20000, 30000)) as gr: increase_rlimits() self.assertEqual(gr.mock_calls, [mock.call(NF)]) checklog("RLIMIT_NOFILE.soft was 20000, leaving it alone") with patch_r("getrlimit", return_value=(10, 30000)) as gr: with patch_r("setrlimit", side_effect=TypeError("other")): with patch_r("log.err") as err: increase_rlimits() self.assertEqual(err.mock_calls, [mock.call()]) checklog("changing RLIMIT_NOFILE from (10,30000) to (30000,30000)", "other error during setrlimit, leaving it alone") for maxlimit in [40000, 20000, 9000, 2000, 1000]: def setrlimit(which, newlimit): if newlimit[0] > maxlimit: raise ValueError("nope") return None calls = [] expected = [] for tries in [30000, 10000, 3200, 1024]: calls.append(mock.call(NF, (tries, 30000))) expected.append("changing RLIMIT_NOFILE from (10,30000) to (%d,30000)" % tries) if tries > maxlimit: expected.append("error during setrlimit: nope") else: expected.append("setrlimit successful") break else: expected.append("unable to change rlimit, leaving it alone") with patch_r("setrlimit", side_effect=setrlimit) as sr: increase_rlimits() self.assertEqual(sr.mock_calls, calls) checklog(*expected) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/test_service.py000066400000000000000000000031431355461071100306370ustar00rootroot00000000000000from __future__ import unicode_literals, print_function from twisted.trial import unittest import mock from twisted.application.service import MultiService from .. import server_tap class Service(unittest.TestCase): def test_defaults(self): o = server_tap.Options() o.parseOptions([]) with mock.patch("wormhole_transit_relay.server_tap.transit_server.Transit") as t: s = server_tap.makeService(o) self.assertEqual(t.mock_calls, [mock.call(blur_usage=None, log_file=None, usage_db=None)]) self.assertIsInstance(s, MultiService) def test_blur(self): o = server_tap.Options() o.parseOptions(["--blur-usage=60"]) with mock.patch("wormhole_transit_relay.server_tap.transit_server.Transit") as t: server_tap.makeService(o) self.assertEqual(t.mock_calls, [mock.call(blur_usage=60, log_file=None, usage_db=None)]) def test_log_fd(self): o = server_tap.Options() o.parseOptions(["--log-fd=99"]) fd = object() with mock.patch("wormhole_transit_relay.server_tap.transit_server.Transit") as t: with mock.patch("wormhole_transit_relay.server_tap.os.fdopen", return_value=fd) as f: server_tap.makeService(o) self.assertEqual(f.mock_calls, [mock.call(99, "w")]) self.assertEqual(t.mock_calls, [mock.call(blur_usage=None, log_file=fd, usage_db=None)]) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/test_stats.py000066400000000000000000000100251355461071100303320ustar00rootroot00000000000000from __future__ import print_function, unicode_literals import os, io, json, sqlite3 import mock from twisted.trial import unittest from ..transit_server import Transit from .. 
import database class DB(unittest.TestCase): def open_db(self, dbfile): db = sqlite3.connect(dbfile) database._initialize_db_connection(db) return db def test_db(self): T = 1519075308.0 d = self.mktemp() os.mkdir(d) usage_db = os.path.join(d, "usage.sqlite") with mock.patch("time.time", return_value=T+0): t = Transit(blur_usage=None, log_file=None, usage_db=usage_db) db = self.open_db(usage_db) with mock.patch("time.time", return_value=T+1): t.recordUsage(started=123, result="happy", total_bytes=100, total_time=10, waiting_time=2) self.assertEqual(db.execute("SELECT * FROM `usage`").fetchall(), [dict(result="happy", started=123, total_bytes=100, total_time=10, waiting_time=2), ]) self.assertEqual(db.execute("SELECT * FROM `current`").fetchone(), dict(rebooted=T+0, updated=T+1, incomplete_bytes=0, waiting=0, connected=0)) with mock.patch("time.time", return_value=T+2): t.recordUsage(started=150, result="errory", total_bytes=200, total_time=11, waiting_time=3) self.assertEqual(db.execute("SELECT * FROM `usage`").fetchall(), [dict(result="happy", started=123, total_bytes=100, total_time=10, waiting_time=2), dict(result="errory", started=150, total_bytes=200, total_time=11, waiting_time=3), ]) self.assertEqual(db.execute("SELECT * FROM `current`").fetchone(), dict(rebooted=T+0, updated=T+2, incomplete_bytes=0, waiting=0, connected=0)) with mock.patch("time.time", return_value=T+3): t.timerUpdateStats() self.assertEqual(db.execute("SELECT * FROM `current`").fetchone(), dict(rebooted=T+0, updated=T+3, incomplete_bytes=0, waiting=0, connected=0)) def test_no_db(self): t = Transit(blur_usage=None, log_file=None, usage_db=None) t.recordUsage(started=123, result="happy", total_bytes=100, total_time=10, waiting_time=2) t.timerUpdateStats() class LogToStdout(unittest.TestCase): def test_log(self): # emit lines of JSON to log_file, if set log_file = io.StringIO() t = Transit(blur_usage=None, log_file=log_file, usage_db=None) t.recordUsage(started=123, result="happy", total_bytes=100, total_time=10, waiting_time=2) self.assertEqual(json.loads(log_file.getvalue()), {"started": 123, "total_time": 10, "waiting_time": 2, "total_bytes": 100, "mood": "happy"}) def test_log_blurred(self): # if blurring is enabled, timestamps should be rounded to the # requested amount, and sizes should be rounded up too log_file = io.StringIO() t = Transit(blur_usage=60, log_file=log_file, usage_db=None) t.recordUsage(started=123, result="happy", total_bytes=11999, total_time=10, waiting_time=2) self.assertEqual(json.loads(log_file.getvalue()), {"started": 120, "total_time": 10, "waiting_time": 2, "total_bytes": 20000, "mood": "happy"}) def test_do_not_log(self): t = Transit(blur_usage=60, log_file=None, usage_db=None) t.recordUsage(started=123, result="happy", total_bytes=11999, total_time=10, waiting_time=2) magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/test/test_transit_server.py000066400000000000000000000504261355461071100322570ustar00rootroot00000000000000from __future__ import print_function, unicode_literals from binascii import hexlify from twisted.trial import unittest from twisted.internet import protocol, reactor, defer from twisted.internet.endpoints import clientFromString, connectProtocol from .common import ServerBase from .. 
import transit_server class Accumulator(protocol.Protocol): def __init__(self): self.data = b"" self.count = 0 self._wait = None self._disconnect = defer.Deferred() def waitForBytes(self, more): assert self._wait is None self.count = more self._wait = defer.Deferred() self._check_done() return self._wait def dataReceived(self, data): self.data = self.data + data self._check_done() def _check_done(self): if self._wait and len(self.data) >= self.count: d = self._wait self._wait = None d.callback(self) def connectionLost(self, why): if self._wait: self._wait.errback(RuntimeError("closed")) self._disconnect.callback(None) def wait(): d = defer.Deferred() reactor.callLater(0.001, d.callback, None) return d class _Transit: def test_blur_size(self): blur = transit_server.blur_size self.failUnlessEqual(blur(0), 0) self.failUnlessEqual(blur(1), 10e3) self.failUnlessEqual(blur(10e3), 10e3) self.failUnlessEqual(blur(10e3+1), 20e3) self.failUnlessEqual(blur(15e3), 20e3) self.failUnlessEqual(blur(20e3), 20e3) self.failUnlessEqual(blur(1e6), 1e6) self.failUnlessEqual(blur(1e6+1), 2e6) self.failUnlessEqual(blur(1.5e6), 2e6) self.failUnlessEqual(blur(2e6), 2e6) self.failUnlessEqual(blur(900e6), 900e6) self.failUnlessEqual(blur(1000e6), 1000e6) self.failUnlessEqual(blur(1050e6), 1100e6) self.failUnlessEqual(blur(1100e6), 1100e6) self.failUnlessEqual(blur(1150e6), 1200e6) @defer.inlineCallbacks def test_register(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") # let that arrive while self.count() == 0: yield wait() self.assertEqual(self.count(), 1) a1.transport.loseConnection() # let that get removed while self.count() > 0: yield wait() self.assertEqual(self.count(), 0) # the token should be removed too self.assertEqual(len(self._transit_server._pending_requests), 0) @defer.inlineCallbacks def test_both_unsided(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a2 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 a1.transport.write(b"please relay " + hexlify(token1) + b"\n") a2.transport.write(b"please relay " + hexlify(token1) + b"\n") # a correct handshake yields an ack, after which we can send exp = b"ok\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) s1 = b"data1" a1.transport.write(s1) exp = b"ok\n" yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) # all data they sent after the handshake should be given to us exp = b"ok\n"+s1 yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) a1.transport.loseConnection() a2.transport.loseConnection() @defer.inlineCallbacks def test_sided_unsided(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a2 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") a2.transport.write(b"please relay " + hexlify(token1) + b"\n") # a correct handshake yields an ack, after which we can send exp = b"ok\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) s1 = b"data1" a1.transport.write(s1) exp = b"ok\n" yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) # all data they sent after the handshake should be given to us exp = b"ok\n"+s1 yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) 
a1.transport.loseConnection() a2.transport.loseConnection() @defer.inlineCallbacks def test_unsided_sided(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a2 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 a1.transport.write(b"please relay " + hexlify(token1) + b"\n") a2.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") # a correct handshake yields an ack, after which we can send exp = b"ok\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) s1 = b"data1" a1.transport.write(s1) exp = b"ok\n" yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) # all data they sent after the handshake should be given to us exp = b"ok\n"+s1 yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) a1.transport.loseConnection() a2.transport.loseConnection() @defer.inlineCallbacks def test_both_sided(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a2 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 side2 = b"\x02"*8 a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") a2.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side2) + b"\n") # a correct handshake yields an ack, after which we can send exp = b"ok\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) s1 = b"data1" a1.transport.write(s1) exp = b"ok\n" yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) # all data they sent after the handshake should be given to us exp = b"ok\n"+s1 yield a2.waitForBytes(len(exp)) self.assertEqual(a2.data, exp) a1.transport.loseConnection() a2.transport.loseConnection() def count(self): return sum([len(potentials) for potentials in self._transit_server._pending_requests.values()]) @defer.inlineCallbacks def test_ignore_same_side(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a2 = yield connectProtocol(ep, Accumulator()) a3 = yield connectProtocol(ep, Accumulator()) disconnects = [] a1._disconnect.addCallback(disconnects.append) a2._disconnect.addCallback(disconnects.append) token1 = b"\x00"*32 side1 = b"\x01"*8 a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") # let that arrive while self.count() == 0: yield wait() a2.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") # let that arrive while self.count() == 1: yield wait() self.assertEqual(self.count(), 2) # same-side connections don't match # when the second side arrives, the spare first connection should be # closed side2 = b"\x02"*8 a3.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side2) + b"\n") # let that arrive while self.count() != 0: yield wait() self.assertEqual(len(self._transit_server._pending_requests), 0) self.assertEqual(len(self._transit_server._active_connections), 2) # That will trigger a disconnect on exactly one of (a1 or a2). Wait # until our client notices it. 
while not disconnects: yield wait() # the other connection should still be connected self.assertEqual(sum([int(t.transport.connected) for t in [a1, a2]]), 1) a1.transport.loseConnection() a2.transport.loseConnection() a3.transport.loseConnection() @defer.inlineCallbacks def test_bad_handshake_old(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 # the server waits for the exact number of bytes in the expected # handshake message. to trigger "bad handshake", we must match. a1.transport.write(b"please DELAY " + hexlify(token1) + b"\n") exp = b"bad handshake\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() @defer.inlineCallbacks def test_bad_handshake_old_slow(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a1.transport.write(b"please DELAY ") # As in test_impatience_new_slow, the current state machine has code # that can only be reached if we insert a stall here, so dataReceived # gets called twice. Hopefully we can delete this test once # dataReceived is refactored to remove that state. d = defer.Deferred() reactor.callLater(0.1, d.callback, None) yield d token1 = b"\x00"*32 # the server waits for the exact number of bytes in the expected # handshake message. to trigger "bad handshake", we must match. a1.transport.write(hexlify(token1) + b"\n") exp = b"bad handshake\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() @defer.inlineCallbacks def test_bad_handshake_new(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 # the server waits for the exact number of bytes in the expected # handshake message. to trigger "bad handshake", we must match. a1.transport.write(b"please DELAY " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") exp = b"bad handshake\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() @defer.inlineCallbacks def test_binary_handshake(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) binary_bad_handshake = b"\x00\x01\xe0\x0f\n\xff" # the embedded \n makes the server trigger early, before the full # expected handshake length has arrived. A non-wormhole client # writing non-ascii junk to the transit port used to trigger a # UnicodeDecodeError when it tried to coerce the incoming handshake # to unicode, due to the ("\n" in buf) check. This was fixed to use # (b"\n" in buf). This exercises the old failure. a1.transport.write(binary_bad_handshake) exp = b"bad handshake\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() @defer.inlineCallbacks def test_impatience_old(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 # sending too many bytes is impatience. a1.transport.write(b"please relay " + hexlify(token1) + b"\nNOWNOWNOW") exp = b"impatient\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() @defer.inlineCallbacks def test_impatience_new(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 # sending too many bytes is impatience. 
a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\nNOWNOWNOW") exp = b"impatient\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() @defer.inlineCallbacks def test_impatience_new_slow(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) # For full coverage, we need dataReceived to see a particular framing # of these two pieces of data, and ITCPTransport doesn't have flush() # (which probably wouldn't work anyways). For now, force a 100ms # stall between the two writes. I tried setTcpNoDelay(True) but it # didn't seem to help without the stall. The long-term fix is to # rewrite dataReceived() to remove the multiple "impatient" # codepaths, deleting the particular clause that this test exercises, # then remove this test. token1 = b"\x00"*32 side1 = b"\x01"*8 # sending too many bytes is impatience. a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") d = defer.Deferred() reactor.callLater(0.1, d.callback, None) yield d a1.transport.write(b"NOWNOWNOW") exp = b"impatient\n" yield a1.waitForBytes(len(exp)) self.assertEqual(a1.data, exp) a1.transport.loseConnection() class TransitWithLogs(_Transit, ServerBase, unittest.TestCase): log_requests = True class TransitWithoutLogs(_Transit, ServerBase, unittest.TestCase): log_requests = False class Usage(ServerBase, unittest.TestCase): @defer.inlineCallbacks def setUp(self): yield super(Usage, self).setUp() self._usage = [] def record(started, result, total_bytes, total_time, waiting_time): self._usage.append((started, result, total_bytes, total_time, waiting_time)) self._transit_server.recordUsage = record @defer.inlineCallbacks def test_errory(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a1.transport.write(b"this is a very bad handshake\n") # that will log the "errory" usage event, then drop the connection yield a1._disconnect self.assertEqual(len(self._usage), 1, self._usage) (started, result, total_bytes, total_time, waiting_time) = self._usage[0] self.assertEqual(result, "errory", self._usage) @defer.inlineCallbacks def test_lonely(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") while not self._transit_server._pending_requests: yield wait() # wait for the server to see the connection # now we disconnect before the peer connects a1.transport.loseConnection() yield a1._disconnect while self._transit_server._pending_requests: yield wait() # wait for the server to see the disconnect too self.assertEqual(len(self._usage), 1, self._usage) (started, result, total_bytes, total_time, waiting_time) = self._usage[0] self.assertEqual(result, "lonely", self._usage) self.assertIdentical(waiting_time, None) @defer.inlineCallbacks def test_one_happy_one_jilted(self): ep = clientFromString(reactor, self.transit) a1 = yield connectProtocol(ep, Accumulator()) a2 = yield connectProtocol(ep, Accumulator()) token1 = b"\x00"*32 side1 = b"\x01"*8 side2 = b"\x02"*8 a1.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side1) + b"\n") while not self._transit_server._pending_requests: yield wait() # make sure a1 connects first a2.transport.write(b"please relay " + hexlify(token1) + b" for side " + hexlify(side2) + b"\n") while not 
self._transit_server._active_connections:
            yield wait() # wait for the server to see the connection
        self.assertEqual(len(self._transit_server._pending_requests), 0)
        self.assertEqual(self._usage, []) # no events yet

        a1.transport.write(b"\x00" * 13)
        yield a2.waitForBytes(13)
        a2.transport.write(b"\xff" * 7)
        yield a1.waitForBytes(7)

        a1.transport.loseConnection()
        yield a1._disconnect
        while self._transit_server._active_connections:
            yield wait()
        yield a2._disconnect
        self.assertEqual(len(self._usage), 1, self._usage)
        (started, result, total_bytes, total_time, waiting_time) = self._usage[0]
        self.assertEqual(result, "happy", self._usage)
        self.assertEqual(total_bytes, 20)
        self.assertNotIdentical(waiting_time, None)

    @defer.inlineCallbacks
    def test_redundant(self):
        ep = clientFromString(reactor, self.transit)
        a1a = yield connectProtocol(ep, Accumulator())
        a1b = yield connectProtocol(ep, Accumulator())
        a1c = yield connectProtocol(ep, Accumulator())
        a2 = yield connectProtocol(ep, Accumulator())

        token1 = b"\x00"*32
        side1 = b"\x01"*8
        side2 = b"\x02"*8
        a1a.transport.write(b"please relay " + hexlify(token1) +
                            b" for side " + hexlify(side1) + b"\n")
        def count_requests():
            return sum([len(v)
                        for v in self._transit_server._pending_requests.values()])
        while count_requests() < 1:
            yield wait()
        a1b.transport.write(b"please relay " + hexlify(token1) +
                            b" for side " + hexlify(side1) + b"\n")
        while count_requests() < 2:
            yield wait()

        # connect and disconnect a third client (for side1) to exercise the
        # code that removes a pending connection without removing the entire
        # token
        a1c.transport.write(b"please relay " + hexlify(token1) +
                            b" for side " + hexlify(side1) + b"\n")
        while count_requests() < 3:
            yield wait()
        a1c.transport.loseConnection()
        yield a1c._disconnect
        while count_requests() > 2:
            yield wait()
        self.assertEqual(len(self._usage), 1, self._usage)
        (started, result, total_bytes, total_time, waiting_time) = self._usage[0]
        self.assertEqual(result, "lonely", self._usage)

        a2.transport.write(b"please relay " + hexlify(token1) +
                           b" for side " + hexlify(side2) + b"\n")
        # this will claim one of (a1a, a1b), and close the other as redundant
        while not self._transit_server._active_connections:
            yield wait() # wait for the server to see the connection
        self.assertEqual(count_requests(), 0)
        self.assertEqual(len(self._usage), 2, self._usage)
        (started, result, total_bytes, total_time, waiting_time) = self._usage[1]
        self.assertEqual(result, "redundant", self._usage)

        # one of these is unnecessary, but probably harmless
        a1a.transport.loseConnection()
        a1b.transport.loseConnection()
        yield a1a._disconnect
        yield a1b._disconnect
        while self._transit_server._active_connections:
            yield wait()
        yield a2._disconnect
        self.assertEqual(len(self._usage), 3, self._usage)
        (started, result, total_bytes, total_time, waiting_time) = self._usage[2]
        self.assertEqual(result, "happy", self._usage)

magic-wormhole-transit-relay-0.2.1/src/wormhole_transit_relay/transit_server.py

from __future__ import print_function, unicode_literals
import re, time, json
from collections import defaultdict
from twisted.python import log
from twisted.internet import protocol
from .database import get_db

SECONDS = 1.0
MINUTE = 60*SECONDS
HOUR = 60*MINUTE
DAY = 24*HOUR
MB = 1000*1000

def round_to(size, coarseness):
    return int(coarseness*(1+int((size-1)/coarseness)))

def blur_size(size):
    if size == 0:
        return 0
    if size < 1e6:
        return round_to(size, 10e3)
    if size < 1e9:
        return round_to(size, 1e6)
    return
round_to(size, 100e6)

class TransitConnection(protocol.Protocol):
    def __init__(self):
        self._got_token = False
        self._got_side = False
        self._token_buffer = b""
        self._sent_ok = False
        self._mood = None
        self._buddy = None
        self._total_sent = 0

    def describeToken(self):
        d = "-"
        if self._got_token:
            d = self._got_token[:16].decode("ascii")
            if self._got_side:
                d += "-" + self._got_side.decode("ascii")
            else:
                d += "-"
        return d

    def connectionMade(self):
        self._started = time.time()
        self._log_requests = self.factory._log_requests
        self.transport.setTcpKeepAlive(True)

    def dataReceived(self, data):
        if self._sent_ok:
            # We are an IPushProducer to our buddy's IConsumer, so they'll
            # throttle us (by calling pauseProducing()) when their outbound
            # buffer is full (e.g. when their downstream pipe is full). In
            # practice, this buffers about 10MB per connection, after which
            # point the sender will only transmit data as fast as the
            # receiver can handle it.
            self._total_sent += len(data)
            self._buddy.transport.write(data)
            return

        if self._got_token: # but not yet sent_ok
            self.transport.write(b"impatient\n")
            if self._log_requests:
                log.msg("transit impatience failure")
            return self.disconnect_error() # impatience yields failure

        # else this should be (part of) the token
        self._token_buffer += data
        buf = self._token_buffer

        # old: "please relay {64}\n"
        # new: "please relay {64} for side {16}\n"
        (old, handshake_len, token) = self._check_old_handshake(buf)
        assert old in ("yes", "waiting", "no")
        if old == "yes":
            # remember they aren't supposed to send anything past their
            # handshake until we've said go
            if len(buf) > handshake_len:
                self.transport.write(b"impatient\n")
                if self._log_requests:
                    log.msg("transit impatience failure")
                return self.disconnect_error() # impatience yields failure
            return self._got_handshake(token, None)
        (new, handshake_len, token, side) = self._check_new_handshake(buf)
        assert new in ("yes", "waiting", "no")
        if new == "yes":
            if len(buf) > handshake_len:
                self.transport.write(b"impatient\n")
                if self._log_requests:
                    log.msg("transit impatience failure")
                return self.disconnect_error() # impatience yields failure
            return self._got_handshake(token, side)
        if (old == "no" and new == "no"):
            self.transport.write(b"bad handshake\n")
            if self._log_requests:
                log.msg("transit handshake failure")
            return self.disconnect_error() # incorrectness yields failure
        # else we'll keep waiting

    def _check_old_handshake(self, buf):
        # old: "please relay {64}\n"
        # return ("yes", handshake, token) if buf contains an old-style handshake
        # return ("waiting", None, None) if it might eventually contain one
        # return ("no", None, None) if it could never contain one
        wanted = len("please relay \n")+32*2
        if len(buf) < wanted-1 and b"\n" in buf:
            return ("no", None, None)
        if len(buf) < wanted:
            return ("waiting", None, None)

        mo = re.search(br"^please relay (\w{64})\n", buf, re.M)
        if mo:
            token = mo.group(1)
            return ("yes", wanted, token)
        return ("no", None, None)

    def _check_new_handshake(self, buf):
        # new: "please relay {64} for side {16}\n"
        wanted = len("please relay  for side \n")+32*2+8*2
        if len(buf) < wanted-1 and b"\n" in buf:
            return ("no", None, None, None)
        if len(buf) < wanted:
            return ("waiting", None, None, None)

        mo = re.search(br"^please relay (\w{64}) for side (\w{16})\n",
                       buf, re.M)
        if mo:
            token = mo.group(1)
            side = mo.group(2)
            return ("yes", wanted, token, side)
        return ("no", None, None, None)

    def _got_handshake(self, token, side):
        self._got_token = token
        self._got_side = side
        self._mood = "lonely" # until buddy connects
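        # Note: the mood values assigned throughout this class form a small
        # lifecycle (all defined in the methods below):
        #   lonely    - handshake accepted, no buddy yet (set just above)
        #   happy     - paired with a buddy (buddy_connected)
        #   jilted    - was paired, buddy hung up first (buddy_disconnected)
        #   redundant - spare same-token connection dropped after a match
        #   errory    - handshake never completed correctly (disconnect_error)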
        self.factory.connection_got_token(token, side, self)

    def buddy_connected(self, them):
        self._buddy = them
        self._mood = "happy"
        self.transport.write(b"ok\n")
        self._sent_ok = True
        # Connect the two as a producer/consumer pair. We use streaming=True,
        # so this expects the IPushProducer interface, and uses
        # pauseProducing() to throttle, and resumeProducing() to unthrottle.
        self._buddy.transport.registerProducer(self.transport, True)
        # The Transit object calls buddy_connected() on both protocols, so
        # there will be two producer/consumer pairs.

    def buddy_disconnected(self):
        if self._log_requests:
            log.msg("buddy_disconnected %s" % self.describeToken())
        self._buddy = None
        self._mood = "jilted"
        self.transport.loseConnection()

    def disconnect_error(self):
        # we haven't finished the handshake, so there are no tokens tracking
        # us
        self._mood = "errory"
        self.transport.loseConnection()
        if self.factory._debug_log:
            log.msg("transitFailed %r" % self)

    def disconnect_redundant(self):
        # this is called if a buddy connected and we were found unnecessary.
        # Any token-tracking cleanup will have been done before we're called.
        self._mood = "redundant"
        self.transport.loseConnection()

    def connectionLost(self, reason):
        finished = time.time()
        total_time = finished - self._started

        # Record usage. There are seven cases:
        # * n1: the handshake failed, not a real client (errory)
        # * n2: real client disconnected before any buddy appeared (lonely)
        # * n3: real client closed as redundant after buddy appears (redundant)
        # * n4: real client connected first, buddy closes first (jilted)
        # * n5: real client connected first, buddy closes last (happy)
        # * n6: real client connected last, buddy closes first (jilted)
        # * n7: real client connected last, buddy closes last (happy)
        # * non-connected clients (1,2,3) always write a usage record
        # * for connected clients, whoever disconnects first gets to write the
        #   usage record (5, 7). The last disconnect doesn't write a record.

        if self._mood == "errory": # 1
            assert not self._buddy
            self.factory.recordUsage(self._started, "errory", 0,
                                     total_time, None)
        elif self._mood == "redundant": # 3
            assert not self._buddy
            self.factory.recordUsage(self._started, "redundant", 0,
                                     total_time, None)
        elif self._mood == "jilted": # 4 or 6
            # we were connected, but our buddy hung up on us. They record the
            # usage event, we do not
            pass
        elif self._mood == "lonely": # 2
            assert not self._buddy
            self.factory.recordUsage(self._started, "lonely", 0,
                                     total_time, None)
        else: # 5 or 7
            # we were connected, we hung up first. We record the event.
            assert self._mood == "happy", self._mood # TODO: mood==None
            assert self._buddy
            starts = [self._started, self._buddy._started]
            total_time = finished - min(starts)
            waiting_time = max(starts) - min(starts)
            total_bytes = self._total_sent + self._buddy._total_sent
            self.factory.recordUsage(self._started, "happy", total_bytes,
                                     total_time, waiting_time)

        if self._buddy:
            self._buddy.buddy_disconnected()
        self.factory.transitFinished(self, self._got_token, self._got_side,
                                     self.describeToken())

class Transit(protocol.ServerFactory):
    # I manage pairs of simultaneous connections to a secondary TCP port,
    # both forwarded to the other. Clients must begin each connection with
    # "please relay TOKEN for SIDE\n" (or a legacy form without the "for
    # SIDE"). Two connections match if they use the same TOKEN and have
    # different SIDEs (the redundant connections are dropped when a match is
    # made). Legacy connections match any with the same TOKEN, ignoring SIDE
    # (so two legacy connections will match each other).
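    # The matching rule above reduces to a small predicate; this is a sketch
    # restating the condition that connection_got_token() applies below, not
    # a method of this class:
    #
    #   def sides_match(old_side, new_side):
    #       # legacy connections carry side=None and match any side
    #       return ((old_side is None) or (new_side is None)
    #               or (old_side != new_side))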
    # I will send "ok\n" when the matching connection is established, or
    # disconnect if no matching connection is made within MAX_WAIT_TIME
    # seconds. I will disconnect if you send data before the "ok\n". All data
    # you get after the "ok\n" will be from the other side. You will not
    # receive "ok\n" until the other side has also connected and submitted a
    # matching token (and differing SIDE).

    # In addition, the connections will be dropped after MAXLENGTH bytes have
    # been sent by either side, or MAXTIME seconds have elapsed after the
    # matching connections were established. A future API will reveal these
    # limits to clients instead of causing mysterious spontaneous failures.

    # These relay connections are not half-closeable (unlike full TCP
    # connections, applications will not receive any data after half-closing
    # their outgoing side). Applications must negotiate shutdown with their
    # peer and not close the connection until all data has finished
    # transferring in both directions. Applications which only need to send
    # data in one direction can use close() as usual.

    MAX_WAIT_TIME = 30*SECONDS
    MAXLENGTH = 10*MB
    MAXTIME = 60*SECONDS
    protocol = TransitConnection

    def __init__(self, blur_usage, log_file, usage_db):
        self._blur_usage = blur_usage
        self._log_requests = blur_usage is None
        if self._blur_usage:
            log.msg("blurring access times to %d seconds" % self._blur_usage)
            log.msg("not logging Transit connections to Twisted log")
        else:
            log.msg("not blurring access times")
        self._debug_log = False
        self._log_file = log_file
        self._db = None
        if usage_db:
            self._db = get_db(usage_db)
        self._rebooted = time.time()
        # we don't track TransitConnections until they submit a token
        self._pending_requests = defaultdict(set) # token -> set((side, TransitConnection))
        self._active_connections = set() # TransitConnection

    def connection_got_token(self, token, new_side, new_tc):
        potentials = self._pending_requests[token]
        for old in potentials:
            (old_side, old_tc) = old
            if ((old_side is None)
                or (new_side is None)
                or (old_side != new_side)):
                # we found a match
                if self._debug_log:
                    log.msg("transit relay 2: %s" % new_tc.describeToken())

                # drop and stop tracking the rest
                potentials.remove(old)
                for (_, leftover_tc) in potentials:
                    # Don't record this as errory. It's just a spare connection
                    # from the same side as a connection that got used. This
                    # can happen if the connection hint contains multiple
                    # addresses (we don't currently support those, but it'd
                    # probably be useful in the future).
                    leftover_tc.disconnect_redundant()
                self._pending_requests.pop(token)

                # glue the two ends together
                self._active_connections.add(new_tc)
                self._active_connections.add(old_tc)
                new_tc.buddy_connected(old_tc)
                old_tc.buddy_connected(new_tc)
                return
        if self._debug_log:
            log.msg("transit relay 1: %s" % new_tc.describeToken())
        potentials.add((new_side, new_tc))
        # TODO: timer

    def transitFinished(self, tc, token, side, description):
        if token in self._pending_requests:
            side_tc = (side, tc)
            self._pending_requests[token].discard(side_tc)
            if not self._pending_requests[token]: # set is now empty
                del self._pending_requests[token]
        if self._debug_log:
            log.msg("transitFinished %s" % (description,))
        self._active_connections.discard(tc)
        # we could update the usage database "current" row immediately, or wait
        # until the 5-minute timer updates it. If we update it now, just after
        # losing a connection, we should probably also update it just after
        # establishing one (at the end of connection_got_token).
        # For now I'm going to omit these, but maybe someday we'll turn them
        # both on. The consequence is that a manual execution of the munin
        # scripts ("munin run wormhole_transit_active") will give the wrong
        # value just after a connect/disconnect event. Actual munin graphs
        # should accurately report connections that last longer than the
        # 5-minute sampling window, which is what we actually care about.
        #self.timerUpdateStats()

    def recordUsage(self, started, result, total_bytes,
                    total_time, waiting_time):
        if self._debug_log:
            log.msg(format="Transit.recordUsage {bytes}B", bytes=total_bytes)
        if self._blur_usage:
            started = self._blur_usage * (started // self._blur_usage)
            total_bytes = blur_size(total_bytes)
        if self._log_file is not None:
            data = {"started": started,
                    "total_time": total_time,
                    "waiting_time": waiting_time,
                    "total_bytes": total_bytes,
                    "mood": result,
                    }
            self._log_file.write(json.dumps(data)+"\n")
            self._log_file.flush()
        if self._db:
            self._db.execute("INSERT INTO `usage`"
                             " (`started`, `total_time`, `waiting_time`,"
                             " `total_bytes`, `result`)"
                             " VALUES (?,?,?, ?,?)",
                             (started, total_time, waiting_time,
                              total_bytes, result))
            self._update_stats()
            self._db.commit()

    def timerUpdateStats(self):
        if self._db:
            self._update_stats()
            self._db.commit()

    def _update_stats(self):
        # current status: should be zero when idle
        rebooted = self._rebooted
        updated = time.time()
        connected = len(self._active_connections) / 2
        # TODO: when a connection is half-closed, len(active) will be odd. a
        # moment later (hopefully) the other side will disconnect, but
        # _update_stats isn't updated until later.
        waiting = len(self._pending_requests)
        # "waiting" doesn't count multiple parallel connections from the same
        # side
        incomplete_bytes = sum(tc._total_sent
                               for tc in self._active_connections)
        self._db.execute("DELETE FROM `current`")
        self._db.execute("INSERT INTO `current`"
                         " (`rebooted`, `updated`, `connected`, `waiting`,"
                         " `incomplete_bytes`)"
                         " VALUES (?, ?, ?, ?, ?)",
                         (rebooted, updated, connected, waiting,
                          incomplete_bytes))

magic-wormhole-transit-relay-0.2.1/tox.ini

# Tox (http://tox.testrun.org/) is a tool for running tests
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it, "pip install tox"
# and then run "tox" from this directory.

[tox]
envlist = {py27,py35,py36,py37,pypy}
skip_missing_interpreters = True
minversion = 2.4.0

[testenv]
usedevelop = True
extras = dev
deps =
    pyflakes >= 1.2.3
commands =
    pyflakes setup.py src
    python -m twisted.trial {posargs:wormhole_transit_relay}

[testenv:coverage]
deps =
    pyflakes >= 1.2.3
    coverage
commands =
    pyflakes setup.py src
    coverage run --branch -m twisted.trial {posargs:wormhole_transit_relay}
    coverage xml

magic-wormhole-transit-relay-0.2.1/versioneer.py

# Version: 0.18

"""The Versioneer - like a rocketeer, but for versions.

The Versioneer
==============

* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. 
This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". 
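As a concrete illustration of the flavors listed above (reusing the
docstring's own sample revision; the date is made up), a checkout two
commits past the "0.11" tag with uncommitted changes would report something
like:

    get_versions() == {"version": "0.11+2.g1076c97.dirty",
                       "full-revisionid":
                           "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
                       "dirty": True,
                       "error": None,
                       "date": "2017-01-01T12:00:00+0000"}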
To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. 
Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. 
me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. 
# setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
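# Note: for illustration, after "git archive" expands the $Format$ keywords,
# the lines this function parses look something like the following
# (illustrative values, reusing the sample revision from the module
# docstring):
#   git_refnames = " (HEAD -> master, tag: 0.11)"
#   git_full = "1076c978a8d3cfc70f408fe5974aa6c092c949ac"
#   git_date = "2017-01-01 12:00:00 +0000"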
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. 
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
'''


@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        pass
    return keywords
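# Illustrative sketch (not part of upstream Versioneer; values hypothetical):
# after 'git archive' performs export-subst, the keyword lines that
# git_get_keywords() scans for in _version.py look roughly like
#
#     git_refnames = " (HEAD -> master, tag: v1.2)"
#     git_full = "604cb786b27edc44a9b0345a6ce945b5adffcc26"
#     git_date = "2019-09-11 10:00:00 -0700"
#
# which git_get_keywords() would return as
#
#     {"refnames": " (HEAD -> master, tag: v1.2)",
#      "full": "604cb786b27edc44a9b0345a6ce945b5adffcc26",
#      "date": "2019-09-11 10:00:00 -0700"}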
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
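# Illustrative sketch (not part of upstream Versioneer; values hypothetical):
# feeding expanded keywords through git_versions_from_keywords() strips the
# "tag: " markers, then iterates the sorted tags and returns the first one
# carrying tag_prefix, e.g.
#
#     git_versions_from_keywords(
#         {"refnames": " (HEAD -> master, tag: v1.2, tag: v1.2rc1)",
#          "full": "604cb786b27edc44a9b0345a6ce945b5adffcc26",
#          "date": "2019-09-11 10:00:00 -0700"},
#         tag_prefix="v", verbose=False)
#     # -> {"version": "1.2",
#     #     "full-revisionid": "604cb786b27edc44a9b0345a6ce945b5adffcc26",
#     #     "dirty": False, "error": None,
#     #     "date": "2019-09-11T10:00:00-0700"}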
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces


def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)


def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory.
    """
    rootdirs = []

    for i in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        else:
            rootdirs.append(root)
            root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
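# Illustrative sketches (not part of upstream Versioneer; values
# hypothetical):
#
# git_pieces_from_vcs() parses 'git describe' output such as
# "v1.2-3-g604cb78-dirty" (with tag_prefix "v") into
#
#     {"closest-tag": "1.2", "distance": 3, "short": "604cb78",
#      "dirty": True, "long": <full 40-char hex>, "error": None,
#      "date": <commit date>}
#
# versions_from_parentdir() instead recovers the version purely from the
# unpacked directory name: with parentdir_prefix
# "magic-wormhole-transit-relay-", a tree rooted at
# .../magic-wormhole-transit-relay-0.2.1/ yields {"version": "0.2.1", ...}.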
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.

import json

version_json = '''
%s
''' # END VERSION_JSON


def get_versions():
    return json.loads(version_json)
"""


def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
                       contents, re.M | re.S)
    if not mo:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1))


def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    os.unlink(filename)
    contents = json.dumps(versions, sort_keys=True,
                          indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)

    print("set %s to '%s'" % (filename, versions["version"]))
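# Illustrative sketch (not part of upstream Versioneer; path and values
# hypothetical): the two helpers above are designed to round-trip. The
# target file must already exist, since it is unlinked before rewriting:
#
#     vers = {"version": "1.2", "full-revisionid": "abc1234def",
#             "dirty": False, "error": None, "date": None}
#     write_to_version_file("src/myproject/_version.py", vers)
#     versions_from_file("src/myproject/_version.py") == vers   # -> True
#
# The "# END VERSION_JSON" marker that SHORT_VERSION_PY embeds after the
# JSON blob is what the regular expression in versions_from_file() anchors
# on.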
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered


def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
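# Illustrative sketch (not part of upstream Versioneer; values hypothetical):
# given pieces such as
#
#     pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
#               "dirty": True, "long": "abc1234def", "error": None,
#               "date": None}
#
# the styles above render as:
#
#     render_pep440(pieces)             -> "1.2+3.gabc1234.dirty"
#     render_pep440_pre(pieces)         -> "1.2.post.dev3"
#     render_pep440_post(pieces)        -> "1.2.post3.dev0+gabc1234"
#     render_pep440_old(pieces)         -> "1.2.post3.dev0"
#     render_git_describe(pieces)       -> "1.2-3-gabc1234-dirty"
#     render_git_describe_long(pieces)  -> "1.2-3-gabc1234-dirty"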
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""


def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with keys: 'version', 'full-revisionid', 'dirty',
    'error', and 'date'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.

    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}


def get_version():
    """Get the short version string for this project."""
    return get_versions()["version"]
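# Illustrative sketch (not part of upstream Versioneer; values hypothetical):
# a successful lookup returns a dict shaped like
#
#     {"version": "1.2+3.gabc1234",
#      "full-revisionid": "abc1234def",
#      "dirty": False,
#      "error": None,
#      "date": "2019-09-11T10:00:00-0700"}
#
# and get_version() returns just the "version" entry.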
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?

    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...

        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir,
                                              cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds


CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:

 [versioneer]
 VCS = git
 style = pep440
 versionfile_source = src/myproject/_version.py
 versionfile_build = myproject/_version.py
 tag_prefix =
 parentdir_prefix = myproject-

You will also need to edit your setup.py to use the results:

 import versioneer
 setup(version=versioneer.get_version(),
       cmdclass=versioneer.get_cmdclass(), ...)

Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.

[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =

"""

INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""


def do_setup():
    """Main VCS-independent setup function for installing Versioneer."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })

    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0


def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f.readlines():
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print("        cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors


if __name__ == "__main__":
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
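# Illustrative usage sketch (not part of upstream Versioneer): a typical
# one-time installation into a project looks like
#
#     pip install versioneer   # or vendor this file, as this project does
#     # fill in the [versioneer] section of setup.cfg, then:
#     python versioneer.py setup
#
# after which 'python setup.py version' reports the computed version via
# the cmd_version command registered by get_cmdclass().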