stem-1.8.0/cache_manual.py
#!/usr/bin/env python
# Copyright 2015-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Caches tor's latest manual content. Run this to pick up new man page changes.
"""
import re
import sys
import stem.manual
import stem.util.system
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
GITWEB_MAN_LOG = 'https://gitweb.torproject.org/tor.git/log/doc/tor.1.txt'
MAN_LOG_LINK = "href='/tor.git/commit/doc/tor.1.txt\\?id=([^']*)'"
if __name__ == '__main__':
try:
man_log_page = urllib.urlopen(GITWEB_MAN_LOG).read()
man_commit = re.search(MAN_LOG_LINK, man_log_page).group(1)
except:
print("Unable to determine the latest commit to edit tor's man page: %s" % sys.exc_info()[1])
sys.exit(1)
try:
stem_commit = stem.util.system.call('git rev-parse HEAD')[0]
except IOError as exc:
print("Unable to determine stem's current commit: %s" % exc)
sys.exit(1)
print('Latest tor commit editing man page: %s' % man_commit)
print('Current stem commit: %s' % stem_commit)
print('')
try:
cached_manual = stem.manual.Manual.from_cache()
db_schema = cached_manual.schema
except stem.manual.SchemaMismatch as exc:
cached_manual, db_schema = None, exc.database_schema
except IOError:
cached_manual, db_schema = None, None # local copy has been deleted
if db_schema != stem.manual.SCHEMA_VERSION:
print('Cached database schema is out of date (was %s, but current version is %s)' % (db_schema, stem.manual.SCHEMA_VERSION))
cached_manual = None
latest_manual = stem.manual.Manual.from_remote()
if cached_manual:
if cached_manual == latest_manual:
print('Manual information is already up to date, nothing to do.')
sys.exit(0)
print('Differences detected...\n')
print(stem.manual._manual_differences(cached_manual, latest_manual))
latest_manual.man_commit = man_commit
latest_manual.stem_commit = stem_commit
latest_manual.save(stem.manual.CACHE_PATH)
stem-1.8.0/README.md
## Stem (Python Tor Library)
Stem is a Python controller library for **[Tor](https://www.torproject.org/)**. With it you can use Tor's [control protocol](https://gitweb.torproject.org/torspec.git/tree/control-spec.txt) to script against the Tor process, or build things such as [Nyx](https://nyx.torproject.org/).
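For example, a minimal sketch (this assumes tor is already running with a control port on 9051):

```python
from stem.control import Controller

with Controller.from_port(port = 9051) as controller:
  controller.authenticate()  # provide the password or cookie your tor instance requires
  print('Tor is running version %s' % controller.get_version())
```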
Documentation and tutorials available at **[stem.torproject.org](https://stem.torproject.org/)**.
stem-1.8.0/cache_fallback_directories.py
#!/usr/bin/env python
# Copyright 2016-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Caches tor's latest fallback directories.
"""
import re
import sys
import stem.directory
import stem.util.system
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
GITWEB_FALLBACK_LOG = 'https://gitweb.torproject.org/tor.git/log/src/app/config/fallback_dirs.inc'
FALLBACK_DIR_LINK = "href='/tor.git/commit/src/app/config/fallback_dirs.inc\\?id=([^']*)'"
if __name__ == '__main__':
try:
fallback_dir_page = urllib.urlopen(GITWEB_FALLBACK_LOG).read()
fallback_dir_commit = re.search(FALLBACK_DIR_LINK, fallback_dir_page).group(1)
except:
print("Unable to determine the latest commit to edit tor's fallback directories: %s" % sys.exc_info()[1])
sys.exit(1)
try:
stem_commit = stem.util.system.call('git rev-parse HEAD')[0]
except IOError as exc:
print("Unable to determine stem's current commit: %s" % exc)
sys.exit(1)
print('Latest tor commit editing fallback directories: %s' % fallback_dir_commit)
print('Current stem commit: %s' % stem_commit)
print('')
cached_fallback_directories = stem.directory.Fallback.from_cache()
latest_fallback_directories = stem.directory.Fallback.from_remote()
if cached_fallback_directories == latest_fallback_directories:
print('Fallback directories are already up to date, nothing to do.')
sys.exit(0)
# all fallbacks have the same header metadata, so just picking one
headers = list(latest_fallback_directories.values())[0].header if latest_fallback_directories else None
print('Differences detected...\n')
print(stem.directory._fallback_directory_differences(cached_fallback_directories, latest_fallback_directories))
stem.directory.Fallback._write(latest_fallback_directories, fallback_dir_commit, stem_commit, headers)
stem-1.8.0/setup.py
#!/usr/bin/env python
# Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
#
# Release Checklist
# =================
#
# * Recache latest information (cache_manual.py and cache_fallback_directories.py)
#
# * Test with python2.6, python2.7, python3, and pypy.
# |- If using tox run...
# |
# | % tox -- --all --target RUN_ALL,ONLINE
# |
# | Otherwise, for each interpreter run...
# |
# | % [python_interpreter] run_tests.py --all --target RUN_ALL,ONLINE
# |
# |- Pypy test instructions for ubuntu are...
# |
# | % sudo apt-get install pypy
# | % wget https://bootstrap.pypa.io/get-pip.py
# | % pypy get-pip.py --user
# | % ~/.local/bin/pip install mock pycodestyle pyflakes --user
# | % pypy ./run_tests.py --all
# |
# +- Some version of python 3.x should be available in your platform's
# repositories. To test against a specific version on ubuntu try the
# following. In this example, Python 3.7...
#
# % sudo apt-get install build-essential python-dev python-setuptools python-pip python-smbus
# % sudo apt-get install libncursesw5-dev libgdbm-dev libc6-dev
# % sudo apt-get install zlib1g-dev libsqlite3-dev tk-dev
# % sudo apt-get install libssl-dev openssl libffi-dev
#
# % wget https://www.python.org/ftp/python/3.7.0/Python-3.7.0.tgz
# % tar -xzf Python-3.7.0.tgz
# % mv Python-3.7.0 ~
#
# % cd ~/Python-3.7.0
# % ./configure
# % make
#
# % cd /path/to/stem
# % ~/Python-3.7.0/python ./run_tests.py --all
#
# * Tag the release
# |- Bump stem's version (in stem/__init__.py and docs/index.rst).
# |- git commit -a -m "Stem release 1.0.0"
# |- git tag -u 9ABBEEC6 -m "stem release 1.0.0" 1.0.0 d0bb81a
# +- git push --tags
#
# * Dry-run release on https://pypi.org/project/stem/
# |- python setup.py sdist --dryrun
# |- gpg --detach-sig --armor dist/stem-dry-run-1.0.0.tar.gz
# |- twine upload dist/*
# +- Check that https://pypi.org/project/stem-dry-run/ looks correct, comparing it to https://pypi.org/project/stem/
# +- Don't worry about the 'Bug Tracker' being missing. That's an attribute of the project itself.
#
# * Final release
# |- rm dist/*
# |- python setup.py sdist
# |- gpg --detach-sig --armor dist/stem-1.0.0.tar.gz
# +- twine upload dist/*
#
# * Contact package maintainers
# * Announce the release (example: https://blog.torproject.org/blog/stem-release-11)
import distutils.core
import os
import sys
import stem
if '--dryrun' in sys.argv:
DRY_RUN = True
sys.argv.remove('--dryrun')
else:
DRY_RUN = False
SUMMARY = 'Stem is a Python controller library that allows applications to interact with Tor (https://www.torproject.org/).'
DRY_RUN_SUMMARY = 'Ignore this package. This is dry-run release creation to work around PyPI limitations (https://github.com/pypa/packaging-problems/issues/74#issuecomment-260716129).'
DESCRIPTION = """
For tutorials and API documentation see `Stem's homepage <https://stem.torproject.org/>`_.
Quick Start
-----------
To install you can either use...
::
pip install stem
... or install from the source tarball. Stem supports both the python 2.x and 3.x series. To use its python3 counterpart you simply need to install using that version of python.
::
python3 setup.py install
After that, give some `tutorials <https://stem.torproject.org/tutorials.html>`_ a try! For questions or to discuss project ideas we're available on `irc <https://www.torproject.org/about/contact.html.en#irc>`_ and the `tor-dev@ email list <https://lists.torproject.org/cgi-bin/mailman/listinfo/tor-dev>`_.
""".strip()
MANIFEST = """
include cache_fallback_directories.py
include cache_manual.py
include LICENSE
include README.md
include MANIFEST.in
include requirements.txt
include run_tests.py
include tox.ini
graft docs
graft test
global-exclude __pycache__
global-exclude *.orig
global-exclude *.pyc
global-exclude *.swp
global-exclude *.swo
global-exclude .tox
global-exclude *~
recursive-exclude test/data *
recursive-exclude docs/_build *
""".strip()
# installation requires us to be in our setup.py's directory
os.chdir(os.path.dirname(os.path.abspath(__file__)))
with open('MANIFEST.in', 'w') as manifest_file:
manifest_file.write(MANIFEST)
try:
distutils.core.setup(
name = 'stem-dry-run' if DRY_RUN else 'stem',
version = stem.__version__,
description = DRY_RUN_SUMMARY if DRY_RUN else SUMMARY,
long_description = DESCRIPTION,
license = stem.__license__,
author = stem.__author__,
author_email = stem.__contact__,
url = stem.__url__,
packages = ['stem', 'stem.client', 'stem.descriptor', 'stem.interpreter', 'stem.response', 'stem.util'],
keywords = 'tor onion controller',
scripts = ['tor-prompt'],
package_data = {
'stem': ['cached_fallbacks.cfg', 'cached_manual.sqlite', 'settings.cfg'],
'stem.interpreter': ['settings.cfg'],
'stem.util': ['ports.cfg'],
}, classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Topic :: Security',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
finally:
if os.path.exists('MANIFEST.in'):
os.remove('MANIFEST.in')
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
stem-1.8.0/stem/process.py
# Copyright 2011-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Helper functions for working with tor as a process.
:NO_TORRC:
when provided as a torrc_path tor is run with a blank configuration
:DEFAULT_INIT_TIMEOUT:
number of seconds before we time out our attempt to start a tor instance
**Module Overview:**
::
launch_tor - starts up a tor process
launch_tor_with_config - starts a tor process with a custom torrc
"""
import os
import re
import signal
import subprocess
import tempfile
import threading
import stem.prereq
import stem.util.str_tools
import stem.util.system
import stem.version
NO_TORRC = ''
DEFAULT_INIT_TIMEOUT = 90
def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True, stdin = None):
"""
Initializes a tor process. This blocks until initialization completes or we
error out.
If tor's data directory is missing or stale then bootstrapping will include
making several requests to the directory authorities which can take a little
while. Usually this is done in 50 seconds or so, but occasionally calls seem
to get stuck, taking well over the default timeout.
**To work tor must log at NOTICE runlevel to stdout.** It does this by
default, but if you have a 'Log' entry in your torrc then you'll also need
'Log NOTICE stdout'.
Note: The timeout argument does not work on Windows or when outside the
main thread, and relies on the global state of the signal module.
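For example, a minimal sketch (the SocksPort below is illustrative, not a
default)...

::

  import stem.process

  tor_process = stem.process.launch_tor(
    args = ['--SocksPort', '2777'],
  )

  # ... interact with tor here, then clean up when done ...

  tor_process.kill()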
.. versionchanged:: 1.6.0
Allowing the timeout argument to be a float.
.. versionchanged:: 1.7.0
Added the **close_output** argument.
:param str tor_cmd: command for starting tor
:param list args: additional arguments for tor
:param str torrc_path: location of the torrc for us to use
:param int completion_percent: percent of bootstrap completion at which
this'll return
:param functor init_msg_handler: optional functor that will be provided with
tor's initialization stdout as we get it
:param int timeout: time after which the attempt to start tor is aborted, no
timeouts are applied if **None**
:param bool take_ownership: asserts ownership over the tor process so it
aborts if this python process terminates or a :class:`~stem.control.Controller`
we establish to it disconnects
:param bool close_output: closes tor's stdout and stderr streams when
bootstrapping is complete if true
:param str stdin: content to provide on stdin
:returns: **subprocess.Popen** instance for the tor subprocess
:raises: **OSError** if we either fail to create the tor process or reached a
timeout without success
"""
if stem.util.system.is_windows():
if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT:
raise OSError('You cannot launch tor with a timeout on Windows')
timeout = None
elif threading.current_thread().__class__.__name__ != '_MainThread':
if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT:
raise OSError('Launching tor with a timeout can only be done in the main thread')
timeout = None
# sanity check that we got a tor binary
if os.path.sep in tor_cmd:
# got a path (either relative or absolute), check what it leads to
if os.path.isdir(tor_cmd):
raise OSError("'%s' is a directory, not the tor executable" % tor_cmd)
elif not os.path.isfile(tor_cmd):
raise OSError("'%s' doesn't exist" % tor_cmd)
elif not stem.util.system.is_available(tor_cmd):
raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd)
# double check that we have a torrc to work with
if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path):
raise OSError("torrc doesn't exist (%s)" % torrc_path)
# starts a tor subprocess, raising an OSError if it fails
runtime_args, temp_file = [tor_cmd], None
if args:
runtime_args += args
if torrc_path:
if torrc_path == NO_TORRC:
temp_file = tempfile.mkstemp(prefix = 'empty-torrc-', text = True)[1]
runtime_args += ['-f', temp_file]
else:
runtime_args += ['-f', torrc_path]
if take_ownership:
runtime_args += ['__OwningControllerProcess', str(os.getpid())]
tor_process = None
try:
tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE)
if stdin:
tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin))
tor_process.stdin.close()
if timeout:
def timeout_handler(signum, frame):
raise OSError('reached a %i second timeout without success' % timeout)
signal.signal(signal.SIGALRM, timeout_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
bootstrap_line = re.compile('Bootstrapped ([0-9]+)%')
problem_line = re.compile('\\[(warn|err)\\] (.*)$')
last_problem = 'Timed out'
while True:
# Tor's stdout will be read as ASCII bytes. This is fine for python 2, but
# in python 3 that means it'll mismatch with other operations (for instance
# the bootstrap_line.search() call later will fail).
#
# It seems like python 2.x is perfectly happy for this to be unicode, so
# normalizing to that.
init_line = tor_process.stdout.readline().decode('utf-8', 'replace').strip()
# this will provide empty results if the process is terminated
if not init_line:
raise OSError('Process terminated: %s' % last_problem)
# provide the caller with the initialization message if they want it
if init_msg_handler:
init_msg_handler(init_line)
# return the process if we're done with bootstrapping
bootstrap_match = bootstrap_line.search(init_line)
problem_match = problem_line.search(init_line)
if bootstrap_match and int(bootstrap_match.group(1)) >= completion_percent:
return tor_process
elif problem_match:
runlevel, msg = problem_match.groups()
if 'see warnings above' not in msg:
if ': ' in msg:
msg = msg.split(': ')[-1].strip()
last_problem = msg
except:
if tor_process:
tor_process.kill() # don't leave a lingering process
tor_process.wait()
raise
finally:
if timeout:
signal.alarm(0) # stop alarm
if tor_process and close_output:
if tor_process.stdout:
tor_process.stdout.close()
if tor_process.stderr:
tor_process.stderr.close()
if temp_file:
try:
os.remove(temp_file)
except:
pass
def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True):
"""
Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a
customized configuration. This writes a temporary torrc to disk, launches
tor, then deletes the torrc.
For example...
::
tor_process = stem.process.launch_tor_with_config(
config = {
'ControlPort': '2778',
'Log': [
'NOTICE stdout',
'ERR file /tmp/tor_error_log',
],
},
)
.. versionchanged:: 1.7.0
Added the **close_output** argument.
:param dict config: configuration options, such as "{'ControlPort': '9051'}",
values can either be a **str** or **list of str** if for multiple values
:param str tor_cmd: command for starting tor
:param int completion_percent: percent of bootstrap completion at which
this'll return
:param functor init_msg_handler: optional functor that will be provided with
tor's initialization stdout as we get it
:param int timeout: time after which the attempt to start tor is aborted, no
timeouts are applied if **None**
:param bool take_ownership: asserts ownership over the tor process so it
aborts if this python process terminates or a :class:`~stem.control.Controller`
we establish to it disconnects
:param bool close_output: closes tor's stdout and stderr streams when
bootstrapping is complete if true
:returns: **subprocess.Popen** instance for the tor subprocess
:raises: **OSError** if we either fail to create the tor process or reached a
timeout without success
"""
# TODO: Drop this version check when tor 0.2.6.3 or higher is the only game
# in town.
try:
use_stdin = stem.version.get_system_tor_version(tor_cmd) >= stem.version.Requirement.TORRC_VIA_STDIN
except IOError:
use_stdin = False
# we need to be sure that we're logging to stdout to figure out when we're
# done bootstrapping
if 'Log' in config:
stdout_options = ['DEBUG stdout', 'INFO stdout', 'NOTICE stdout']
if isinstance(config['Log'], str):
config['Log'] = [config['Log']]
has_stdout = False
for log_config in config['Log']:
if log_config in stdout_options:
has_stdout = True
break
if not has_stdout:
config['Log'].append('NOTICE stdout')
config_str = ''
for key, values in list(config.items()):
if isinstance(values, str):
config_str += '%s %s\n' % (key, values)
else:
for value in values:
config_str += '%s %s\n' % (key, value)
if use_stdin:
return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, close_output, stdin = config_str)
else:
torrc_descriptor, torrc_path = tempfile.mkstemp(prefix = 'torrc-', text = True)
try:
with open(torrc_path, 'w') as torrc_file:
torrc_file.write(config_str)
# prevents tor from erroring out due to a missing torrc if it gets a sighup
args = ['__ReloadTorrcOnSIGHUP', '0']
return launch_tor(tor_cmd, args, torrc_path, completion_percent, init_msg_handler, timeout, take_ownership)
finally:
try:
os.close(torrc_descriptor)
os.remove(torrc_path)
except:
pass
stem-1.8.0/stem/exit_policy.py
# Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Representation of tor exit policies. These can be easily used to check if
exiting to a destination is permissible or not. For instance...
::
>>> from stem.exit_policy import ExitPolicy, MicroExitPolicy
>>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
>>> print(policy)
accept *:80, accept *:443, reject *:*
>>> print(policy.summary())
accept 80, 443
>>> policy.can_exit_to('75.119.206.243', 80)
True
>>> policy = MicroExitPolicy('accept 80,443')
>>> print(policy)
accept 80,443
>>> policy.can_exit_to('75.119.206.243', 80)
True
::
ExitPolicy - Exit policy for a Tor relay
|- MicroExitPolicy - Microdescriptor exit policy
|
|- can_exit_to - check if exiting to this destination is allowed or not
|- is_exiting_allowed - check if any exiting is allowed
|- summary - provides a short label, similar to a microdescriptor
|- has_private - checks if policy has anything expanded from the 'private' keyword
|- strip_private - provides a copy of the policy without 'private' entries
|- has_default - checks if policy ends with the default policy suffix
|- strip_default - provides a copy of the policy without the default suffix
|- __str__ - string representation
+- __iter__ - ExitPolicyRule entries that this contains
ExitPolicyRule - Single rule of an exit policy chain
|- MicroExitPolicyRule - Single rule for a microdescriptor policy
|
|- is_address_wildcard - checks if we'll accept any address
|- is_port_wildcard - checks if we'll accept any port
|- get_address_type - provides the protocol our ip address belongs to
|- is_match - checks if we match a given destination
|- get_mask - provides the address representation of our mask
|- get_masked_bits - provides the bit representation of our mask
|- is_default - flag indicating if this was part of the default end of a policy
|- is_private - flag indicating if this was expanded from a 'private' keyword
+- __str__ - string representation for this rule
get_config_policy - provides the ExitPolicy based on torrc rules
.. data:: AddressType (enum)
Enumerations for IP address types that can be in an exit policy.
============ ===========
AddressType Description
============ ===========
**WILDCARD** any address of either IPv4 or IPv6
**IPv4** IPv4 address
**IPv6** IPv6 address
============ ===========
"""
from __future__ import absolute_import
import re
import socket
import zlib
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
AddressType = stem.util.enum.Enum(('WILDCARD', 'Wildcard'), ('IPv4', 'IPv4'), ('IPv6', 'IPv6'))
# Addresses aliased by the 'private' policy. From the tor man page...
#
# To specify all internal and link-local networks (including 0.0.0.0/8,
# 169.254.0.0/16, 127.0.0.0/8, 192.168.0.0/16, 10.0.0.0/8, and 172.16.0.0/12),
# you can use the 'private' alias instead of an address.
PRIVATE_ADDRESSES = (
'0.0.0.0/8',
'169.254.0.0/16',
'127.0.0.0/8',
'192.168.0.0/16',
'10.0.0.0/8',
'172.16.0.0/12',
)
def get_config_policy(rules, ip_address = None):
"""
Converts an ExitPolicy found in a torrc to a proper exit pattern. This
accounts for...
* ports being optional
* the 'private' keyword
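For instance, a small sketch...

::

  >>> from stem.exit_policy import get_config_policy
  >>> print(get_config_policy('accept *:80, reject *:*'))
  accept *:80, reject *:*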
.. deprecated:: 1.7.0
Tor's torrc parameters lack a formal spec, making it difficult for this
method to be reliable. Callers are encouraged to move to
:func:`~stem.control.Controller.get_exit_policy` instead.
:param str,list rules: comma separated rules or list to be converted
:param str ip_address: this relay's IP address for the 'private' policy if
it's present, this defaults to the local address
:returns: :class:`~stem.exit_policy.ExitPolicy` reflected by the rules
:raises: **ValueError** if input isn't a valid tor exit policy
"""
if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True)):
raise ValueError("%s isn't a valid IP address" % ip_address)
elif ip_address and stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True) and not (ip_address[0] == '[' and ip_address[-1] == ']'):
ip_address = '[%s]' % ip_address # ExitPolicy validation expects IPv6 addresses to be bracketed
if stem.util._is_str(rules):
rules = rules.split(',')
result = []
for rule in rules:
rule = rule.strip()
if not rule:
continue
if not re.search(':[\\d\\-\\*]+$', rule):
rule = '%s:*' % rule
if 'private' in rule:
acceptance = rule.split(' ', 1)[0]
port = rule.rsplit(':', 1)[1]
addresses = list(PRIVATE_ADDRESSES)
if ip_address:
addresses.append(ip_address)
else:
try:
addresses.append(socket.gethostbyname(socket.gethostname()))
except:
pass # we might not have a network connection
for private_addr in addresses:
result.append(ExitPolicyRule('%s %s:%s' % (acceptance, private_addr, port)))
else:
result.append(ExitPolicyRule(rule))
return ExitPolicy(*result)
def _flag_private_rules(rules):
"""
Determine if part of our policy was expanded from the 'private' keyword. This
doesn't differentiate if this actually came from the 'private' keyword or a
series of rules exactly matching it.
"""
matches = [] # find all possible starting indexes
for i, rule in enumerate(rules):
if i + len(PRIVATE_ADDRESSES) > len(rules):
break
rule_str = '%s/%s' % (rule.address, rule.get_masked_bits())
if rule_str == PRIVATE_ADDRESSES[0]:
matches.append(i)
for start_index in matches:
# To match the private policy the following must all be true...
#
# * series of addresses and bit masks match PRIVATE_ADDRESSES
# * all rules have the same port range
# * all rules have the same acceptance (all accept or reject entries)
#
# The last rule is dynamically based on the relay's public address. It may
# not be present if get_config_policy() created this policy and we couldn't
# resolve our address.
last_index = start_index + len(PRIVATE_ADDRESSES)
rule_set = rules[start_index:last_index]
last_rule = rules[last_index] if len(rules) > last_index else None
is_match = True
min_port, max_port = rule_set[0].min_port, rule_set[0].max_port
is_accept = rule_set[0].is_accept
for i, rule in enumerate(rule_set):
rule_str = '%s/%s' % (rule.address, rule.get_masked_bits())
if rule_str != PRIVATE_ADDRESSES[i] or rule.min_port != min_port or rule.max_port != max_port or rule.is_accept != is_accept:
is_match = False
break
if is_match:
for rule in rule_set:
rule._is_private = True
if last_rule and not last_rule.is_address_wildcard() and last_rule.min_port == min_port and last_rule.max_port == max_port and last_rule.is_accept == is_accept:
last_rule._is_private = True
def _flag_default_rules(rules):
"""
Determine if part of our policy ends with the default policy suffix.
"""
if len(rules) >= len(DEFAULT_POLICY_RULES):
rules_suffix = tuple(rules[-len(DEFAULT_POLICY_RULES):])
if rules_suffix == DEFAULT_POLICY_RULES:
for rule in rules_suffix:
rule._is_default_suffix = True
class ExitPolicy(object):
"""
Policy for the destinations that a relay allows or denies exiting to. This
is, in effect, just a list of :class:`~stem.exit_policy.ExitPolicyRule`
entries.
:param list rules: **str** or :class:`~stem.exit_policy.ExitPolicyRule`
entries that make up this policy
"""
def __init__(self, *rules):
# sanity check the types
for rule in rules:
if not stem.util._is_str(rule) and not isinstance(rule, ExitPolicyRule):
raise TypeError('Exit policy rules can only contain strings or ExitPolicyRules, got a %s (%s)' % (type(rule), rules))
# Unparsed representation of the rules we were constructed with. Our
# _get_rules() method consumes this to provide ExitPolicyRule instances.
# This is lazily evaluated so we don't need to actually parse the exit
# policy if it's never used.
is_all_str = True
for rule in rules:
if not stem.util._is_str(rule):
is_all_str = False
if rules and is_all_str:
byte_rules = [stem.util.str_tools._to_bytes(r) for r in rules]
self._input_rules = zlib.compress(b','.join(byte_rules))
else:
self._input_rules = rules
self._rules = None
self._hash = None
# Result when no rules apply. According to the spec policies default to 'is
# allowed', but our microdescriptor policy subclass might want to change
# this.
self._is_allowed_default = True
@lru_cache()
def can_exit_to(self, address = None, port = None, strict = False):
"""
Checks if this policy allows exiting to a given destination or not. If the
address or port is omitted then this will check if we're allowed to exit to
any instances of the defined address or port.
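For example...

::

  >>> policy = ExitPolicy('accept *:80', 'reject *:*')
  >>> policy.can_exit_to('75.119.206.243', 80)
  True
  >>> policy.can_exit_to('75.119.206.243', 443)
  False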
:param str address: IPv4 or IPv6 address (with or without brackets)
:param int port: port number
:param bool strict: if the address or port is excluded then check if we can
exit to **all** instances of the defined address or port
:returns: **True** if exiting to this destination is allowed, **False** otherwise
"""
if not self.is_exiting_allowed():
return False
for rule in self._get_rules():
if rule.is_match(address, port, strict):
return rule.is_accept
return self._is_allowed_default
@lru_cache()
def is_exiting_allowed(self):
"""
Provides **True** if the policy allows exiting whatsoever, **False**
otherwise.
"""
rejected_ports = set()
for rule in self._get_rules():
if rule.is_accept:
for port in range(rule.min_port, rule.max_port + 1):
if port not in rejected_ports:
return True
elif rule.is_address_wildcard():
if rule.is_port_wildcard():
return False
else:
rejected_ports.update(range(rule.min_port, rule.max_port + 1))
return self._is_allowed_default
@lru_cache()
def summary(self):
"""
Provides a short description of our policy chain, similar to a
microdescriptor. This excludes entries that don't cover all IP
addresses, and is either a whitelist or blacklist policy based on
the final entry. For instance...
::
>>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
>>> policy.summary()
'accept 80, 443'
>>> policy = ExitPolicy('accept *:443', 'reject *:1-1024', 'accept *:*')
>>> policy.summary()
'reject 1-442, 444-1024'
:returns: **str** with a concise summary for our policy
"""
# determines if we're a whitelist or blacklist
is_whitelist = not self._is_allowed_default
for rule in self._get_rules():
if rule.is_address_wildcard() and rule.is_port_wildcard():
is_whitelist = not rule.is_accept
break
# Iterates over the policies and adds the ports we'll return (ie,
# allows if a whitelist and rejects if a blacklist). Regardless of a
# port's allow/reject policy, all further entries with that port are
# ignored since policies respect the first matching policy.
display_ports, skip_ports = [], set()
for rule in self._get_rules():
if not rule.is_address_wildcard():
continue
elif rule.is_port_wildcard():
break
for port in range(rule.min_port, rule.max_port + 1):
if port in skip_ports:
continue
# if accept + whitelist or reject + blacklist then add
if rule.is_accept == is_whitelist:
display_ports.append(port)
# all further entries with this port should be ignored
skip_ports.add(port)
# convert port list to a list of ranges (ie, ['1-3'] rather than [1, 2, 3])
if display_ports:
display_ranges, temp_range = [], []
display_ports.sort()
display_ports.append(None) # ending item to include last range in loop
for port in display_ports:
if not temp_range or temp_range[-1] + 1 == port:
temp_range.append(port)
else:
if len(temp_range) > 1:
display_ranges.append('%i-%i' % (temp_range[0], temp_range[-1]))
else:
display_ranges.append(str(temp_range[0]))
temp_range = [port]
else:
# everything for the inverse
is_whitelist = not is_whitelist
display_ranges = ['1-65535']
# constructs the summary string
label_prefix = 'accept ' if is_whitelist else 'reject '
return (label_prefix + ', '.join(display_ranges)).strip()
def has_private(self):
"""
Checks if we have any rules expanded from the 'private' keyword. Tor
appends these by default to the start of the policy and includes a dynamic
address (the relay's public IP).
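For example, a sketch using a purely illustrative relay address...

::

  >>> policy = get_config_policy('reject private:80, accept *:80', '12.34.56.78')
  >>> policy.has_private()
  True
  >>> policy.strip_private().has_private()
  False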
.. versionadded:: 1.3.0
:returns: **True** if we have any private rules expanded from the 'private'
keyword, **False** otherwise
"""
for rule in self._get_rules():
if rule.is_private():
return True
return False
def strip_private(self):
"""
Provides a copy of this policy without 'private' policy entries.
.. versionadded:: 1.3.0
:returns: **ExitPolicy** without private rules
"""
return ExitPolicy(*[rule for rule in self._get_rules() if not rule.is_private()])
def has_default(self):
"""
Checks if we have the default policy suffix.
.. versionadded:: 1.3.0
:returns: **True** if we have the default policy suffix, **False** otherwise
"""
for rule in self._get_rules():
if rule.is_default():
return True
return False
def strip_default(self):
"""
Provides a copy of this policy without the default policy suffix.
.. versionadded:: 1.3.0
:returns: **ExitPolicy** without default rules
"""
return ExitPolicy(*[rule for rule in self._get_rules() if not rule.is_default()])
def _get_rules(self):
# Local reference to our input_rules so this can be lock free. Otherwise
# another thread might unset our input_rules while processing them.
input_rules = self._input_rules
if self._rules is None and input_rules is not None:
rules = []
is_all_accept, is_all_reject = True, True
if isinstance(input_rules, bytes):
decompressed_rules = zlib.decompress(input_rules).split(b',')
else:
decompressed_rules = input_rules
for rule in decompressed_rules:
if isinstance(rule, bytes):
rule = stem.util.str_tools._to_unicode(rule)
if stem.util._is_str(rule):
if not rule.strip():
continue
rule = ExitPolicyRule(rule.strip())
if rule.is_accept:
is_all_reject = False
else:
is_all_accept = False
rules.append(rule)
if rule.is_address_wildcard() and rule.is_port_wildcard():
break # this is a catch-all, no reason to include more
# If we only have one kind of entry *and* end with a wildcard then
# we might as well use the simpler version. For instance...
#
# reject *:80, reject *:443, reject *:*
#
# ... could also be represented as simply...
#
# reject *:*
#
# This mostly comes up with reject-all policies because the
# 'reject private:*' appends an extra seven rules that have no
# effect.
if rules and (rules[-1].is_address_wildcard() and rules[-1].is_port_wildcard()):
if is_all_accept:
rules = [ExitPolicyRule('accept *:*')]
elif is_all_reject:
rules = [ExitPolicyRule('reject *:*')]
_flag_private_rules(rules)
_flag_default_rules(rules)
self._rules = rules
self._input_rules = None
return self._rules
def __len__(self):
return len(self._get_rules())
def __iter__(self):
for rule in self._get_rules():
yield rule
@lru_cache()
def __str__(self):
return ', '.join([str(rule) for rule in self._get_rules()])
def __hash__(self):
if self._hash is None:
my_hash = 0
for rule in self._get_rules():
my_hash *= 1024
my_hash += hash(rule)
self._hash = my_hash
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ExitPolicy) else False
def __ne__(self, other):
return not self == other
class MicroExitPolicy(ExitPolicy):
"""
Exit policy provided by the microdescriptors. This is a distilled version of
what a normal :class:`~stem.exit_policy.ExitPolicy` contains, just consisting of a
list of ports that are either accepted or rejected. For instance...
::
accept 80,443 # only accepts common http ports
reject 1-1024 # only accepts non-privileged ports
Since these policies are a subset of the exit policy information (lacking IP
ranges) clients can only use them to guess if a relay will accept traffic or
not. To quote the `dir-spec <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_ (section 3.2.1)...
::
With microdescriptors, clients don't learn exact exit policies:
clients can only guess whether a relay accepts their request, try the
BEGIN request, and might get end-reason-exit-policy if they guessed
wrong, in which case they'll have to try elsewhere.
:var bool is_accept: **True** if these are ports that we accept, **False** if
they're ports that we reject
:param str policy: policy string that describes this policy
"""
def __init__(self, policy):
# Microdescriptor policies are of the form...
#
# MicrodescriptorPolicy ::= ("accept" / "reject") SP PortList NL
# PortList ::= PortOrRange
# PortList ::= PortList "," PortOrRange
# PortOrRange ::= INT "-" INT / INT
self._policy = policy
if policy.startswith('accept'):
self.is_accept = True
elif policy.startswith('reject'):
self.is_accept = False
else:
raise ValueError("A microdescriptor exit policy must start with either 'accept' or 'reject': %s" % policy)
policy = policy[6:]
if not policy.startswith(' '):
raise ValueError('A microdescriptor exit policy should have a space separating accept/reject from its port list: %s' % self._policy)
policy = policy.lstrip()
# convert our port list into MicroExitPolicyRule
rules = []
for port_entry in policy.split(','):
if '-' in port_entry:
min_port, max_port = port_entry.split('-', 1)
else:
min_port = max_port = port_entry
if not stem.util.connection.is_valid_port(min_port) or \
not stem.util.connection.is_valid_port(max_port):
raise ValueError("'%s' is an invalid port range" % port_entry)
rules.append(MicroExitPolicyRule(self.is_accept, int(min_port), int(max_port)))
super(MicroExitPolicy, self).__init__(*rules)
self._is_allowed_default = not self.is_accept
def __str__(self):
return self._policy
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, MicroExitPolicy) else False
def __ne__(self, other):
return not self == other
class ExitPolicyRule(object):
"""
Single rule from the user's exit policy. These rules are chained together to
form complete policies that describe where a relay will and will not allow
traffic to exit.
The format of these rules is formally described in the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_ as an
'exitpattern'. Note that while these are similar to tor's man page entry for
ExitPolicies, it's not the exact same. An exitpattern is better defined and
stricter in what it'll accept. For instance, ports are not optional and it
does not contain the 'private' alias.
This should be treated as an immutable object.
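For example, with purely illustrative destinations...

::

  >>> rule = ExitPolicyRule('accept 66.85.227.80:80')
  >>> rule.is_match('66.85.227.80', 80)
  True
  >>> rule.is_match('75.119.206.243', 80)
  False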
.. versionchanged:: 1.5.0
Support for 'accept6/reject6' entries and '\\*4/6' wildcards.
:var bool is_accept: indicates if exiting is allowed or disallowed
:var str address: address that this rule is for
:var int min_port: lower end of the port range that we include (inclusive)
:var int max_port: upper end of the port range that we include (inclusive)
:param str rule: exit policy rule to be parsed
:raises: **ValueError** if input isn't a valid tor exit policy rule
"""
def __init__(self, rule):
# policy ::= "accept[6]" exitpattern | "reject[6]" exitpattern
# exitpattern ::= addrspec ":" portspec
rule = stem.util.str_tools._to_unicode(rule)
self.is_accept = rule.startswith('accept')
is_ipv6_only = rule.startswith('accept6') or rule.startswith('reject6')
if rule.startswith('accept6') or rule.startswith('reject6'):
exitpattern = rule[7:]
elif rule.startswith('accept') or rule.startswith('reject'):
exitpattern = rule[6:]
else:
raise ValueError("An exit policy must start with either 'accept[6]' or 'reject[6]': %s" % rule)
if not exitpattern.startswith(' '):
raise ValueError('An exit policy should have a space separating its accept/reject from the exit pattern: %s' % rule)
exitpattern = exitpattern.lstrip()
if ':' not in exitpattern or ']' in exitpattern.rsplit(':', 1)[1]:
raise ValueError("An exitpattern must be of the form 'addrspec:portspec': %s" % rule)
self.address = None
self._address_type = None
self._masked_bits = None
self.min_port = self.max_port = None
self._hash = None
# Our mask in ip notation (ex. '255.255.255.0'). This is only set if we
# either have a custom mask that can't be represented by a number of bits,
# or the user has called mask(), lazily loading this.
self._mask = None
# Malformed exit policies are rejected, but there's an exception where it's
# just skipped: when an accept6/reject6 rule has an IPv4 address...
#
# "Using an IPv4 address with accept6 or reject6 is ignored and generates
# a warning."
self._skip_rule = False
addrspec, portspec = exitpattern.rsplit(':', 1)
self._apply_addrspec(rule, addrspec, is_ipv6_only)
self._apply_portspec(rule, portspec)
# Flags to indicate if this rule seems to be expanded from the 'private'
# keyword or tor's default policy suffix.
self._is_private = False
self._is_default_suffix = False
def is_address_wildcard(self):
"""
**True** if we'll match against **any** address, **False** otherwise.
Note that this is different than \\*4, \\*6, or '/0' address which are
wildcards for only either IPv4 or IPv6.
:returns: **bool** for if our address matching is a wildcard
"""
return self._address_type == _address_type_to_int(AddressType.WILDCARD)
def is_port_wildcard(self):
"""
**True** if we'll match against any port, **False** otherwise.
:returns: **bool** for if our port matching is a wildcard
"""
return self.min_port in (0, 1) and self.max_port == 65535
def is_match(self, address = None, port = None, strict = False):
"""
**True** if we match against the given destination, **False** otherwise. If
the address or port is omitted then this will check if we're allowed to
exit to any instances of the defined address or port.
:param str address: IPv4 or IPv6 address (with or without brackets)
:param int port: port number
:param bool strict: if the address or port is excluded then check if we can
exit to **all** instances of the defined address or port
:returns: **bool** indicating if we match against this destination
:raises: **ValueError** if provided with a malformed address or port
"""
if self._skip_rule:
return False
# validate our input and check if the argument doesn't match our address type
if address is not None:
address_type = self.get_address_type()
if stem.util.connection.is_valid_ipv4_address(address):
if address_type == AddressType.IPv6:
return False
elif stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
if address_type == AddressType.IPv4:
return False
address = address.lstrip('[').rstrip(']')
else:
raise ValueError("'%s' isn't a valid IPv4 or IPv6 address" % address)
if port is not None and not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
# If we're not matching against an address or port but the rule has one
# then we're a fuzzy match. When that happens...
#
# * If strict and a reject rule then we're a match ('can exit to *all* instances').
# * If not strict and an accept rule then match ('can exit to *any* instance').
fuzzy_match = False
if not self.is_address_wildcard():
# Already got the integer representation of our mask and our address
# with the mask applied. Just need to check if this address with the
# mask applied matches.
if address is None:
fuzzy_match = True
else:
comparison_addr_bin = stem.util.connection.address_to_int(address)
comparison_addr_bin &= self._get_mask_bin()
if self._get_address_bin() != comparison_addr_bin:
return False
if not self.is_port_wildcard():
if port is None:
fuzzy_match = True
elif port < self.min_port or port > self.max_port:
return False
if fuzzy_match:
return strict != self.is_accept
else:
return True
def get_address_type(self):
"""
Provides the :data:`~stem.exit_policy.AddressType` for our policy.
:returns: :data:`~stem.exit_policy.AddressType` for the type of address that we have
"""
return _int_to_address_type(self._address_type)
def get_mask(self, cache = True):
"""
Provides the address represented by our mask. This is **None** if our
address type is a wildcard.
:param bool cache: caches the result if **True**
:returns: str of our subnet mask for the address (ex. '255.255.255.0')
"""
# Lazy loading our mask because it is very infrequently requested. There's
# no reason to usually use memory for it.
if not self._mask:
address_type = self.get_address_type()
if address_type == AddressType.WILDCARD:
mask = None
elif address_type == AddressType.IPv4:
mask = stem.util.connection.get_mask_ipv4(self._masked_bits)
elif address_type == AddressType.IPv6:
mask = stem.util.connection.get_mask_ipv6(self._masked_bits)
if not cache:
return mask
self._mask = mask
return self._mask
def get_masked_bits(self):
"""
Provides the number of bits our subnet mask represents. This is **None** if
our mask can't have a bit representation.
:returns: int with the bit representation of our mask
"""
return self._masked_bits
def is_private(self):
"""
Checks if this rule was expanded from the 'private' policy keyword.
.. versionadded:: 1.3.0
:returns: **True** if this rule was expanded from the 'private' keyword, **False** otherwise.
"""
return self._is_private
def is_default(self):
"""
Checks if this rule belongs to the default exit policy suffix.
.. versionadded:: 1.3.0
:returns: **True** if this rule was part of the default end of a policy, **False** otherwise.
"""
return self._is_default_suffix
@lru_cache()
def __str__(self):
"""
Provides the string representation of our policy. This does not
necessarily match the rule that we were constructed from (due to things
like IPv6 address collapsing or the multiple representations that our mask
can have). However, it is a valid rule that would be accepted by our constructor
to re-create this rule.
"""
label = 'accept ' if self.is_accept else 'reject '
if self.is_address_wildcard():
label += '*:'
else:
address_type = self.get_address_type()
if address_type == AddressType.IPv4:
label += self.address
else:
label += '[%s]' % self.address
# Including our mask label as follows...
# - exclude our mask if it doesn't do anything
# - use our masked bit count if we can
# - use the mask itself otherwise
if (address_type == AddressType.IPv4 and self._masked_bits == 32) or \
(address_type == AddressType.IPv6 and self._masked_bits == 128):
label += ':'
elif self._masked_bits is not None:
label += '/%i:' % self._masked_bits
else:
label += '/%s:' % self.get_mask()
if self.is_port_wildcard():
label += '*'
elif self.min_port == self.max_port:
label += str(self.min_port)
else:
label += '%i-%i' % (self.min_port, self.max_port)
return label
@lru_cache()
def _get_mask_bin(self):
# provides an integer representation of our mask
return int(stem.util.connection._address_to_binary(self.get_mask(False)), 2)
@lru_cache()
def _get_address_bin(self):
# provides an integer representation of our address
return stem.util.connection.address_to_int(self.address) & self._get_mask_bin()
def _apply_addrspec(self, rule, addrspec, is_ipv6_only):
# Parses the addrspec...
# addrspec ::= "*" | ip4spec | ip6spec
# Expand IPv4 and IPv6 specific wildcards into /0 entries so we have one
# fewer bizarre special case headaches to deal with.
if addrspec == '*4':
addrspec = '0.0.0.0/0'
elif addrspec == '*6' or (addrspec == '*' and is_ipv6_only):
addrspec = '[0000:0000:0000:0000:0000:0000:0000:0000]/0'
if '/' in addrspec:
self.address, addr_extra = addrspec.split('/', 1)
else:
self.address, addr_extra = addrspec, None
if addrspec == '*':
self._address_type = _address_type_to_int(AddressType.WILDCARD)
self.address = self._masked_bits = None
elif stem.util.connection.is_valid_ipv4_address(self.address):
# ipv4spec ::= ip4 | ip4 "/" num_ip4_bits | ip4 "/" ip4mask
# ip4 ::= an IPv4 address in dotted-quad format
# ip4mask ::= an IPv4 mask in dotted-quad format
# num_ip4_bits ::= an integer between 0 and 32
if is_ipv6_only:
self._skip_rule = True
self._address_type = _address_type_to_int(AddressType.IPv4)
if addr_extra is None:
self._masked_bits = 32
elif stem.util.connection.is_valid_ipv4_address(addr_extra):
# provided with an ip4mask
try:
self._masked_bits = stem.util.connection._get_masked_bits(addr_extra)
except ValueError:
# mask can't be represented as a number of bits (ex. '255.255.0.255')
self._mask = addr_extra
self._masked_bits = None
elif addr_extra.isdigit():
# provided with a num_ip4_bits
self._masked_bits = int(addr_extra)
if self._masked_bits < 0 or self._masked_bits > 32:
raise ValueError('IPv4 masks must be in the range of 0-32 bits')
else:
raise ValueError("The '%s' isn't a mask nor number of bits: %s" % (addr_extra, rule))
elif self.address.startswith('[') and self.address.endswith(']') and \
stem.util.connection.is_valid_ipv6_address(self.address[1:-1]):
# ip6spec ::= ip6 | ip6 "/" num_ip6_bits
# ip6 ::= an IPv6 address, surrounded by square brackets.
# num_ip6_bits ::= an integer between 0 and 128
self.address = stem.util.connection.expand_ipv6_address(self.address[1:-1].upper())
self._address_type = _address_type_to_int(AddressType.IPv6)
if addr_extra is None:
self._masked_bits = 128
elif addr_extra.isdigit():
# provided with a num_ip6_bits
self._masked_bits = int(addr_extra)
if self._masked_bits < 0 or self._masked_bits > 128:
raise ValueError('IPv6 masks must be in the range of 0-128 bits')
else:
raise ValueError("The '%s' isn't a number of bits: %s" % (addr_extra, rule))
else:
raise ValueError("'%s' isn't a wildcard, IPv4, or IPv6 address: %s" % (addrspec, rule))
def _apply_portspec(self, rule, portspec):
# Parses the portspec...
# portspec ::= "*" | port | port "-" port
# port ::= an integer between 1 and 65535, inclusive.
#
# Due to a tor bug the spec says that we should accept port of zero, but
# connections to port zero are never permitted.
if portspec == '*':
self.min_port, self.max_port = 1, 65535
elif portspec.isdigit():
# provided with a single port
if stem.util.connection.is_valid_port(portspec, allow_zero = True):
self.min_port = self.max_port = int(portspec)
else:
raise ValueError("'%s' isn't within a valid port range: %s" % (portspec, rule))
elif '-' in portspec:
# provided with a port range
port_comp = portspec.split('-', 1)
if stem.util.connection.is_valid_port(port_comp, allow_zero = True):
self.min_port = int(port_comp[0])
self.max_port = int(port_comp[1])
if self.min_port > self.max_port:
raise ValueError("Port range has a lower bound that's greater than its upper bound: %s" % rule)
else:
raise ValueError('Malformed port range: %s' % rule)
else:
raise ValueError("Port value isn't a wildcard, integer, or range: %s" % rule)
def __hash__(self):
if self._hash is None:
self._hash = stem.util._hash_attr(self, 'is_accept', 'address', 'min_port', 'max_port') * 1024 + hash(self.get_mask(False))
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ExitPolicyRule) else False
def __ne__(self, other):
return not self == other
def _address_type_to_int(address_type):
return AddressType.index_of(address_type)
def _int_to_address_type(address_type_int):
return list(AddressType)[address_type_int]
class MicroExitPolicyRule(ExitPolicyRule):
"""
Lighter weight ExitPolicyRule derivative for microdescriptors.
"""
def __init__(self, is_accept, min_port, max_port):
self.is_accept = is_accept
self.address = None # wildcard address
self.min_port = min_port
self.max_port = max_port
self._skip_rule = False
def is_address_wildcard(self):
return True
def get_address_type(self):
return AddressType.WILDCARD
def get_mask(self, cache = True):
return None
def get_masked_bits(self):
return None
def __hash__(self):
return stem.util._hash_attr(self, 'is_accept', 'min_port', 'max_port', cache = True)
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, MicroExitPolicyRule) else False
def __ne__(self, other):
return not self == other
DEFAULT_POLICY_RULES = tuple([ExitPolicyRule(rule) for rule in (
'reject *:25',
'reject *:119',
'reject *:135-139',
'reject *:445',
'reject *:563',
'reject *:1214',
'reject *:4661-4666',
'reject *:6346-6429',
'reject *:6699',
'reject *:6881-6999',
'accept *:*',
)])
stem-1.8.0/stem/descriptor/certificate.py
# Copyright 2017-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `Tor Ed25519 certificates
<https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_, which are
used for a variety of purposes...
* validating the key used to sign server descriptors
* validating the key used to sign hidden service v3 descriptors
* signing and encrypting hidden service v3 introductory points
.. versionadded:: 1.6.0
**Module Overview:**
::
Ed25519Certificate - Ed25519 signing key certificate
| +- Ed25519CertificateV1 - version 1 Ed25519 certificate
| |- is_expired - checks if certificate is presently expired
| |- signing_key - certificate signing key
| +- validate - validates a descriptor's signature
|
|- from_base64 - decodes a base64 encoded certificate
|- to_base64 - base64 encoding of this certificate
|
|- unpack - decodes a byte encoded certificate
+- pack - byte encoding of this certificate
Ed25519Extension - extension included within an Ed25519Certificate
.. data:: CertType (enum)
Purpose of Ed25519 certificate. For more information see...
* `cert-spec.txt <https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_ section A.1
* `rend-spec-v3.txt <https://gitweb.torproject.org/torspec.git/tree/rend-spec-v3.txt>`_ appendix E
.. deprecated:: 1.8.0
Replaced with :data:`stem.client.datatype.CertType`
======================== ===========
CertType Description
======================== ===========
**SIGNING** signing key with an identity key
**LINK_CERT** TLS link certificate signed with ed25519 signing key
**AUTH** authentication key signed with ed25519 signing key
**HS_V3_DESC_SIGNING** hidden service v3 short-term descriptor signing key
**HS_V3_INTRO_AUTH** hidden service v3 introductory point authentication key
**HS_V3_INTRO_ENCRYPT** hidden service v3 introductory point encryption key
======================== ===========
.. data:: ExtensionType (enum)
Recognized extension types.
==================== ===========
ExtensionType Description
==================== ===========
**HAS_SIGNING_KEY** includes key used to sign the certificate
==================== ===========
.. data:: ExtensionFlag (enum)
Flags that can be assigned to Ed25519 certificate extensions.
====================== ===========
ExtensionFlag Description
====================== ===========
**AFFECTS_VALIDATION** extension affects whether the certificate is valid
**UNKNOWN** extension includes flags not yet recognized by stem
====================== ===========
"""
import base64
import binascii
import datetime
import hashlib
import re
import stem.descriptor.hidden_service
import stem.descriptor.server_descriptor
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.str_tools
from stem.client.datatype import Field, Size, split
# TODO: Importing under an alternate name until we can deprecate our redundant
# CertType enum in Stem 2.x.
from stem.client.datatype import CertType as ClientCertType
ED25519_KEY_LENGTH = 32
ED25519_HEADER_LENGTH = 40
ED25519_SIGNATURE_LENGTH = 64
SIG_PREFIX_SERVER_DESC = b'Tor router descriptor signature v1'
SIG_PREFIX_HS_V3 = b'Tor onion service descriptor sig v3'
DEFAULT_EXPIRATION_HOURS = 54 # HSv3 certificate expiration of tor
CertType = stem.util.enum.UppercaseEnum(
'SIGNING',
'LINK_CERT',
'AUTH',
'HS_V3_DESC_SIGNING',
'HS_V3_INTRO_AUTH',
'HS_V3_INTRO_ENCRYPT',
)
ExtensionType = stem.util.enum.Enum(('HAS_SIGNING_KEY', 4),)
ExtensionFlag = stem.util.enum.UppercaseEnum('AFFECTS_VALIDATION', 'UNKNOWN')
class Ed25519Extension(Field):
"""
Extension within an Ed25519 certificate.
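For example, packing a HAS_SIGNING_KEY extension (the zeroed key bytes are
merely a placeholder)...

::

  extension = Ed25519Extension(ExtensionType.HAS_SIGNING_KEY, 0, b'\x00' * 32)
  encoded = extension.pack()  # two byte data size, type, flags, then the data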
:var stem.descriptor.certificate.ExtensionType type: extension type
:var list flags: extension attribute flags
:var int flag_int: integer encoding of the extension attribute flags
:var bytes data: data the extension concerns
"""
def __init__(self, ext_type, flag_val, data):
self.type = ext_type
self.flags = []
self.flag_int = flag_val if flag_val else 0
self.data = data
if flag_val and flag_val % 2 == 1:
self.flags.append(ExtensionFlag.AFFECTS_VALIDATION)
flag_val -= 1
if flag_val:
self.flags.append(ExtensionFlag.UNKNOWN)
if ext_type == ExtensionType.HAS_SIGNING_KEY and len(data) != 32:
raise ValueError('Ed25519 HAS_SIGNING_KEY extension must be 32 bytes, but was %i.' % len(data))
def pack(self):
encoded = bytearray()
encoded += Size.SHORT.pack(len(self.data))
encoded += Size.CHAR.pack(self.type)
encoded += Size.CHAR.pack(self.flag_int)
encoded += self.data
return bytes(encoded)
@staticmethod
def pop(content):
if len(content) < 4:
raise ValueError('Ed25519 extension is missing header fields')
data_size, content = Size.SHORT.pop(content)
ext_type, content = Size.CHAR.pop(content)
flags, content = Size.CHAR.pop(content)
data, content = split(content, data_size)
if len(data) != data_size:
raise ValueError("Ed25519 extension is truncated. It should have %i bytes of data but there's only %i." % (data_size, len(data)))
return Ed25519Extension(ext_type, flags, data), content
def __hash__(self):
return stem.util._hash_attr(self, 'type', 'flag_int', 'data', cache = True)
class Ed25519Certificate(object):
"""
Base class for an Ed25519 certificate.
:var int version: certificate format version
:var unicode encoded: base64 encoded ed25519 certificate
"""
def __init__(self, version):
self.version = version
self.encoded = None # TODO: remove in stem 2.x
@staticmethod
def unpack(content):
"""
Parses a byte encoded ED25519 certificate.
:param bytes content: encoded certificate
:returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass
for the given certificate
:raises: **ValueError** if certificate is malformed
"""
version = Size.CHAR.pop(content)[0]
if version == 1:
return Ed25519CertificateV1.unpack(content)
else:
raise ValueError('Ed25519 certificate is version %i. Parser presently only supports version 1.' % version)
@staticmethod
def from_base64(content):
"""
Parses a base64 encoded ED25519 certificate.
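For example, a sketch in which **cert_blob** is assumed to hold the base64
content of a descriptor's ED25519 CERT block...

::

  certificate = Ed25519Certificate.from_base64(cert_blob)
  print('certificate expires: %s' % certificate.expiration)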
:param str content: base64 encoded certificate
:returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass
for the given certificate
:raises: **ValueError** if content is malformed
"""
content = stem.util.str_tools._to_unicode(content)
if content.startswith('-----BEGIN ED25519 CERT-----\n') and content.endswith('\n-----END ED25519 CERT-----'):
content = content[29:-27]
try:
decoded = base64.b64decode(content)
if not decoded:
raise TypeError('empty')
instance = Ed25519Certificate.unpack(decoded)
instance.encoded = content
return instance
except (TypeError, binascii.Error) as exc:
raise ValueError("Ed25519 certificate wasn't propoerly base64 encoded (%s):\n%s" % (exc, content))
def pack(self):
"""
Encoded byte representation of our certificate.
:returns: **bytes** for our encoded certificate representation
"""
raise NotImplementedError('Certificate encoding has not been implemented for %s' % type(self).__name__)
def to_base64(self, pem = False):
"""
Base64 encoded certificate data.
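For example, a short sketch that round-trips a certificate (cert is
assumed to be a parsed Ed25519Certificate)...
::
  pem_block = cert.to_base64(pem = True)
  same_cert = Ed25519Certificate.from_base64(pem_block)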
:param bool pem: include `PEM header/footer
<https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail>`_, for more
information see `RFC 7468 <https://tools.ietf.org/html/rfc7468>`_
:returns: **unicode** for our encoded certificate representation
"""
encoded = b'\n'.join(stem.util.str_tools._split_by_length(base64.b64encode(self.pack()), 64))
if pem:
encoded = b'-----BEGIN ED25519 CERT-----\n%s\n-----END ED25519 CERT-----' % encoded
return stem.util.str_tools._to_unicode(encoded)
@staticmethod
def _from_descriptor(keyword, attribute):
def _parse(descriptor, entries):
value, block_type, block_contents = entries[keyword][0]
if not block_contents or block_type != 'ED25519 CERT':
raise ValueError("'%s' should be followed by a ED25519 CERT block, but was a %s" % (keyword, block_type))
setattr(descriptor, attribute, Ed25519Certificate.from_base64(block_contents))
return _parse
def __str__(self):
return self.to_base64(pem = True)
@staticmethod
def parse(content):
return Ed25519Certificate.from_base64(content) # TODO: drop this alias in stem 2.x
class Ed25519CertificateV1(Ed25519Certificate):
"""
Version 1 Ed25519 certificate, used for signing tor server
descriptors.
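For example, a minimal sketch that creates and signs a certificate,
assuming the cryptography module with ed25519 support is available...
::
  from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
  signing_key = Ed25519PrivateKey.generate()
  cert = Ed25519CertificateV1(
    cert_type = CertType.HS_V3_DESC_SIGNING,
    key = Ed25519PrivateKey.generate(),
    signing_key = signing_key,
  )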
:var stem.client.datatype.CertType type: certificate purpose
:var int type_int: integer value of the certificate purpose
:var datetime expiration: expiration of the certificate
:var int key_type: format of the key
:var bytes key: key content
:var list extensions: :class:`~stem.descriptor.certificate.Ed25519Extension` in this certificate
:var bytes signature: certificate signature
:param bytes signature: pre-calculated certificate signature
:param cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey signing_key: certificate signing key
"""
def __init__(self, cert_type = None, expiration = None, key_type = None, key = None, extensions = None, signature = None, signing_key = None):
super(Ed25519CertificateV1, self).__init__(1)
if cert_type is None:
raise ValueError('Certificate type is required')
elif key is None:
raise ValueError('Certificate key is required')
self.type, self.type_int = ClientCertType.get(cert_type)
self.expiration = expiration if expiration else datetime.datetime.utcnow() + datetime.timedelta(hours = DEFAULT_EXPIRATION_HOURS)
self.key_type = key_type if key_type else 1
self.key = stem.util._pubkey_bytes(key)
self.extensions = extensions if extensions else []
self.signature = signature
if signing_key:
calculated_sig = signing_key.sign(self.pack())
# if caller provides both signing key *and* signature then ensure they match
if self.signature and self.signature != calculated_sig:
raise ValueError("Signature calculated from its key (%s) mismatches '%s'" % (calculated_sig, self.signature))
self.signature = calculated_sig
if self.type in (ClientCertType.LINK, ClientCertType.IDENTITY, ClientCertType.AUTHENTICATE):
raise ValueError('Ed25519 certificate cannot have a type of %i. This is reserved for CERTS cells.' % self.type_int)
elif self.type == ClientCertType.ED25519_IDENTITY:
raise ValueError('Ed25519 certificate cannot have a type of 7. This is reserved for RSA identity cross-certification.')
elif self.type == ClientCertType.UNKNOWN:
raise ValueError('Ed25519 certificate type %i is unrecognized' % self.type_int)
def pack(self):
encoded = bytearray()
encoded += Size.CHAR.pack(self.version)
encoded += Size.CHAR.pack(self.type_int)
encoded += Size.LONG.pack(int(stem.util.datetime_to_unix(self.expiration) / 3600))
encoded += Size.CHAR.pack(self.key_type)
encoded += self.key
encoded += Size.CHAR.pack(len(self.extensions))
for extension in self.extensions:
encoded += extension.pack()
if self.signature:
encoded += self.signature
return bytes(encoded)
@staticmethod
def unpack(content):
if len(content) < ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH:
raise ValueError('Ed25519 certificate was %i bytes, but should be at least %i' % (len(content), ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH))
header, signature = split(content, len(content) - ED25519_SIGNATURE_LENGTH)
version, header = Size.CHAR.pop(header)
cert_type, header = Size.CHAR.pop(header)
expiration_hours, header = Size.LONG.pop(header)
key_type, header = Size.CHAR.pop(header)
key, header = split(header, ED25519_KEY_LENGTH)
extension_count, extension_data = Size.CHAR.pop(header)
if version != 1:
raise ValueError('Ed25519 v1 parser cannot read version %i certificates' % version)
extensions = []
for i in range(extension_count):
extension, extension_data = Ed25519Extension.pop(extension_data)
extensions.append(extension)
if extension_data:
raise ValueError('Ed25519 certificate had %i bytes of unused extension data' % len(extension_data))
return Ed25519CertificateV1(cert_type, datetime.datetime.utcfromtimestamp(expiration_hours * 3600), key_type, key, extensions, signature)
def is_expired(self):
"""
Checks if this certificate is presently expired or not.
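For example (cert is assumed to be a parsed certificate)...
::
  if cert.is_expired():
    print('certificate expired at %s' % cert.expiration)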
:returns: **True** if the certificate has expired, **False** otherwise
"""
return datetime.datetime.now() > self.expiration
def signing_key(self):
"""
Provides this certificate's signing key.
.. versionadded:: 1.8.0
:returns: **bytes** with the first signing key on the certificate, None if
not present
"""
for extension in self.extensions:
if extension.type == ExtensionType.HAS_SIGNING_KEY:
return extension.data
return None
def validate(self, descriptor):
"""
Validate our descriptor content matches its ed25519 signature. Supported
descriptor types include...
* :class:`~stem.descriptor.server_descriptor.RelayDescriptor`
* :class:`~stem.descriptor.hidden_service.HiddenServiceDescriptorV3`
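For example, a minimal sketch assuming relay_desc is a parsed
:class:`~stem.descriptor.server_descriptor.RelayDescriptor` with an
ed25519 certificate...
::
  try:
    relay_desc.certificate.validate(relay_desc)
    print('signature is valid')
  except ValueError as exc:
    print('signature invalid: %s' % exc)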
:param stem.descriptor.__init__.Descriptor descriptor: descriptor to validate
:raises:
* **ValueError** if signing key or descriptor are invalid
* **TypeError** if descriptor type is unsupported
* **ImportError** if cryptography module or ed25519 support unavailable
"""
if not stem.prereq.is_crypto_available(ed25519 = True):
raise ImportError('Certificate validation requires the cryptography module and ed25519 support')
if isinstance(descriptor, stem.descriptor.server_descriptor.RelayDescriptor):
signed_content = hashlib.sha256(Ed25519CertificateV1._signed_content(descriptor)).digest()
signature = stem.util.str_tools._decode_b64(descriptor.ed25519_signature)
self._validate_server_desc_signing_key(descriptor)
elif isinstance(descriptor, stem.descriptor.hidden_service.HiddenServiceDescriptorV3):
signed_content = Ed25519CertificateV1._signed_content(descriptor)
signature = stem.util.str_tools._decode_b64(descriptor.signature)
else:
raise TypeError('Certificate validation only supported for server and hidden service descriptors, not %s' % type(descriptor).__name__)
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
from cryptography.exceptions import InvalidSignature
try:
key = Ed25519PublicKey.from_public_bytes(self.key)
key.verify(signature, signed_content)
except InvalidSignature:
raise ValueError('Descriptor Ed25519 certificate signature invalid (signature forged or corrupt)')
@staticmethod
def _signed_content(descriptor):
"""
Provides this descriptor's signing constant, appended with the portion of
the descriptor that's signed.
"""
if isinstance(descriptor, stem.descriptor.server_descriptor.RelayDescriptor):
prefix = SIG_PREFIX_SERVER_DESC
regex = b'(.+router-sig-ed25519 )'
elif isinstance(descriptor, stem.descriptor.hidden_service.HiddenServiceDescriptorV3):
prefix = SIG_PREFIX_HS_V3
regex = b'(.+)signature '
else:
raise ValueError('BUG: %s type unexpected' % type(descriptor).__name__)
match = re.search(regex, descriptor.get_bytes(), re.DOTALL)
if not match:
raise ValueError('Malformed descriptor missing signature line')
return prefix + match.group(1)
def _validate_server_desc_signing_key(self, descriptor):
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
from cryptography.exceptions import InvalidSignature
if descriptor.ed25519_master_key:
signing_key = base64.b64decode(stem.util.str_tools._to_bytes(descriptor.ed25519_master_key) + b'=')
else:
signing_key = self.signing_key()
if not signing_key:
raise ValueError('Server descriptor missing an ed25519 signing key')
try:
key = Ed25519PublicKey.from_public_bytes(signing_key)
key.verify(self.signature, base64.b64decode(stem.util.str_tools._to_bytes(self.encoded))[:-ED25519_SIGNATURE_LENGTH])
except InvalidSignature:
raise ValueError('Ed25519KeyCertificate signing key is invalid (signature forged or corrupt)')
stem-1.8.0/stem/descriptor/reader.py 0000664 0001750 0001750 00000045253 13501272761 020171 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Utilities for reading descriptors from local directories and archives. This is
mostly done through the :class:`~stem.descriptor.reader.DescriptorReader`
class, which is an iterator for the descriptor data in a series of
destinations. For example...
::
my_descriptors = [
'/tmp/server-descriptors-2012-03.tar.bz2',
'/tmp/archived_descriptors/',
]
# prints the contents of all the descriptor files
with DescriptorReader(my_descriptors) as reader:
for descriptor in reader:
print(descriptor)
This ignores files that cannot be processed due to read errors or unparsable
content. To be notified of skipped files you can register a listener with
:func:`~stem.descriptor.reader.DescriptorReader.register_skip_listener`.
The :class:`~stem.descriptor.reader.DescriptorReader` keeps track of the last
modified timestamps for descriptor files that it has read so it can skip
unchanged files if run again. This listing of processed files can also be
persisted and applied to other
:class:`~stem.descriptor.reader.DescriptorReader` instances. For example, the
following prints descriptors as they're changed over the course of a minute,
and picks up where it left off if run again...
::
reader = DescriptorReader(['/tmp/descriptor_data'])
try:
processed_files = load_processed_files('/tmp/used_descriptors')
reader.set_processed_files(processed_files)
except: pass # could not load, maybe this is the first run
start_time = time.time()
while (time.time() - start_time) < 60:
# prints any descriptors that have changed since last checked
with reader:
for descriptor in reader:
print(descriptor)
time.sleep(1)
save_processed_files('/tmp/used_descriptors', reader.get_processed_files())
**Module Overview:**
::
load_processed_files - Loads a listing of processed files
save_processed_files - Saves a listing of processed files
DescriptorReader - Iterator for descriptor data on the local file system
|- get_processed_files - provides the listing of files that we've processed
|- set_processed_files - sets our tracking of the files we have processed
|- register_read_listener - adds a listener for when files are read
|- register_skip_listener - adds a listener that's notified of skipped files
|- start - begins reading descriptor data
|- stop - stops reading descriptor data
|- __enter__ / __exit__ - manages the descriptor reader thread in the context
+- __iter__ - iterates over descriptor data in unread files
FileSkipped - Base exception for a file that was skipped
|- AlreadyRead - We've already read a file with this last modified timestamp
|- ParsingFailure - Contents can't be parsed as descriptor data
|- UnrecognizedType - File extension indicates non-descriptor data
+- ReadFailed - Wraps an error that was raised while reading the file
+- FileMissing - File does not exist
.. deprecated:: 1.8.0
This module will likely be removed in Stem 2.0 due to lack of usage. If you
use this module please `let me know <https://www.atagar.com/contact/>`_.
"""
import mimetypes
import os
import tarfile
import threading
try:
import queue
except ImportError:
import Queue as queue
import stem.descriptor
import stem.prereq
import stem.util
import stem.util.str_tools
import stem.util.system
# flag to indicate when the reader thread is out of descriptor files to read
FINISHED = 'DONE'
class FileSkipped(Exception):
"Base error when we can't provide descriptor data from a file."
class AlreadyRead(FileSkipped):
"""
Already read a file with this 'last modified' timestamp or later.
:param int last_modified: unix timestamp for when the file was last modified
:param int last_modified_when_read: unix timestamp for the modification time
when we last read this file
"""
def __init__(self, last_modified, last_modified_when_read):
super(AlreadyRead, self).__init__('File has already been read since it was last modified. modification time: %s, last read: %s' % (last_modified, last_modified_when_read))
self.last_modified = last_modified
self.last_modified_when_read = last_modified_when_read
class ParsingFailure(FileSkipped):
"""
File contents could not be parsed as descriptor data.
:param ValueError exception: issue that arose when parsing
"""
def __init__(self, parsing_exception):
super(ParsingFailure, self).__init__(parsing_exception)
self.exception = parsing_exception
class UnrecognizedType(FileSkipped):
"""
File doesn't contain descriptor data. This could either be due to its file
type or because it doesn't conform to a recognizable descriptor type.
:param tuple mime_type: the (type, encoding) tuple provided by mimetypes.guess_type()
"""
def __init__(self, mime_type):
super(UnrecognizedType, self).__init__('Unrecognized mime type: %s (%s)' % mime_type)
self.mime_type = mime_type
class ReadFailed(FileSkipped):
"""
An IOError occurred while trying to read the file.
:param IOError exception: issue that arose when reading the file, **None** if
this arose due to the file not being present
"""
def __init__(self, read_exception):
super(ReadFailed, self).__init__(read_exception)
self.exception = read_exception
class FileMissing(ReadFailed):
'File does not exist.'
def __init__(self):
super(FileMissing, self).__init__('File does not exist')
def load_processed_files(path):
"""
Loads a dictionary of 'path => last modified timestamp' mappings, as
persisted by :func:`~stem.descriptor.reader.save_processed_files`, from a
file.
:param str path: location to load the processed files dictionary from
:returns: **dict** of 'path (**str**) => last modified unix timestamp
(**int**)' mappings
:raises:
* **IOError** if unable to read the file
* **TypeError** if unable to parse the file's contents
"""
processed_files = {}
with open(path, 'rb') as input_file:
for line in input_file.readlines():
line = stem.util.str_tools._to_unicode(line.strip())
if not line:
continue # skip blank lines
if ' ' not in line:
raise TypeError('Malformed line: %s' % line)
path, timestamp = line.rsplit(' ', 1)
if not os.path.isabs(path):
raise TypeError("'%s' is not an absolute path" % path)
elif not timestamp.isdigit():
raise TypeError("'%s' is not an integer timestamp" % timestamp)
processed_files[path] = int(timestamp)
return processed_files
def save_processed_files(path, processed_files):
"""
Persists a dictionary of 'path => last modified timestamp' mappings (as
provided by the DescriptorReader's
:func:`~stem.descriptor.reader.DescriptorReader.get_processed_files` method)
so that they can be loaded later and applied to another
:class:`~stem.descriptor.reader.DescriptorReader`.
:param str path: location to save the processed files dictionary to
:param dict processed_files: 'path => last modified' mappings
:raises:
* **IOError** if unable to write to the file
* **TypeError** if processed_files is of the wrong type
"""
# makes the parent directory if it doesn't already exist
try:
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
os.makedirs(path_dir)
except OSError as exc:
raise IOError(exc)
with open(path, 'w') as output_file:
for path, timestamp in list(processed_files.items()):
if not os.path.isabs(path):
raise TypeError('Only absolute paths are acceptable: %s' % path)
output_file.write('%s %i\n' % (path, timestamp))
class DescriptorReader(object):
"""
Iterator for the descriptor data on the local file system. This can process
text files and tarball archives (gzip or bzip2), and recurses into directories.
By default this limits the number of descriptors that we'll read ahead before
waiting for our caller to fetch some of them. This is included to avoid
unbounded memory usage.
Our persistence_path argument is a convenient way to persist the listing
of files we have processed between runs; however, it doesn't allow for error
handling. If you want that then use the
:func:`~stem.descriptor.reader.load_processed_files` and
:func:`~stem.descriptor.reader.save_processed_files` functions instead.
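For example, a small sketch that reads a directory and persists our
processed file listing between runs (both paths are hypothetical)...
::
  with DescriptorReader('/tmp/descriptor_data', persistence_path = '/tmp/processed_files') as reader:
    for descriptor in reader:
      print(descriptor)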
:param str,list target: path or list of paths for files or directories to be read from
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param bool follow_links: determines if we'll follow symlinks when traversing
directories (requires python 2.6)
:param int buffer_size: descriptors we'll buffer before waiting for some to
be read, this is unbounded if zero
:param str persistence_path: if set we will load and save processed file
listings from this path, errors are ignored
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param dict kwargs: additional arguments for the descriptor constructor
"""
def __init__(self, target, validate = False, follow_links = False, buffer_size = 100, persistence_path = None, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
self._targets = [target] if stem.util._is_str(target) else target
# expand any relative paths we got
self._targets = list(map(os.path.abspath, self._targets))
self._validate = validate
self._follow_links = follow_links
self._persistence_path = persistence_path
self._document_handler = document_handler
self._kwargs = kwargs
self._read_listeners = []
self._skip_listeners = []
self._processed_files = {}
self._reader_thread = None
self._reader_thread_lock = threading.RLock()
self._iter_lock = threading.RLock()
self._iter_notice = threading.Event()
self._is_stopped = threading.Event()
self._is_stopped.set()
# Descriptors that we have read but not yet provided to the caller. A
# FINISHED entry is used by the reading thread to indicate the end.
self._unreturned_descriptors = queue.Queue(buffer_size)
if self._persistence_path:
try:
processed_files = load_processed_files(self._persistence_path)
self.set_processed_files(processed_files)
except:
pass
def get_processed_files(self):
"""
For each file that we have read descriptor data from this provides a
mapping of the form...
::
absolute path (str) => last modified unix timestamp (int)
This includes entries set through the
:func:`~stem.descriptor.reader.DescriptorReader.set_processed_files`
method. Each run resets this to only the files that were present during
that run.
:returns: **dict** with the absolute paths and unix timestamp for the last
modified times of the files we have processed
"""
# make sure that we only provide back absolute paths
return dict((os.path.abspath(k), v) for (k, v) in list(self._processed_files.items()))
def set_processed_files(self, processed_files):
"""
Sets the listing of the files we have processed. Most often this is used
with a newly created :class:`~stem.descriptor.reader.DescriptorReader` to
pre-populate the listing of descriptor files that we have seen.
:param dict processed_files: mapping of absolute paths (**str**) to unix
timestamps for the last modified time (**int**)
"""
self._processed_files = dict(processed_files)
def register_read_listener(self, listener):
"""
Registers a listener for when files are read. This is executed prior to
processing files. Listeners are expected to be of the form...
::
my_listener(path)
:param functor listener: functor to be notified when files are read
"""
self._read_listeners.append(listener)
def register_skip_listener(self, listener):
"""
Registers a listener for files that are skipped. This listener is expected
to be a functor of the form...
::
my_listener(path, exception)
:param functor listener: functor to be notified of files that are skipped
due to read errors or because they couldn't be parsed as valid descriptor data
"""
self._skip_listeners.append(listener)
def get_buffered_descriptor_count(self):
"""
Provides the number of descriptors that are waiting to be iterated over.
This is limited to the buffer_size that we were constructed with.
:returns: **int** for the estimated number of currently enqueued
descriptors, this is not entirely reliable
"""
return self._unreturned_descriptors.qsize()
def start(self):
"""
Starts reading our descriptor files.
:raises: **ValueError** if we're already reading the descriptor files
"""
with self._reader_thread_lock:
if self._reader_thread:
raise ValueError('Already running, you need to call stop() first')
else:
self._is_stopped.clear()
self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor reader')
self._reader_thread.setDaemon(True)
self._reader_thread.start()
def stop(self):
"""
Stops further reading of descriptor files.
"""
with self._reader_thread_lock:
self._is_stopped.set()
self._iter_notice.set()
# clears our queue to unblock enqueue calls
try:
while True:
self._unreturned_descriptors.get_nowait()
except queue.Empty:
pass
self._reader_thread.join()
self._reader_thread = None
if self._persistence_path:
try:
processed_files = self.get_processed_files()
save_processed_files(self._persistence_path, processed_files)
except:
pass
def _read_descriptor_files(self):
new_processed_files = {}
remaining_files = list(self._targets)
while remaining_files and not self._is_stopped.is_set():
target = remaining_files.pop(0)
if not os.path.exists(target):
self._notify_skip_listeners(target, FileMissing())
continue
if os.path.isdir(target):
walker = os.walk(target, followlinks = self._follow_links)
self._handle_walker(walker, new_processed_files)
else:
self._handle_file(target, new_processed_files)
self._processed_files = new_processed_files
if not self._is_stopped.is_set():
self._unreturned_descriptors.put(FINISHED)
self._iter_notice.set()
def __iter__(self):
with self._iter_lock:
while not self._is_stopped.is_set():
try:
descriptor = self._unreturned_descriptors.get_nowait()
if descriptor == FINISHED:
break
else:
yield descriptor
except queue.Empty:
self._iter_notice.wait()
self._iter_notice.clear()
def _handle_walker(self, walker, new_processed_files):
for root, _, files in walker:
for filename in files:
self._handle_file(os.path.join(root, filename), new_processed_files)
# this can take a while if, say, we're including the root directory
if self._is_stopped.is_set():
return
def _handle_file(self, target, new_processed_files):
# This is a file. Register its last modified timestamp and check if
# it's a file that we should skip.
try:
last_modified = int(os.stat(target).st_mtime)
last_used = self._processed_files.get(target)
new_processed_files[target] = last_modified
except OSError as exc:
self._notify_skip_listeners(target, ReadFailed(exc))
return
if last_used and last_used >= last_modified:
self._notify_skip_listeners(target, AlreadyRead(last_modified, last_used))
return
# Block devices and such are never descriptors, and can cause us to block
# for quite a while, so we skip anything that isn't a regular file.
if not os.path.isfile(target):
return
# The mimetypes module only checks the file extension. To actually
# check the content (like the 'file' command) we'd need something like
# pymagic (https://github.com/cloudburst/pymagic).
target_type = mimetypes.guess_type(target)
if target_type[0] in (None, 'text/plain'):
# either '.txt' or an unknown type
self._handle_descriptor_file(target, target_type)
elif stem.util.system.is_tarfile(target):
# handles gzip, bz2, and decompressed tarballs among others
self._handle_archive(target)
else:
self._notify_skip_listeners(target, UnrecognizedType(target_type))
def _handle_descriptor_file(self, target, mime_type):
try:
self._notify_read_listeners(target)
with open(target, 'rb') as target_file:
for desc in stem.descriptor.parse_file(target_file, validate = self._validate, document_handler = self._document_handler, **self._kwargs):
if self._is_stopped.is_set():
return
self._unreturned_descriptors.put(desc)
self._iter_notice.set()
except TypeError:
self._notify_skip_listeners(target, UnrecognizedType(mime_type))
except ValueError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))
except IOError as exc:
self._notify_skip_listeners(target, ReadFailed(exc))
def _handle_archive(self, target):
# TODO: When dropping python 2.6 support go back to using 'with' for
# tarfiles...
#
# http://bugs.python.org/issue7232
tar_file = None
try:
self._notify_read_listeners(target)
tar_file = tarfile.open(target)
for tar_entry in tar_file:
if tar_entry.isfile():
entry = tar_file.extractfile(tar_entry)
try:
for desc in stem.descriptor.parse_file(entry, validate = self._validate, document_handler = self._document_handler, **self._kwargs):
if self._is_stopped.is_set():
return
desc._set_path(os.path.abspath(target))
desc._set_archive_path(tar_entry.name)
self._unreturned_descriptors.put(desc)
self._iter_notice.set()
except TypeError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))
except ValueError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))
finally:
entry.close()
except IOError as exc:
self._notify_skip_listeners(target, ReadFailed(exc))
finally:
if tar_file:
tar_file.close()
def _notify_read_listeners(self, path):
for listener in self._read_listeners:
listener(path)
def _notify_skip_listeners(self, path, exception):
for listener in self._skip_listeners:
listener(path, exception)
def __enter__(self):
self.start()
return self
def __exit__(self, exit_type, value, traceback):
self.stop()
stem-1.8.0/stem/descriptor/networkstatus.py 0000664 0001750 0001750 00000234643 13601502033 021654 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor network status documents. This supports both the v2 and v3
`dir-spec <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_.
Documents can be obtained from a few sources...
* The 'cached-consensus' file in Tor's data directory.
* Archived descriptors provided by `CollecTor
<https://collector.torproject.org/>`_.
* Directory authorities and mirrors via their DirPort.
... and contain the following sections...
* document header
* list of :class:`stem.descriptor.networkstatus.DirectoryAuthority`
* list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry`
* document footer
**For a great graphical overview see** `Jordan Wright's chart describing the
anatomy of the consensus
`_.
Of these, the router status entry section can be quite large (on the order of
hundreds of kilobytes). As such we provide a couple of methods for reading
network status documents through :func:`~stem.descriptor.__init__.parse_file`.
For more information see :func:`~stem.descriptor.__init__.DocumentHandler`...
::
from stem.descriptor import parse_file, DocumentHandler
with open('.tor/cached-consensus', 'rb') as consensus_file:
# Processes the routers as we read them in. The routers refer to a document
# with an unset 'routers' attribute.
for router in parse_file(consensus_file, 'network-status-consensus-3 1.0', document_handler = DocumentHandler.ENTRIES):
print(router.nickname)
**Module Overview:**
::
NetworkStatusDocument - Network status document
|- NetworkStatusDocumentV2 - Version 2 network status document
|- NetworkStatusDocumentV3 - Version 3 network status document
+- BridgeNetworkStatusDocument - Version 3 network status document for bridges
KeyCertificate - Certificate used to authenticate an authority
DocumentSignature - Signature of a document by a directory authority
DetachedSignature - Stand alone signature used when making the consensus
DirectoryAuthority - Directory authority as defined in a v3 network status document
"""
import collections
import datetime
import hashlib
import io
import stem.descriptor.router_status_entry
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
DigestHash,
DigestEncoding,
TypeAnnotation,
DocumentHandler,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_value,
_values,
_parse_simple_line,
_parse_if_present,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_mappings_for,
_random_nickname,
_random_fingerprint,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
RouterStatusEntryV2,
RouterStatusEntryBridgeV2,
RouterStatusEntryV3,
RouterStatusEntryMicroV3,
)
# Version 2 network status document fields, tuples of the form...
# (keyword, is_mandatory)
NETWORK_STATUS_V2_FIELDS = (
('network-status-version', True),
('dir-source', True),
('fingerprint', True),
('contact', True),
('dir-signing-key', True),
('client-versions', False),
('server-versions', False),
('published', True),
('dir-options', False),
('directory-signature', True),
)
# Network status documents are either a 'vote' or 'consensus', with different
# mandatory fields for each. Both though require that their fields appear in a
# specific order. This is an ordered listing of the following...
#
# (field, in_votes, in_consensus, is_mandatory)
HEADER_STATUS_DOCUMENT_FIELDS = (
('network-status-version', True, True, True),
('vote-status', True, True, True),
('consensus-methods', True, False, False),
('consensus-method', False, True, False),
('published', True, False, True),
('valid-after', True, True, True),
('fresh-until', True, True, True),
('valid-until', True, True, True),
('voting-delay', True, True, True),
('client-versions', True, True, False),
('server-versions', True, True, False),
('package', True, True, False),
('known-flags', True, True, True),
('flag-thresholds', True, False, False),
('shared-rand-participate', True, False, False),
('shared-rand-commit', True, False, False),
('shared-rand-previous-value', True, True, False),
('shared-rand-current-value', True, True, False),
('bandwidth-file-headers', True, False, False),
('bandwidth-file-digest', True, False, False),
('recommended-client-protocols', True, True, False),
('recommended-relay-protocols', True, True, False),
('required-client-protocols', True, True, False),
('required-relay-protocols', True, True, False),
('params', True, True, False),
)
FOOTER_STATUS_DOCUMENT_FIELDS = (
('directory-footer', True, True, False),
('bandwidth-weights', False, True, False),
('directory-signature', True, True, True),
)
AUTH_START = 'dir-source'
ROUTERS_START = 'r'
FOOTER_START = 'directory-footer'
V2_FOOTER_START = 'directory-signature'
DEFAULT_PARAMS = {
'bwweightscale': 10000,
'cbtdisabled': 0,
'cbtnummodes': 3,
'cbtrecentcount': 20,
'cbtmaxtimeouts': 18,
'cbtmincircs': 100,
'cbtquantile': 80,
'cbtclosequantile': 95,
'cbttestfreq': 60,
'cbtmintimeout': 2000,
'cbtinitialtimeout': 60000,
'cbtlearntimeout': 180,
'cbtmaxopencircs': 10,
'UseOptimisticData': 1,
'Support022HiddenServices': 1,
'usecreatefast': 1,
'max-consensuses-age-to-cache-for-diff': 72,
'try-diff-for-consensus-newer-than': 72,
'onion-key-rotation-days': 28,
'onion-key-grace-period-days': 7,
'hs_service_max_rdv_failures': 2,
'circ_max_cell_queue_size': 50000,
'circpad_max_circ_queued_cells': 1000,
'HiddenServiceEnableIntroDoSDefense': 0,
}
# KeyCertificate fields, tuple is of the form...
# (keyword, is_mandatory)
KEY_CERTIFICATE_PARAMS = (
('dir-key-certificate-version', True),
('dir-address', False),
('fingerprint', True),
('dir-identity-key', True),
('dir-key-published', True),
('dir-key-expires', True),
('dir-signing-key', True),
('dir-key-crosscert', False),
('dir-key-certification', True),
)
# DetachedSignature fields, tuple is of the form...
# (keyword, is_mandatory, is_multiple)
DETACHED_SIGNATURE_PARAMS = (
('consensus-digest', True, False),
('valid-after', True, False),
('fresh-until', True, False),
('valid-until', True, False),
('additional-digest', False, True),
('additional-signature', False, True),
('directory-signature', False, True),
)
# all parameters are constrained to int32 range
MIN_PARAM, MAX_PARAM = -2147483648, 2147483647
PARAM_RANGE = {
'circwindow': (100, 1000),
'CircuitPriorityHalflifeMsec': (-1, MAX_PARAM),
'perconnbwrate': (-1, MAX_PARAM),
'perconnbwburst': (-1, MAX_PARAM),
'refuseunknownexits': (0, 1),
'bwweightscale': (1, MAX_PARAM),
'cbtdisabled': (0, 1),
'cbtnummodes': (1, 20),
'cbtrecentcount': (3, 1000),
'cbtmaxtimeouts': (3, 10000),
'cbtmincircs': (1, 10000),
'cbtquantile': (10, 99),
'cbtclosequantile': (MIN_PARAM, 99),
'cbttestfreq': (1, MAX_PARAM),
'cbtmintimeout': (500, MAX_PARAM),
'cbtlearntimeout': (10, 60000),
'cbtmaxopencircs': (0, 14),
'UseOptimisticData': (0, 1),
'Support022HiddenServices': (0, 1),
'usecreatefast': (0, 1),
'UseNTorHandshake': (0, 1),
'FastFlagMinThreshold': (4, MAX_PARAM),
'NumDirectoryGuards': (0, 10),
'NumEntryGuards': (1, 10),
'GuardLifetime': (2592000, 157766400), # min: 30 days, max: 1826 days
'NumNTorsPerTAP': (1, 100000),
'AllowNonearlyExtend': (0, 1),
'AuthDirNumSRVAgreements': (1, MAX_PARAM),
'max-consensuses-age-to-cache-for-diff': (0, 8192),
'try-diff-for-consensus-newer-than': (0, 8192),
'onion-key-rotation-days': (1, 90),
'onion-key-grace-period-days': (1, 90), # max is the highest onion-key-rotation-days
'hs_service_max_rdv_failures': (1, 10),
'circ_max_cell_queue_size': (1000, 4294967295),
'circpad_max_circ_queued_cells': (0, 50000),
'HiddenServiceEnableIntroDoSDefense': (0, 1),
}
class PackageVersion(collections.namedtuple('PackageVersion', ['name', 'version', 'url', 'digests'])):
"""
Latest recommended version of a package that's available.
:var str name: name of the package
:var str version: latest recommended version
:var str url: package's url
:var dict digests: mapping of digest types to their value
"""
class SharedRandomnessCommitment(collections.namedtuple('SharedRandomnessCommitment', ['version', 'algorithm', 'identity', 'commit', 'reveal'])):
"""
Directory authority's commitment for generating the next shared random value.
:var int version: shared randomness protocol version
:var str algorithm: hash algorithm used to make the commitment
:var str identity: authority's sha1 identity fingerprint
:var str commit: base64 encoded commitment hash to the shared random value
:var str reveal: base64 encoded commitment to the shared random value,
**None** if not provided
"""
class DocumentDigest(collections.namedtuple('DocumentDigest', ['flavor', 'algorithm', 'digest'])):
"""
Digest of a consensus document.
.. versionadded:: 1.8.0
:var str flavor: consensus type this digest is for (for example, 'microdesc')
:var str algorithm: hash algorithm used to make the digest
:var str digest: digest value of the consensus
"""
def _parse_file(document_file, document_type = None, validate = False, is_microdescriptor = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
"""
Parses a network status and iterates over the RouterStatusEntry in it. The
document that these instances reference have an empty 'routers' attribute to
allow for limited memory usage.
:param file document_file: file with network status document content
:param class document_type: NetworkStatusDocument subclass
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param bool is_microdescriptor: **True** if this is for a microdescriptor
consensus, **False** otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param dict kwargs: additional arguments for the descriptor constructor
:returns: :class:`stem.descriptor.networkstatus.NetworkStatusDocument` object
:raises:
* **ValueError** if the document_version is unrecognized or the contents is
malformed and validate is **True**
* **IOError** if the file can't be read
"""
# we can't properly default this since NetworkStatusDocumentV3 isn't defined yet
if document_type is None:
document_type = NetworkStatusDocumentV3
if document_type == NetworkStatusDocumentV2:
document_type, router_type = NetworkStatusDocumentV2, RouterStatusEntryV2
elif document_type == NetworkStatusDocumentV3:
router_type = RouterStatusEntryMicroV3 if is_microdescriptor else RouterStatusEntryV3
elif document_type == BridgeNetworkStatusDocument:
document_type, router_type = BridgeNetworkStatusDocument, RouterStatusEntryBridgeV2
elif document_type == DetachedSignature:
yield document_type(document_file.read(), validate, **kwargs)
return
else:
raise ValueError("Document type %i isn't recognized (only able to parse v2, v3, and bridge)" % document_type)
if document_handler == DocumentHandler.DOCUMENT:
yield document_type(document_file.read(), validate, **kwargs)
return
# getting the document without the routers section
header = _read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file)
if header and header[0].startswith(b'@type'):
header = header[1:]
routers_start = document_file.tell()
_read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True)
routers_end = document_file.tell()
footer = document_file.readlines()
document_content = bytes.join(b'', header + footer)
if document_handler == DocumentHandler.BARE_DOCUMENT:
yield document_type(document_content, validate, **kwargs)
elif document_handler == DocumentHandler.ENTRIES:
desc_iterator = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
start_position = routers_start,
end_position = routers_end,
extra_args = (document_type(document_content, validate),),
**kwargs
)
for desc in desc_iterator:
yield desc
else:
raise ValueError('Unrecognized document_handler: %s' % document_handler)
def _parse_file_key_certs(certificate_file, validate = False):
"""
Parses a file containing one or more authority key certificates.
:param file certificate_file: file with key certificates
:param bool validate: checks the validity of the certificate's contents if
**True**, skips these checks otherwise
:returns: iterator for :class:`stem.descriptor.networkstatus.KeyCertificate`
instances in the file
:raises:
* **ValueError** if the key certificates are invalid and validate is **True**
* **IOError** if the file can't be read
"""
while True:
keycert_content = _read_until_keywords('dir-key-certification', certificate_file)
# we've reached the 'dir-key-certification', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
keycert_content += _read_until_keywords(block_end_prefix, certificate_file, True)
if keycert_content:
yield stem.descriptor.networkstatus.KeyCertificate(bytes.join(b'', keycert_content), validate = validate)
else:
break # done parsing file
def _parse_file_detached_sigs(detached_signature_file, validate = False):
"""
Parses a file containing one or more detached signatures.
:param file detached_signature_file: file with detached signatures
:param bool validate: checks the validity of the detached signature's
contents if **True**, skips these checks otherwise
:returns: iterator for :class:`stem.descriptor.networkstatus.DetachedSignature`
instances in the file
:raises:
* **ValueError** if the detached signatures are invalid and validate is **True**
* **IOError** if the file can't be read
"""
while True:
detached_sig_content = _read_until_keywords('consensus-digest', detached_signature_file, ignore_first = True)
if detached_sig_content:
yield stem.descriptor.networkstatus.DetachedSignature(bytes.join(b'', detached_sig_content), validate = validate)
else:
break # done parsing file
class NetworkStatusDocument(Descriptor):
"""
Common parent for network status documents.
"""
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
"""
Digest of this descriptor's content. These are referenced by...
* **DetachedSignature**
* Referer: :class:`~stem.descriptor.networkstatus.DetachedSignature` **consensus_digest** attribute
* Format: **SHA1/HEX**
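For example, a minimal sketch assuming consensus is a parsed
:class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`...
::
  print(consensus.digest())  # sha1/hex by default
  print(consensus.digest(DigestHash.SHA256, DigestEncoding.BASE64))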
.. versionadded:: 1.8.0
:param stem.descriptor.DigestHash hash_type: digest hashing algorithm
:param stem.descriptor.DigestEncoding encoding: digest encoding
:returns: **hashlib.HASH** or **str** based on our encoding argument
"""
content = self._content_range(end = '\ndirectory-signature ')
if hash_type == DigestHash.SHA1:
return stem.descriptor._encode_digest(hashlib.sha1(content), encoding)
elif hash_type == DigestHash.SHA256:
return stem.descriptor._encode_digest(hashlib.sha256(content), encoding)
else:
raise NotImplementedError('Network status document digests are only available in sha1 and sha256, not %s' % hash_type)
def _parse_version_line(keyword, attribute, expected_version):
def _parse(descriptor, entries):
value = _value(keyword, entries)
if not value.isdigit():
raise ValueError('Document has a non-numeric version: %s %s' % (keyword, value))
setattr(descriptor, attribute, int(value))
if int(value) != expected_version:
raise ValueError("Expected a version %i document, but got version '%s' instead" % (expected_version, value))
return _parse
def _parse_dir_source_line(descriptor, entries):
value = _value('dir-source', entries)
dir_source_comp = value.split()
if len(dir_source_comp) < 3:
raise ValueError("The 'dir-source' line of a v2 network status document must have three values: dir-source %s" % value)
if not dir_source_comp[0]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[1]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[1])
elif not stem.util.connection.is_valid_port(dir_source_comp[2], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2])
descriptor.hostname = dir_source_comp[0]
descriptor.address = dir_source_comp[1]
descriptor.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2])
def _parse_additional_digests(descriptor, entries):
digests = []
for val in _values('additional-digest', entries):
comp = val.split(' ')
if len(comp) < 3:
raise ValueError("additional-digest lines should be of the form 'additional-digest [flavor] [algname] [digest]' but was: %s" % val)
digests.append(DocumentDigest(*comp[:3]))
descriptor.additional_digests = digests
def _parse_additional_signatures(descriptor, entries):
signatures = []
for val, block_type, block_contents in entries['additional-signature']:
comp = val.split(' ')
if len(comp) < 4:
raise ValueError("additional-signature lines should be of the form 'additional-signature [flavor] [algname] [identity] [signing_key_digest]' but was: %s" % val)
elif not block_contents or block_type != 'SIGNATURE':
raise ValueError("'additional-signature' should be followed by a SIGNATURE block, but was a %s" % block_type)
signatures.append(DocumentSignature(comp[1], comp[2], comp[3], block_contents, flavor = comp[0], validate = True))
descriptor.additional_signatures = signatures
_parse_network_status_version_line = _parse_version_line('network-status-version', 'version', 2)
_parse_fingerprint_line = _parse_forty_character_hex('fingerprint', 'fingerprint')
_parse_contact_line = _parse_simple_line('contact', 'contact')
_parse_dir_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_client_versions_line = _parse_simple_line('client-versions', 'client_versions', func = lambda v: v.split(','))
_parse_server_versions_line = _parse_simple_line('server-versions', 'server_versions', func = lambda v: v.split(','))
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_dir_options_line = _parse_simple_line('dir-options', 'options', func = lambda v: v.split())
_parse_directory_signature_line = _parse_key_block('directory-signature', 'signature', 'SIGNATURE', value_attribute = 'signing_authority')
_parse_consensus_digest_line = _parse_simple_line('consensus-digest', 'consensus_digest')
class NetworkStatusDocumentV2(NetworkStatusDocument):
"""
Version 2 network status document. These have been deprecated and are no
longer generated by Tor.
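For example, a minimal sketch parsing an archived copy (the path is
hypothetical)...
::
  from stem.descriptor import parse_file, DocumentHandler
  with open('/tmp/cached-status-v2', 'rb') as status_file:
    document = next(parse_file(status_file, 'network-status-2 1.0', document_handler = DocumentHandler.DOCUMENT))
    print('document published: %s' % document.published)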
:var dict routers: fingerprints to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
contained in the document
:var int version: **\\*** document version
:var str hostname: **\\*** hostname of the authority
:var str address: **\\*** authority's IP address
:var int dir_port: **\\*** authority's DirPort
:var str fingerprint: **\\*** authority's fingerprint
:var str contact: **\\*** authority's contact information
:var str signing_key: **\\*** authority's public signing key
:var list client_versions: list of recommended client tor version strings
:var list server_versions: list of recommended server tor version strings
:var datetime published: **\\*** time when the document was published
:var list options: **\\*** list of things that this authority decides
:var str signing_authority: **\\*** name of the authority signing the document
:var str signature: **\\*** authority's signature for the document
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
TYPE_ANNOTATION_NAME = 'network-status-2'
ATTRIBUTES = {
'version': (None, _parse_network_status_version_line),
'hostname': (None, _parse_dir_source_line),
'address': (None, _parse_dir_source_line),
'dir_port': (None, _parse_dir_source_line),
'fingerprint': (None, _parse_fingerprint_line),
'contact': (None, _parse_contact_line),
'signing_key': (None, _parse_dir_signing_key_line),
'client_versions': ([], _parse_client_versions_line),
'server_versions': ([], _parse_server_versions_line),
'published': (None, _parse_published_line),
'options': ([], _parse_dir_options_line),
'signing_authority': (None, _parse_directory_signature_line),
'signatures': (None, _parse_directory_signature_line),
}
PARSER_FOR_LINE = {
'network-status-version': _parse_network_status_version_line,
'dir-source': _parse_dir_source_line,
'fingerprint': _parse_fingerprint_line,
'contact': _parse_contact_line,
'dir-signing-key': _parse_dir_signing_key_line,
'client-versions': _parse_client_versions_line,
'server-versions': _parse_server_versions_line,
'published': _parse_published_line,
'dir-options': _parse_dir_options_line,
'directory-signature': _parse_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('network-status-version', '2'),
('dir-source', '%s %s 80' % (_random_ipv4_address(), _random_ipv4_address())),
('fingerprint', _random_fingerprint()),
('contact', 'arma at mit dot edu'),
('published', _random_date()),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('directory-signature', 'moria2' + _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(NetworkStatusDocumentV2, self).__init__(raw_content, lazy_load = not validate)
# Splitting the document from the routers. Unlike v3 documents we're not
# bending over backwards on the validation by checking the field order or
# that header/footer attributes aren't in the wrong section. This is a
# deprecated descriptor type - patches welcome if you want those checks.
document_file = io.BytesIO(raw_content)
document_content = bytes.join(b'', _read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryV2,
entry_keyword = ROUTERS_START,
section_end_keywords = (V2_FOOTER_START,),
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
entries = _descriptor_components(document_content + b'\n' + document_file.read(), validate)
if validate:
self._check_constraints(entries)
self._parse(entries, validate)
# 'client-versions' and 'server-versions' are only required if 'Versions'
# is among the options
if 'Versions' in self.options and not ('client-versions' in entries and 'server-versions' in entries):
raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self))
else:
self._entries = entries
def _check_constraints(self, entries):
required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory]
for keyword in required_fields:
if keyword not in entries:
raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self)))
# all recognized fields can only appear once
single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS]
for keyword in single_fields:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self)))
if 'network-status-version' != list(entries.keys())[0]:
raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self))
def _parse_header_network_status_version_line(descriptor, entries):
# "network-status-version" version
value = _value('network-status-version', entries)
if ' ' in value:
version, flavor = value.split(' ', 1)
else:
version, flavor = value, 'ns'
if not version.isdigit():
raise ValueError('Network status document has a non-numeric version: network-status-version %s' % value)
descriptor.version = int(version)
descriptor.version_flavor = flavor
descriptor.is_microdescriptor = flavor == 'microdesc'
if descriptor.version != 3:
raise ValueError("Expected a version 3 network status document, got version '%s' instead" % descriptor.version)
def _parse_header_vote_status_line(descriptor, entries):
# "vote-status" type
#
# The consensus-method and consensus-methods fields are optional since
# they weren't included in version 1. Setting a default now that we
# know if we're a vote or not.
value = _value('vote-status', entries)
if value == 'consensus':
descriptor.is_consensus, descriptor.is_vote = True, False
elif value == 'vote':
descriptor.is_consensus, descriptor.is_vote = False, True
else:
raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value)
def _parse_header_consensus_methods_line(descriptor, entries):
# "consensus-methods" IntegerList
if descriptor._lazy_loading and descriptor.is_vote:
descriptor.consensus_methods = [1]
value, consensus_methods = _value('consensus-methods', entries), []
for entry in value.split(' '):
if not entry.isdigit():
raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value)
consensus_methods.append(int(entry))
descriptor.consensus_methods = consensus_methods
def _parse_header_consensus_method_line(descriptor, entries):
# "consensus-method" Integer
if descriptor._lazy_loading and descriptor.is_consensus:
descriptor.consensus_method = 1
value = _value('consensus-method', entries)
if not value.isdigit():
raise ValueError("A network status document's consensus-method must be an integer, but was '%s'" % value)
descriptor.consensus_method = int(value)
def _parse_header_voting_delay_line(descriptor, entries):
# "voting-delay" VoteSeconds DistSeconds
value = _value('voting-delay', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit():
descriptor.vote_delay = int(value_comp[0])
descriptor.dist_delay = int(value_comp[1])
else:
raise ValueError("A network status document's 'voting-delay' line must be a pair of integer values, but was '%s'" % value)
def _parse_versions_line(keyword, attribute):
def _parse(descriptor, entries):
value, versions = _value(keyword, entries), []
for entry in value.split(','):
try:
versions.append(stem.version._get_version(entry))
except ValueError:
raise ValueError("Network status document's '%s' line had '%s', which isn't a parsable tor version: %s %s" % (keyword, entry, keyword, value))
setattr(descriptor, attribute, versions)
return _parse
def _parse_header_flag_thresholds_line(descriptor, entries):
# "flag-thresholds" SP THRESHOLDS
value, thresholds = _value('flag-thresholds', entries).strip(), {}
for key, val in _mappings_for('flag-thresholds', value):
try:
if val.endswith('%'):
# opting for string manipulation rather than just
# 'float(entry_value) / 100' because floating point arithmetic
# will lose precision
thresholds[key] = float('0.' + val[:-1].replace('.', '', 1))
elif '.' in val:
thresholds[key] = float(val)
else:
thresholds[key] = int(val)
except ValueError:
raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value)
descriptor.flag_thresholds = thresholds
def _parse_header_parameters_line(descriptor, entries):
# "params" [Parameters]
# Parameter ::= Keyword '=' Int32
# Int32 ::= A decimal integer between -2147483648 and 2147483647.
# Parameters ::= Parameter | Parameters SP Parameter
if descriptor._lazy_loading:
descriptor.params = dict(DEFAULT_PARAMS) if descriptor._default_params else {}
value = _value('params', entries)
if value != '':
descriptor.params = _parse_int_mappings('params', value, True)
descriptor._check_params_constraints()
def _parse_directory_footer_line(descriptor, entries):
# nothing to parse, simply checking that we don't have a value
value = _value('directory-footer', entries)
if value:
raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got 'directory-footer %s'" % value)
def _parse_footer_directory_signature_line(descriptor, entries):
signatures = []
for sig_value, block_type, block_contents in entries['directory-signature']:
if sig_value.count(' ') not in (1, 2):
raise ValueError("Authority signatures in a network status document are expected to be of the form 'directory-signature [METHOD] FINGERPRINT KEY_DIGEST', received: %s" % sig_value)
if not block_contents or block_type != 'SIGNATURE':
raise ValueError("'directory-signature' should be followed by a SIGNATURE block, but was a %s" % block_type)
if sig_value.count(' ') == 1:
method = 'sha1' # default if none was provided
fingerprint, key_digest = sig_value.split(' ', 1)
else:
method, fingerprint, key_digest = sig_value.split(' ', 2)
signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, validate = True))
descriptor.signatures = signatures
def _parse_package_line(descriptor, entries):
package_versions = []
for value, _, _ in entries['package']:
value_comp = value.split(' ', 3)
if len(value_comp) < 3:
raise ValueError("'package' must at least have a 'PackageName Version URL': %s" % value)
name, version, url = value_comp[:3]
digests = {}
if len(value_comp) == 4:
for key, val in _mappings_for('package', value_comp[3]):
digests[key] = val
package_versions.append(PackageVersion(name, version, url, digests))
descriptor.packages = package_versions
def _parsed_shared_rand_commit(descriptor, entries):
# "shared-rand-commit" Version AlgName Identity Commit [Reveal]
commitments = []
for value, _, _ in entries['shared-rand-commit']:
value_comp = value.split()
if len(value_comp) < 4:
raise ValueError("'shared-rand-commit' must at least have a 'Version AlgName Identity Commit': %s" % value)
version, algorithm, identity, commit = value_comp[:4]
reveal = value_comp[4] if len(value_comp) >= 5 else None
if not version.isdigit():
raise ValueError("The version on our 'shared-rand-commit' line wasn't an integer: %s" % value)
commitments.append(SharedRandomnessCommitment(int(version), algorithm, identity, commit, reveal))
descriptor.shared_randomness_commitments = commitments
def _parse_shared_rand_previous_value(descriptor, entries):
# "shared-rand-previous-value" NumReveals Value
value = _value('shared-rand-previous-value', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit():
descriptor.shared_randomness_previous_reveal_count = int(value_comp[0])
descriptor.shared_randomness_previous_value = value_comp[1]
else:
raise ValueError("A network status document's 'shared-rand-previous-value' line must be a pair of values, the first an integer but was '%s'" % value)
def _parse_shared_rand_current_value(descriptor, entries):
# "shared-rand-current-value" NumReveals Value
value = _value('shared-rand-current-value', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit():
descriptor.shared_randomness_current_reveal_count = int(value_comp[0])
descriptor.shared_randomness_current_value = value_comp[1]
else:
raise ValueError("A network status document's 'shared-rand-current-value' line must be a pair of values, the first an integer but was '%s'" % value)
def _parse_bandwidth_file_headers(descriptor, entries):
# "bandwidth-file-headers" KeyValues
# KeyValues ::= "" | KeyValue | KeyValues SP KeyValue
# KeyValue ::= Keyword '=' Value
# Value ::= ArgumentChar+
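#
# For example, an illustrative (and abbreviated) header set...
#
#   bandwidth-file-headers timestamp=1555465565 version=1.4.0 software=sbws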
value = _value('bandwidth-file-headers', entries)
results = {}
for key, val in _mappings_for('bandwidth-file-headers', value):
results[key] = val
descriptor.bandwidth_file_headers = results
def _parse_bandwidth_file_digest(descriptor, entries):
# "bandwidth-file-digest" 1*(SP algorithm "=" digest)
value = _value('bandwidth-file-digest', entries)
results = {}
for key, val in _mappings_for('bandwidth-file-digest', value):
results[key] = val
descriptor.bandwidth_file_digest = results
_parse_header_valid_after_line = _parse_timestamp_line('valid-after', 'valid_after')
_parse_header_fresh_until_line = _parse_timestamp_line('fresh-until', 'fresh_until')
_parse_header_valid_until_line = _parse_timestamp_line('valid-until', 'valid_until')
_parse_header_client_versions_line = _parse_versions_line('client-versions', 'client_versions')
_parse_header_server_versions_line = _parse_versions_line('server-versions', 'server_versions')
_parse_header_known_flags_line = _parse_simple_line('known-flags', 'known_flags', func = lambda v: [entry for entry in v.split(' ') if entry])
_parse_footer_bandwidth_weights_line = _parse_simple_line('bandwidth-weights', 'bandwidth_weights', func = lambda v: _parse_int_mappings('bandwidth-weights', v, True))
_parse_shared_rand_participate_line = _parse_if_present('shared-rand-participate', 'is_shared_randomness_participate')
_parse_recommended_client_protocols_line = _parse_protocol_line('recommended-client-protocols', 'recommended_client_protocols')
_parse_recommended_relay_protocols_line = _parse_protocol_line('recommended-relay-protocols', 'recommended_relay_protocols')
_parse_required_client_protocols_line = _parse_protocol_line('required-client-protocols', 'required_client_protocols')
_parse_required_relay_protocols_line = _parse_protocol_line('required-relay-protocols', 'required_relay_protocols')
class NetworkStatusDocumentV3(NetworkStatusDocument):
"""
Version 3 network status document. This could be either a vote or consensus.
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var int version: **\\*** document version
:var str version_flavor: **\\*** flavor associated with the document (such as 'ns' or 'microdesc')
:var bool is_consensus: **\\*** **True** if the document is a consensus
:var bool is_vote: **\\*** **True** if the document is a vote
:var bool is_microdescriptor: **\\*** **True** if this is a microdescriptor
flavored document, **False** otherwise
:var datetime valid_after: **\\*** time when the consensus became valid
:var datetime fresh_until: **\\*** time when the next consensus should be produced
:var datetime valid_until: **\\*** time when this consensus becomes obsolete
:var int vote_delay: **\\*** number of seconds allowed for collecting votes
from all authorities
:var int dist_delay: **\\*** number of seconds allowed for collecting
signatures from all authorities
:var list client_versions: list of recommended client tor versions
:var list server_versions: list of recommended server tor versions
:var list packages: **\\*** list of :data:`~stem.descriptor.networkstatus.PackageVersion` entries
:var list known_flags: **\\*** list of :data:`~stem.Flag` for the router's flags
:var dict params: **\\*** dict of parameter(**str**) => value(**int**) mappings
:var list directory_authorities: **\\*** list of :class:`~stem.descriptor.networkstatus.DirectoryAuthority`
objects that have generated this document
:var list signatures: **\\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
of the authorities that have signed the document
**Consensus Attributes:**
:var int consensus_method: method version used to generate this consensus
:var dict bandwidth_weights: dict of weight(str) => value(int) mappings
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
**Vote Attributes:**
:var list consensus_methods: list of ints for the supported method versions
:var datetime published: time when the document was published
:var dict flag_thresholds: **\\*** mapping of internal performance thresholds used while making the vote, values are **ints** or **floats**
:var dict recommended_client_protocols: recommended protocols for clients
:var dict recommended_relay_protocols: recommended protocols for relays
:var dict required_client_protocols: required protocols for clients
:var dict required_relay_protocols: required protocols for relays
:var dict bandwidth_file_headers: headers from the bandwidth authority that
generated this vote
:var dict bandwidth_file_digest: hashes of the bandwidth authority file used
to generate this vote, this is a mapping of hash functions to their resulting
digest value
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as None if undefined
.. versionchanged:: 1.4.0
Added the packages attribute.
.. versionchanged:: 1.5.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
.. versionchanged:: 1.6.0
Added the recommended_client_protocols, recommended_relay_protocols,
required_client_protocols, and required_relay_protocols attributes.
.. versionchanged:: 1.6.0
The is_shared_randomness_participate and shared_randomness_commitments
were misdocumented in the tor spec and as such never set. They're now an
attribute of votes in the **directory_authorities**.
.. versionchanged:: 1.7.0
The shared_randomness_current_reveal_count and
shared_randomness_previous_reveal_count attributes were undocumented and
not provided properly if retrieved before their shared_randomness_*_value
counterpart.
.. versionchanged:: 1.7.0
Added the bandwidth_file_headers attribute.
.. versionchanged:: 1.8.0
Added the bandwidth_file_digest attribute.
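
Example usage (a minimal sketch; get_consensus() and DocumentHandler are part
of stem's API, but the surrounding code is only illustrative)::

  import stem.descriptor
  import stem.descriptor.remote

  consensus = stem.descriptor.remote.get_consensus(
    document_handler = stem.descriptor.DocumentHandler.DOCUMENT,
  ).run()[0]

  print('consensus is valid until %s' % consensus.valid_until)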
"""
ATTRIBUTES = {
'version': (None, _parse_header_network_status_version_line),
'version_flavor': ('ns', _parse_header_network_status_version_line),
'is_consensus': (True, _parse_header_vote_status_line),
'is_vote': (False, _parse_header_vote_status_line),
'is_microdescriptor': (False, _parse_header_network_status_version_line),
'consensus_methods': ([], _parse_header_consensus_methods_line),
'published': (None, _parse_published_line),
'consensus_method': (None, _parse_header_consensus_method_line),
'valid_after': (None, _parse_header_valid_after_line),
'fresh_until': (None, _parse_header_fresh_until_line),
'valid_until': (None, _parse_header_valid_until_line),
'vote_delay': (None, _parse_header_voting_delay_line),
'dist_delay': (None, _parse_header_voting_delay_line),
'client_versions': ([], _parse_header_client_versions_line),
'server_versions': ([], _parse_header_server_versions_line),
'packages': ([], _parse_package_line),
'known_flags': ([], _parse_header_known_flags_line),
'flag_thresholds': ({}, _parse_header_flag_thresholds_line),
'recommended_client_protocols': ({}, _parse_recommended_client_protocols_line),
'recommended_relay_protocols': ({}, _parse_recommended_relay_protocols_line),
'required_client_protocols': ({}, _parse_required_client_protocols_line),
'required_relay_protocols': ({}, _parse_required_relay_protocols_line),
'params': ({}, _parse_header_parameters_line),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
'bandwidth_file_headers': ({}, _parse_bandwidth_file_headers),
'bandwidth_file_digest': ({}, _parse_bandwidth_file_digest),
'signatures': ([], _parse_footer_directory_signature_line),
'bandwidth_weights': ({}, _parse_footer_bandwidth_weights_line),
}
_HEADER_PARSER_FOR_LINE = {
'network-status-version': _parse_header_network_status_version_line,
'vote-status': _parse_header_vote_status_line,
'consensus-methods': _parse_header_consensus_methods_line,
'consensus-method': _parse_header_consensus_method_line,
'published': _parse_published_line,
'valid-after': _parse_header_valid_after_line,
'fresh-until': _parse_header_fresh_until_line,
'valid-until': _parse_header_valid_until_line,
'voting-delay': _parse_header_voting_delay_line,
'client-versions': _parse_header_client_versions_line,
'server-versions': _parse_header_server_versions_line,
'package': _parse_package_line,
'known-flags': _parse_header_known_flags_line,
'flag-thresholds': _parse_header_flag_thresholds_line,
'recommended-client-protocols': _parse_recommended_client_protocols_line,
'recommended-relay-protocols': _parse_recommended_relay_protocols_line,
'required-client-protocols': _parse_required_client_protocols_line,
'required-relay-protocols': _parse_required_relay_protocols_line,
'params': _parse_header_parameters_line,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
'bandwidth-file-headers': _parse_bandwidth_file_headers,
'bandwidth-file-digest': _parse_bandwidth_file_digest,
}
_FOOTER_PARSER_FOR_LINE = {
'directory-footer': _parse_directory_footer_line,
'bandwidth-weights': _parse_footer_bandwidth_weights_line,
'directory-signature': _parse_footer_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, authorities = None, routers = None):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
is_vote = attr.get('vote-status') == 'vote'
if is_vote:
extra_defaults = {'consensus-methods': '1 9', 'published': _random_date()}
else:
extra_defaults = {'consensus-method': '9'}
if is_vote and authorities is None:
authorities = [DirectoryAuthority.create(is_vote = is_vote)]
for k, v in extra_defaults.items():
if exclude and k in exclude:
continue # explicitly excluding this field
elif k not in attr:
attr[k] = v
desc_content = _descriptor_content(attr, exclude, (
('network-status-version', '3'),
('vote-status', 'consensus'),
('consensus-methods', None),
('consensus-method', None),
('published', None),
('valid-after', _random_date()),
('fresh-until', _random_date()),
('valid-until', _random_date()),
('voting-delay', '300 300'),
('client-versions', None),
('server-versions', None),
('package', None),
('known-flags', 'Authority BadExit Exit Fast Guard HSDir Named Running Stable Unnamed V2Dir Valid'),
('params', None),
), (
('directory-footer', ''),
('bandwidth-weights', None),
('directory-signature', '%s %s%s' % (_random_fingerprint(), _random_fingerprint(), _random_crypto_blob('SIGNATURE'))),
))
# inject the authorities and/or routers between the header and footer
if authorities:
if b'directory-footer' in desc_content:
footer_div = desc_content.find(b'\ndirectory-footer') + 1
elif b'directory-signature' in desc_content:
footer_div = desc_content.find(b'\ndirectory-signature') + 1
else:
if routers:
desc_content += b'\n'
footer_div = len(desc_content) + 1
authority_content = stem.util.str_tools._to_bytes('\n'.join([str(a) for a in authorities]) + '\n')
desc_content = desc_content[:footer_div] + authority_content + desc_content[footer_div:]
if routers:
if b'directory-footer' in desc_content:
footer_div = desc_content.find(b'\ndirectory-footer') + 1
elif b'directory-signature' in desc_content:
footer_div = desc_content.find(b'\ndirectory-signature') + 1
else:
if routers:
desc_content += b'\n'
footer_div = len(desc_content) + 1
router_content = stem.util.str_tools._to_bytes('\n'.join([str(r) for r in routers]) + '\n')
desc_content = desc_content[:footer_div] + router_content + desc_content[footer_div:]
return desc_content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, authorities = None, routers = None):
return cls(cls.content(attr, exclude, sign, authorities, routers), validate = validate)
def __init__(self, raw_content, validate = False, default_params = True):
"""
Parse a v3 network status document.
:param str raw_content: raw network status document data
:param bool validate: **True** if the document is to be validated, **False** otherwise
:param bool default_params: includes defaults in our params dict, otherwise
it just contains values from the document
:raises: **ValueError** if the document is invalid
"""
super(NetworkStatusDocumentV3, self).__init__(raw_content, lazy_load = not validate)
document_file = io.BytesIO(raw_content)
# TODO: Tor misdocumented these as being in the header rather than the
# authority section. As such these have never been set but we need the
# attributes for stem 1.5 compatibility. Drop these in 2.0.
self.is_shared_randomness_participate = False
self.shared_randomness_commitments = []
self._default_params = default_params
self._header(document_file, validate)
self.directory_authorities = tuple(stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = DirectoryAuthority,
entry_keyword = AUTH_START,
section_end_keywords = (ROUTERS_START, FOOTER_START, V2_FOOTER_START),
extra_args = (self.is_vote,),
))
if validate and self.is_vote and len(self.directory_authorities) != 1:
raise ValueError('Votes should only have an authority entry for the one that issued it, got %i: %s' % (len(self.directory_authorities), self.directory_authorities))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryMicroV3 if self.is_microdescriptor else RouterStatusEntryV3,
entry_keyword = ROUTERS_START,
section_end_keywords = (FOOTER_START, V2_FOOTER_START),
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
self._footer(document_file, validate)
def type_annotation(self):
if isinstance(self, BridgeNetworkStatusDocument):
return TypeAnnotation('bridge-network-status', 1, 0)
elif not self.is_microdescriptor:
return TypeAnnotation('network-status-consensus-3' if not self.is_vote else 'network-status-vote-3', 1, 0)
else:
# Directory authorities do not issue a 'microdescriptor consensus' vote,
# so unlike the above there isn't a 'network-status-microdesc-vote-3'
# counterpart here.
return TypeAnnotation('network-status-microdesc-consensus-3', 1, 0)
def is_valid(self):
"""
Checks if the current time is between this document's **valid_after** and
**valid_until** timestamps. To be valid means the information within this
document reflects the current network state.
.. versionadded:: 1.8.0
:returns: **True** if this consensus is presently valid and **False**
otherwise
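
For example, a sketch assuming 'consensus' is a NetworkStatusDocumentV3 we
previously fetched::

  if not consensus.is_valid():
    print('consensus is no longer valid, time to fetch a new one')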
"""
return self.valid_after < datetime.datetime.utcnow() < self.valid_until
def is_fresh(self):
"""
Checks if the current time is between this document's **valid_after** and
**fresh_until** timestamps. To be fresh means this should be the latest
consensus.
.. versionadded:: 1.8.0
:returns: **True** if this consensus is presently fresh and **False**
otherwise
"""
return self.valid_after < datetime.datetime.utcnow() < self.fresh_until
def validate_signatures(self, key_certs):
"""
Validates we're properly signed by the signing certificates.
.. versionadded:: 1.6.0
:param list key_certs: :class:`~stem.descriptor.networkstatus.KeyCertificate`
to validate the consensus against
:raises: **ValueError** if an insufficient number of valid signatures are present.
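
For example (a sketch; DescriptorDownloader.get_key_certificates() is stem's
method for fetching these, the rest is illustrative)::

  import stem.descriptor.remote

  downloader = stem.descriptor.remote.DescriptorDownloader()
  consensus.validate_signatures(downloader.get_key_certificates().run())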
"""
# sha1 hash of the body and header
digest_content = self._content_range('network-status-version', 'directory-signature ')
local_digest = hashlib.sha1(digest_content).hexdigest().upper()
valid_digests, total_digests = 0, 0
required_digests = len(self.signatures) / 2.0
signing_keys = dict([(cert.fingerprint, cert.signing_key) for cert in key_certs])
for sig in self.signatures:
if sig.identity not in signing_keys:
continue
signed_digest = self._digest_for_signature(signing_keys[sig.identity], sig.signature)
total_digests += 1
if signed_digest == local_digest:
valid_digests += 1
if valid_digests < required_digests:
raise ValueError('Network Status Document has %i valid signatures out of %i total, needed %i' % (valid_digests, total_digests, required_digests))
def get_unrecognized_lines(self):
if self._lazy_loading:
self._parse(self._header_entries, False, parser_for_line = self._HEADER_PARSER_FOR_LINE)
self._parse(self._footer_entries, False, parser_for_line = self._FOOTER_PARSER_FOR_LINE)
self._lazy_loading = False
return super(NetworkStatusDocumentV3, self).get_unrecognized_lines()
def meets_consensus_method(self, method):
"""
Checks if we meet the given consensus-method. This works for both votes and
consensuses, checking our 'consensus-method' and 'consensus-methods'
entries.
:param int method: consensus-method to check for
:returns: **True** if we meet the given consensus-method, and **False** otherwise
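
For example, a sketch using the fact (checked in _footer() below) that
consensus-method 9 introduced the 'directory-footer' line::

  if consensus.meets_consensus_method(9):
    pass  # footer is expected to begin with 'directory-footer'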
"""
if self.consensus_method is not None:
return self.consensus_method >= method
elif self.consensus_methods is not None:
return bool([x for x in self.consensus_methods if x >= method])
else:
return False # malformed document
def _header(self, document_file, validate):
content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = _descriptor_components(content, validate)
header_fields = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
if validate:
# all known header fields can only appear once except 'package' and 'shared-rand-commit'
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in header_fields and keyword not in ('package', 'shared-rand-commit'):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if self._default_params:
self.params = dict(DEFAULT_PARAMS)
self._parse(entries, validate, parser_for_line = self._HEADER_PARSER_FOR_LINE)
# should only appear in consensus-method 7 or later
if not self.meets_consensus_method(7) and 'params' in list(entries.keys()):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
# default consensus_method and consensus_methods based on if we're a consensus or vote
if self.is_consensus and not self.consensus_method:
self.consensus_method = 1
elif self.is_vote and not self.consensus_methods:
self.consensus_methods = [1]
else:
self._header_entries = entries
self._entries.update(entries)
def _footer(self, document_file, validate):
entries = _descriptor_components(document_file.read(), validate)
footer_fields = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
if validate:
for keyword, values in list(entries.items()):
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if len(values) > 1 and keyword in footer_fields:
if not (keyword == 'directory-signature' and self.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
self._parse(entries, validate, parser_for_line = self._FOOTER_PARSER_FOR_LINE)
# Check that the footer has the right initial line. Prior to consensus
# method 9 it's a 'directory-signature' and after that footers start with
# 'directory-footer'.
if entries:
if self.meets_consensus_method(9):
if list(entries.keys())[0] != 'directory-footer':
raise ValueError("Network status document's footer should start with a 'directory-footer' line in consensus-method 9 or later")
else:
if list(entries.keys())[0] != 'directory-signature':
raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9")
_check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
else:
self._footer_entries = entries
self._entries.update(entries)
def _check_params_constraints(self):
"""
Checks that the params we know about are within their documented ranges.
"""
for key, value in self.params.items():
minimum, maximum = PARAM_RANGE.get(key, (MIN_PARAM, MAX_PARAM))
# there's a few dynamic parameter ranges
if key == 'cbtclosequantile':
minimum = self.params.get('cbtquantile', minimum)
elif key == 'cbtinitialtimeout':
minimum = self.params.get('cbtmintimeout', minimum)
if value < minimum or value > maximum:
raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
def _check_for_missing_and_disallowed_fields(document, entries, fields):
"""
Checks that we have mandatory fields for our type, and that we don't have
any fields exclusive to the other (ie, no vote-only fields appear in a
consensus or vice versa).
:param NetworkStatusDocumentV3 document: network status document
:param dict entries: ordered keyword/value mappings of the header or footer
:param list fields: expected field attributes (either
**HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
:raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
"""
missing_fields, disallowed_fields = [], []
for field, in_votes, in_consensus, mandatory in fields:
if mandatory and ((document.is_consensus and in_consensus) or (document.is_vote and in_votes)):
# mandatory field, check that we have it
if field not in entries.keys():
missing_fields.append(field)
elif (document.is_consensus and not in_consensus) or (document.is_vote and not in_votes):
# field we shouldn't have, check that we don't
if field in entries.keys():
disallowed_fields.append(field)
if missing_fields:
raise ValueError('Network status document is missing mandatory field: %s' % ', '.join(missing_fields))
if disallowed_fields:
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
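#
# For example, a consensus' 'bandwidth-weights' line is parsed this way and
# looks like (abbreviated, illustrative values)...
#
#   bandwidth-weights Wbd=285 Wbe=0 Wbg=0 Wbm=10000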
results, seen_keys = {}, []
error_template = "Unable to parse network status document's '%s' line (%%s): %s" % (keyword, value)
for key, val in _mappings_for(keyword, value):
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > key:
raise ValueError(error_template % 'parameters must be sorted by their key')
try:
# the int() function accepts things like '+123', but we don't want to
if val.startswith('+'):
raise ValueError()
results[key] = int(val)
except ValueError:
raise ValueError(error_template % ("'%s' is a non-numeric value" % val))
seen_keys.append(key)
return results
def _parse_dirauth_source_line(descriptor, entries):
# "dir-source" nickname identity address IP dirport orport
value = _value('dir-source', entries)
dir_source_comp = value.split(' ')
if len(dir_source_comp) < 6:
raise ValueError("Authority entry's 'dir-source' line must have six values: dir-source %s" % value)
# note: rstrip('-legacy') would strip a *character set* rather than the suffix, so check explicitly
nickname = dir_source_comp[0][:-len('-legacy')] if dir_source_comp[0].endswith('-legacy') else dir_source_comp[0]
if not stem.util.tor_tools.is_valid_nickname(nickname):
raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]):
raise ValueError("Authority's v3ident is invalid: %s" % dir_source_comp[1])
elif not dir_source_comp[2]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[3]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3])
elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4])
elif not stem.util.connection.is_valid_port(dir_source_comp[5]):
raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
descriptor.nickname = dir_source_comp[0]
descriptor.v3ident = dir_source_comp[1]
descriptor.hostname = dir_source_comp[2]
descriptor.address = dir_source_comp[3]
descriptor.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4])
descriptor.or_port = int(dir_source_comp[5])
descriptor.is_legacy = descriptor.nickname.endswith('-legacy')
_parse_legacy_dir_key_line = _parse_forty_character_hex('legacy-dir-key', 'legacy_dir_key')
_parse_vote_digest_line = _parse_forty_character_hex('vote-digest', 'vote_digest')
class DirectoryAuthority(Descriptor):
"""
Directory authority information obtained from a v3 network status document.
Authorities can optionally use a legacy format. These are no longer found in
practice, but have the following differences...
* The authority's nickname ends with '-legacy'.
* There's no **contact** or **vote_digest** attribute.
:var str nickname: **\\*** authority's nickname
:var str v3ident: **\\*** identity key fingerprint used to sign votes and consensus
:var str hostname: **\\*** hostname of the authority
:var str address: **\\*** authority's IP address
:var int dir_port: **\\*** authority's DirPort
:var int or_port: **\\*** authority's ORPort
:var bool is_legacy: **\\*** if the authority's using the legacy format
:var str contact: contact information, this is included if is_legacy is **False**
**Consensus Attributes:**
:var str vote_digest: digest of the authority that contributed to the consensus, this is included if is_legacy is **False**
**Vote Attributes:**
:var str legacy_dir_key: fingerprint of an obsolete identity key
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\\***
authority's key certificate
:var bool is_shared_randomness_participate: **\\*** **True** if this authority
participates in establishing a shared random value, **False** otherwise
:var list shared_randomness_commitments: **\\*** list of
:data:`~stem.descriptor.networkstatus.SharedRandomnessCommitment` entries
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
**\\*** mandatory attribute
.. versionchanged:: 1.4.0
Renamed our 'fingerprint' attribute to 'v3ident' (prior attribute exists
for backward compatibility, but is deprecated).
.. versionchanged:: 1.6.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
"""
ATTRIBUTES = {
'nickname': (None, _parse_dirauth_source_line),
'v3ident': (None, _parse_dirauth_source_line),
'hostname': (None, _parse_dirauth_source_line),
'address': (None, _parse_dirauth_source_line),
'dir_port': (None, _parse_dirauth_source_line),
'or_port': (None, _parse_dirauth_source_line),
'is_legacy': (False, _parse_dirauth_source_line),
'contact': (None, _parse_contact_line),
'vote_digest': (None, _parse_vote_digest_line),
'legacy_dir_key': (None, _parse_legacy_dir_key_line),
'is_shared_randomness_participate': (False, _parse_shared_rand_participate_line),
'shared_randomness_commitments': ([], _parsed_shared_rand_commit),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
}
PARSER_FOR_LINE = {
'dir-source': _parse_dirauth_source_line,
'contact': _parse_contact_line,
'legacy-dir-key': _parse_legacy_dir_key_line,
'vote-digest': _parse_vote_digest_line,
'shared-rand-participate': _parse_shared_rand_participate_line,
'shared-rand-commit': _parsed_shared_rand_commit,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, is_vote = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
# include mandatory 'vote-digest' if a consensus
if not is_vote and not ('vote-digest' in attr or (exclude and 'vote-digest' in exclude)):
attr['vote-digest'] = _random_fingerprint()
content = _descriptor_content(attr, exclude, (
('dir-source', '%s %s no.place.com %s 9030 9090' % (_random_nickname(), _random_fingerprint(), _random_ipv4_address())),
('contact', 'Mike Perry <mikeperry-at-torproject.org>'),
))
if is_vote:
content += b'\n' + KeyCertificate.content()
return content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, is_vote = False):
return cls(cls.content(attr, exclude, sign, is_vote), validate = validate, is_vote = is_vote)
def __init__(self, raw_content, validate = False, is_vote = False):
"""
Parse a directory authority entry in a v3 network status document.
:param str raw_content: raw directory authority entry information
:param bool validate: checks the validity of the content if True, skips
these checks otherwise
:param bool is_vote: True if this is for a vote, False if it's for a consensus
:raises: ValueError if the descriptor data is invalid
"""
super(DirectoryAuthority, self).__init__(raw_content, lazy_load = not validate)
content = stem.util.str_tools._to_unicode(raw_content)
# separate the directory authority entry from its key certificate
key_div = content.find('\ndir-key-certificate-version')
if key_div != -1:
self.key_certificate = KeyCertificate(content[key_div + 1:], validate)
content = content[:key_div + 1]
else:
self.key_certificate = None
entries = _descriptor_components(content, validate)
if validate and 'dir-source' != list(entries.keys())[0]:
raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
# check that we have mandatory fields
if validate:
is_legacy, dir_source_entry = False, entries.get('dir-source')
if dir_source_entry:
is_legacy = dir_source_entry[0][0].split()[0].endswith('-legacy')
required_fields, excluded_fields = ['dir-source'], []
if not is_legacy:
required_fields += ['contact']
if is_vote:
if not self.key_certificate:
raise ValueError('Authority votes must have a key certificate:\n%s' % content)
excluded_fields += ['vote-digest']
elif not is_vote:
if self.key_certificate:
raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content)
if not is_legacy:
required_fields += ['vote-digest']
excluded_fields += ['legacy-dir-key']
for keyword in required_fields:
if keyword not in entries:
raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content))
for keyword in entries:
if keyword in excluded_fields:
type_label = 'votes' if is_vote else 'consensus entries'
raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content))
# all known attributes can only appear at most once
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'):
raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content))
self._parse(entries, validate)
else:
self._entries = entries
# TODO: Due to a bug we had a 'fingerprint' rather than 'v3ident' attribute
# for a long while. Keeping this around for backward compatibility, but
# this will be dropped in stem's 2.0 release.
self.fingerprint = self.v3ident
def _parse_dir_address_line(descriptor, entries):
# "dir-address" IPPort
value = _value('dir-address', entries)
if ':' not in value:
raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: dir-address %s" % value)
address, dirport = value.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("Key certificate's address isn't a valid IPv4 address: dir-address %s" % value)
elif not stem.util.connection.is_valid_port(dirport):
raise ValueError("Key certificate's dirport is invalid: dir-address %s" % value)
descriptor.address = address
descriptor.dir_port = int(dirport)
_parse_dir_key_certificate_version_line = _parse_version_line('dir-key-certificate-version', 'version', 3)
_parse_dir_key_published_line = _parse_timestamp_line('dir-key-published', 'published')
_parse_dir_key_expires_line = _parse_timestamp_line('dir-key-expires', 'expires')
_parse_identity_key_line = _parse_key_block('dir-identity-key', 'identity_key', 'RSA PUBLIC KEY')
_parse_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_dir_key_crosscert_line = _parse_key_block('dir-key-crosscert', 'crosscert', 'ID SIGNATURE')
_parse_dir_key_certification_line = _parse_key_block('dir-key-certification', 'certification', 'SIGNATURE')
class KeyCertificate(Descriptor):
"""
Directory key certificate for a v3 network status document.
:var int version: **\\*** version of the key certificate
:var str address: authority's IP address
:var int dir_port: authority's DirPort
:var str fingerprint: **\\*** authority's fingerprint
:var str identity_key: **\\*** long term authority identity key
:var datetime published: **\\*** time when this key was generated
:var datetime expires: **\\*** time after which this key becomes invalid
:var str signing_key: **\\*** directory server's public signing key
:var str crosscert: signature made using certificate's signing key
:var str certification: **\\*** signature of this key certificate signed with
the identity key
**\\*** mandatory attribute
"""
TYPE_ANNOTATION_NAME = 'dir-key-certificate-3'
ATTRIBUTES = {
'version': (None, _parse_dir_key_certificate_version_line),
'address': (None, _parse_dir_address_line),
'dir_port': (None, _parse_dir_address_line),
'fingerprint': (None, _parse_fingerprint_line),
'identity_key': (None, _parse_identity_key_line),
'published': (None, _parse_dir_key_published_line),
'expires': (None, _parse_dir_key_expires_line),
'signing_key': (None, _parse_signing_key_line),
'crosscert': (None, _parse_dir_key_crosscert_line),
'certification': (None, _parse_dir_key_certification_line),
}
PARSER_FOR_LINE = {
'dir-key-certificate-version': _parse_dir_key_certificate_version_line,
'dir-address': _parse_dir_address_line,
'fingerprint': _parse_fingerprint_line,
'dir-key-published': _parse_dir_key_published_line,
'dir-key-expires': _parse_dir_key_expires_line,
'dir-identity-key': _parse_identity_key_line,
'dir-signing-key': _parse_signing_key_line,
'dir-key-crosscert': _parse_dir_key_crosscert_line,
'dir-key-certification': _parse_dir_key_certification_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('dir-key-certificate-version', '3'),
('fingerprint', _random_fingerprint()),
('dir-key-published', _random_date()),
('dir-key-expires', _random_date()),
('dir-identity-key', _random_crypto_blob('RSA PUBLIC KEY')),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('dir-key-certification', _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(KeyCertificate, self).__init__(raw_content, lazy_load = not validate)
entries = _descriptor_components(raw_content, validate)
if validate:
if 'dir-key-certificate-version' != list(entries.keys())[0]:
raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (raw_content))
elif 'dir-key-certification' != list(entries.keys())[-1]:
raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (raw_content))
# check that we have mandatory fields and that our known fields only
# appear once
for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS:
if is_mandatory and keyword not in entries:
raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, raw_content))
entry_count = len(entries.get(keyword, []))
if entry_count > 1:
raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, raw_content))
self._parse(entries, validate)
else:
self._entries = entries
class DocumentSignature(object):
"""
Directory signature of a v3 network status document.
:var str method: algorithm used to make the signature
:var str identity: fingerprint of the authority that made the signature
:var str key_digest: digest of the signing key
:var str signature: document signature
:var str flavor: consensus type this signature is for (such as 'microdesc'),
**None** if for the standard consensus
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
def __init__(self, method, identity, key_digest, signature, flavor = None, validate = False):
# Checking that these attributes are valid. Technically the key
# digest isn't a fingerprint, but it has the same characteristics.
if validate:
if not stem.util.tor_tools.is_valid_fingerprint(identity):
raise ValueError('Malformed fingerprint (%s) in the document signature' % identity)
if not stem.util.tor_tools.is_valid_fingerprint(key_digest):
raise ValueError('Malformed key digest (%s) in the document signature' % key_digest)
self.method = method
self.identity = identity
self.key_digest = key_digest
self.signature = signature
self.flavor = flavor
def _compare(self, other, method):
if not isinstance(other, DocumentSignature):
return False
for attr in ('method', 'identity', 'key_digest', 'signature', 'flavor'):
if getattr(self, attr) != getattr(other, attr):
return method(getattr(self, attr), getattr(other, attr))
return method(True, True) # we're equal
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class DetachedSignature(Descriptor):
"""
Stand-alone signature of the consensus. These are exchanged between directory
authorities when determining the next hour's consensus.
Detached signatures are defined in section 3.10 of the dir-spec, and are only
available for download during the five minutes between minute 55 and the end
of the hour.
.. versionadded:: 1.8.0
:var str consensus_digest: **\\*** digest of the consensus being signed
:var datetime valid_after: **\\*** time when the consensus became valid
:var datetime fresh_until: **\\*** time when the next consensus should be produced
:var datetime valid_until: **\\*** time when this consensus becomes obsolete
:var list additional_digests: **\\***
:class:`~stem.descriptor.networkstatus.DocumentDigest` for additional
consensus flavors
:var list additional_signatures: **\\***
:class:`~stem.descriptor.networkstatus.DocumentSignature` for additional
consensus flavors
:var list signatures: **\\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
of the authorities that have signed the document
**\\*** mandatory attribute
"""
TYPE_ANNOTATION_NAME = 'detached-signature-3'
ATTRIBUTES = {
'consensus_digest': (None, _parse_consensus_digest_line),
'valid_after': (None, _parse_header_valid_after_line),
'fresh_until': (None, _parse_header_fresh_until_line),
'valid_until': (None, _parse_header_valid_until_line),
'additional_digests': ([], _parse_additional_digests),
'additional_signatures': ([], _parse_additional_signatures),
'signatures': ([], _parse_footer_directory_signature_line),
}
PARSER_FOR_LINE = {
'consensus-digest': _parse_consensus_digest_line,
'valid-after': _parse_header_valid_after_line,
'fresh-until': _parse_header_fresh_until_line,
'valid-until': _parse_header_valid_until_line,
'additional-digest': _parse_additional_digests,
'additional-signature': _parse_additional_signatures,
'directory-signature': _parse_footer_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('consensus-digest', '6D3CC0EFA408F228410A4A8145E1B0BB0670E442'),
('valid-after', _random_date()),
('fresh-until', _random_date()),
('valid-until', _random_date()),
))
def __init__(self, raw_content, validate = False):
super(DetachedSignature, self).__init__(raw_content, lazy_load = not validate)
entries = _descriptor_components(raw_content, validate)
if validate:
if 'consensus-digest' != list(entries.keys())[0]:
raise ValueError("Detached signatures must start with a 'consensus-digest' line:\n%s" % (raw_content))
# check that we have mandatory fields and certain fields only appear once
for keyword, is_mandatory, is_multiple in DETACHED_SIGNATURE_PARAMS:
if is_mandatory and keyword not in entries:
raise ValueError("Detached signatures must have a '%s' line:\n%s" % (keyword, raw_content))
entry_count = len(entries.get(keyword, []))
if not is_multiple and entry_count > 1:
raise ValueError("Detached signatures can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, raw_content))
self._parse(entries, validate)
else:
self._entries = entries
class BridgeNetworkStatusDocument(NetworkStatusDocument):
"""
Network status document containing bridges. This is only available through
the metrics site.
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var datetime published: time when the document was published
"""
TYPE_ANNOTATION_NAME = 'bridge-network-status'
def __init__(self, raw_content, validate = False):
super(BridgeNetworkStatusDocument, self).__init__(raw_content)
self.published = None
document_file = io.BytesIO(raw_content)
published_line = stem.util.str_tools._to_unicode(document_file.readline())
if published_line.startswith('published '):
published_line = published_line.split(' ', 1)[1].strip()
try:
self.published = stem.util.str_tools._parse_timestamp(published_line)
except ValueError:
if validate:
raise ValueError("Bridge network status document's 'published' time wasn't parsable: %s" % published_line)
elif validate:
raise ValueError("Bridge network status documents must start with a 'published' line:\n%s" % stem.util.str_tools._to_unicode(raw_content))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryV2,
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
stem-1.8.0/stem/descriptor/extrainfo_descriptor.py 0000664 0001750 0001750 00000127033 13530351613 023155 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor extra-info descriptors. These are published by relays whenever
their server descriptor is published and have a similar format. However, unlike
server descriptors these don't contain information that Tor clients require to
function and as such aren't fetched by default.
Defined in section 2.1.2 of the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_,
extra-info descriptors contain interesting but non-vital information such as
usage statistics. Tor clients cannot request these documents for bridges.
Extra-info descriptors are available from a few sources...
* If you have 'DownloadExtraInfo 1' in your torrc...
* control port via 'GETINFO extra-info/digest/\\*' queries
* the 'cached-extrainfo' file in tor's data directory
* Archived descriptors provided by `CollecTor <https://metrics.torproject.org/collector.html>`_.
* Directory authorities and mirrors via their DirPort.
**Module Overview:**
::
ExtraInfoDescriptor - Tor extra-info descriptor.
|- RelayExtraInfoDescriptor - Extra-info descriptor for a relay.
|- BridgeExtraInfoDescriptor - Extra-info descriptor for a bridge.
|
+- digest - calculates the upper-case hex digest value for our content
.. data:: DirResponse (enum)
Enumeration for known statuses for ExtraInfoDescriptor's dir_*_responses.
=================== ===========
DirResponse Description
=================== ===========
**OK** network status requests that were answered
**NOT_ENOUGH_SIGS** network status wasn't signed by enough authorities
**UNAVAILABLE** requested network status was unavailable
**NOT_FOUND** requested network status was not found
**NOT_MODIFIED** network status unmodified since If-Modified-Since time
**BUSY** directory was busy
=================== ===========
.. data:: DirStat (enum)
Enumeration for known stats for ExtraInfoDescriptor's dir_*_direct_dl and
dir_*_tunneled_dl.
===================== ===========
DirStat Description
===================== ===========
**COMPLETE** requests that completed successfully
**TIMEOUT** requests that didn't complete within a ten minute timeout
**RUNNING** requests still in process when measurement's taken
**MIN** smallest rate at which a descriptor was downloaded in B/s
**MAX** largest rate at which a descriptor was downloaded in B/s
**D1-4** and **D6-9** rate of the slowest x/10 download rates in B/s
**Q1** and **Q3** rate of the slowest and fastest quarter download rates in B/s
**MD** median download rate in B/s
===================== ===========
"""
import functools
import hashlib
import re
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.tor_tools  # used by _parse_extra_info_line for nickname/fingerprint checks
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
DigestHash,
DigestEncoding,
create_signing_key,
_descriptor_content,
_read_until_keywords,
_descriptor_components,
_value,
_values,
_parse_simple_line,
_parse_int_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_key_block,
_mappings_for,
_append_router_signature,
_random_nickname,
_random_fingerprint,
_random_date,
_random_crypto_blob,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# known statuses for dirreq-v2-resp and dirreq-v3-resp...
DirResponse = stem.util.enum.Enum(
('OK', 'ok'),
('NOT_ENOUGH_SIGS', 'not-enough-sigs'),
('UNAVAILABLE', 'unavailable'),
('NOT_FOUND', 'not-found'),
('NOT_MODIFIED', 'not-modified'),
('BUSY', 'busy'),
)
# known stats for dirreq-v2/3-direct-dl and dirreq-v2/3-tunneled-dl...
dir_stats = ['complete', 'timeout', 'running', 'min', 'max', 'q1', 'q3', 'md']
dir_stats += ['d%i' % i for i in range(1, 5)]
dir_stats += ['d%i' % i for i in range(6, 10)]
DirStat = stem.util.enum.Enum(*[(stat.upper(), stat) for stat in dir_stats])
# relay descriptors must have exactly one of the following
REQUIRED_FIELDS = (
'extra-info',
'published',
'router-signature',
)
# optional entries that can appear at most once
SINGLE_FIELDS = (
'read-history',
'write-history',
'geoip-db-digest',
'geoip6-db-digest',
'bridge-stats-end',
'bridge-ips',
'dirreq-stats-end',
'dirreq-v2-ips',
'dirreq-v3-ips',
'dirreq-v2-reqs',
'dirreq-v3-reqs',
'dirreq-v2-share',
'dirreq-v3-share',
'dirreq-v2-resp',
'dirreq-v3-resp',
'dirreq-v2-direct-dl',
'dirreq-v3-direct-dl',
'dirreq-v2-tunneled-dl',
'dirreq-v3-tunneled-dl',
'dirreq-read-history',
'dirreq-write-history',
'entry-stats-end',
'entry-ips',
'cell-stats-end',
'cell-processed-cells',
'cell-queued-cells',
'cell-time-in-queue',
'cell-circuits-per-decile',
'conn-bi-direct',
'exit-stats-end',
'exit-kibibytes-written',
'exit-kibibytes-read',
'exit-streams-opened',
)
_timestamp_re = re.compile('^(.*) \\(([0-9]+) s\\)( .*)?$')
_locale_re = re.compile('^[a-zA-Z0-9\\?]{2}$')
def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
"""
Iterates over the extra-info descriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool is_bridge: parses the file as being a bridge descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.extrainfo_descriptor.ExtraInfoDescriptor`
instances in the file
:raises:
* **ValueError** if the content is malformed and validate is **True**
* **IOError** if the file can't be read
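
For example, a sketch that assumes tor's data directory is /var/lib/tor::

  with open('/var/lib/tor/cached-extrainfo', 'rb') as descriptor_file:
    for desc in _parse_file(descriptor_file):
      print(desc.nickname)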
"""
while True:
if not is_bridge:
extrainfo_content = _read_until_keywords('router-signature', descriptor_file)
# we've reached the 'router-signature', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
extrainfo_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
else:
extrainfo_content = _read_until_keywords('router-digest', descriptor_file, True)
if extrainfo_content:
if extrainfo_content[0].startswith(b'@type'):
extrainfo_content = extrainfo_content[1:]
if is_bridge:
yield BridgeExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
else:
yield RelayExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
else:
break # done parsing file
def _parse_timestamp_and_interval(keyword, content):
"""
Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry.
:param str keyword: line's keyword
:param str content: line content to be parsed
:returns: **tuple** of the form (timestamp (**datetime**), interval
(**int**), remaining content (**str**))
:raises: **ValueError** if the content is malformed
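
For example::

  >>> _parse_timestamp_and_interval('read-history', '2019-01-01 00:00:00 (900 s) 3,5,8')
  (datetime.datetime(2019, 1, 1, 0, 0), 900, '3,5,8')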
"""
line = '%s %s' % (keyword, content)
content_match = _timestamp_re.match(content)
if not content_match:
raise ValueError('Malformed %s line: %s' % (keyword, line))
timestamp_str, interval, remainder = content_match.groups()
if remainder:
remainder = remainder[1:] # remove leading space
if not interval.isdigit():
raise ValueError("%s line's interval wasn't a number: %s" % (keyword, line))
try:
timestamp = stem.util.str_tools._parse_timestamp(timestamp_str)
return timestamp, int(interval), remainder
except ValueError:
raise ValueError("%s line's timestamp wasn't parsable: %s" % (keyword, line))
def _parse_extra_info_line(descriptor, entries):
# "extra-info" Nickname Fingerprint
value = _value('extra-info', entries)
extra_info_comp = value.split()
if len(extra_info_comp) < 2:
raise ValueError('Extra-info line must have two values: extra-info %s' % value)
elif not stem.util.tor_tools.is_valid_nickname(extra_info_comp[0]):
raise ValueError("Extra-info line entry isn't a valid nickname: %s" % extra_info_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(extra_info_comp[1]):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % extra_info_comp[1])
descriptor.nickname = extra_info_comp[0]
descriptor.fingerprint = extra_info_comp[1]
def _parse_transport_line(descriptor, entries):
# "transport" transportname address:port [arglist]
# Everything after the transportname is scrubbed in published bridge
# descriptors, so we'll never see it in practice.
#
# These entries really only make sense for bridges, but have been seen
# on non-bridges in the wild when the relay operator configured it this
# way.
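#
# For example, a scrubbed then an unscrubbed entry (hypothetical address)...
#
#   transport obfs4
#   transport obfs4 198.51.100.5:443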
transports = {}
for value in _values('transport', entries):
name, address, port, args = None, None, None, None
if ' ' not in value:
# scrubbed
name = value
else:
# not scrubbed
value_comp = value.split()
if len(value_comp) < 1:
raise ValueError('Transport line is missing its transport name: transport %s' % value)
elif len(value_comp) < 2:
raise ValueError('Transport line is missing its address:port value: transport %s' % value)
elif ':' not in value_comp[1]:
raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value)
name = value_comp[0]
address, port_str = value_comp[1].rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) and \
not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError('Transport line has a malformed address: transport %s' % value)
elif not stem.util.connection.is_valid_port(port_str):
raise ValueError('Transport line has a malformed port: transport %s' % value)
address = address.lstrip('[').rstrip(']')  # reassign so bracket stripping of IPv6 addresses actually takes effect
port = int(port_str)
args = value_comp[2:] if len(value_comp) >= 3 else []
transports[name] = (address, port, args)
descriptor.transport = transports
def _parse_padding_counts_line(descriptor, entries):
# "padding-counts" YYYY-MM-DD HH:MM:SS (NSEC s) key=val key=val...
value = _value('padding-counts', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('padding-counts', value)
counts = {}
for k, v in _mappings_for('padding-counts', remainder, require_value = True):
counts[k] = int(v) if v.isdigit() else v
setattr(descriptor, 'padding_counts_end', timestamp)
setattr(descriptor, 'padding_counts_interval', interval)
setattr(descriptor, 'padding_counts', counts)
def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries):
value = _value(keyword, entries)
recognized_counts = {}
unrecognized_counts = {}
is_response_stats = keyword in ('dirreq-v2-resp', 'dirreq-v3-resp')
key_set = DirResponse if is_response_stats else DirStat
key_type = 'STATUS' if is_response_stats else 'STAT'
for status, count in _mappings_for(keyword, value, divider = ','):
if not count.isdigit():
raise ValueError('%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value))
if status in key_set:
recognized_counts[status] = int(count)
else:
unrecognized_counts[status] = int(count)
setattr(descriptor, recognized_counts_attr, recognized_counts)
setattr(descriptor, unrecognized_counts_attr, unrecognized_counts)
def _parse_dirreq_share_line(keyword, attribute, descriptor, entries):
value = _value(keyword, entries)
if not value.endswith('%'):
raise ValueError('%s lines should be a percentage: %s %s' % (keyword, keyword, value))
elif float(value[:-1]) < 0:
raise ValueError('Negative percentage value: %s %s' % (keyword, value))
# bug means it might be above 100%: https://lists.torproject.org/pipermail/tor-dev/2012-June/003679.html
setattr(descriptor, attribute, float(value[:-1]) / 100)
def _parse_cell_line(keyword, attribute, descriptor, entries):
# "" num,...,num
value = _value(keyword, entries)
entries, exc = [], None
if value:
for entry in value.split(','):
try:
# Values should be positive but as discussed in ticket #5849
# there was a bug around this. It was fixed in tor 0.2.2.1.
entries.append(float(entry))
except ValueError:
exc = ValueError('Non-numeric entry in %s listing: %s %s' % (keyword, keyword, value))
setattr(descriptor, attribute, entries)
if exc:
raise exc
def _parse_timestamp_and_interval_line(keyword, end_attribute, interval_attribute, descriptor, entries):
# "" YYYY-MM-DD HH:MM:SS (NSEC s)
timestamp, interval, _ = _parse_timestamp_and_interval(keyword, _value(keyword, entries))
setattr(descriptor, end_attribute, timestamp)
setattr(descriptor, interval_attribute, interval)
def _parse_conn_bi_direct_line(descriptor, entries):
# "conn-bi-direct" YYYY-MM-DD HH:MM:SS (NSEC s) BELOW,READ,WRITE,BOTH
value = _value('conn-bi-direct', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('conn-bi-direct', value)
stats = remainder.split(',')
if len(stats) != 4 or not (stats[0].isdigit() and stats[1].isdigit() and stats[2].isdigit() and stats[3].isdigit()):
raise ValueError('conn-bi-direct line should end with four numeric values: conn-bi-direct %s' % value)
descriptor.conn_bi_direct_end = timestamp
descriptor.conn_bi_direct_interval = interval
descriptor.conn_bi_direct_below = int(stats[0])
descriptor.conn_bi_direct_read = int(stats[1])
descriptor.conn_bi_direct_write = int(stats[2])
descriptor.conn_bi_direct_both = int(stats[3])
def _parse_history_line(keyword, end_attribute, interval_attribute, values_attribute, descriptor, entries):
# "" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
value = _value(keyword, entries)
timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value)
history_values = []
if remainder:
try:
history_values = [int(entry) for entry in remainder.split(',')]
except ValueError:
raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))
setattr(descriptor, end_attribute, timestamp)
setattr(descriptor, interval_attribute, interval)
setattr(descriptor, values_attribute, history_values)
def _parse_port_count_line(keyword, attribute, descriptor, entries):
# "" port=N,port=N,...
value, port_mappings = _value(keyword, entries), {}
for port, stat in _mappings_for(keyword, value, divider = ','):
if (port != 'other' and not stem.util.connection.is_valid_port(port)) or not stat.isdigit():
raise ValueError('Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value))
port = int(port) if port.isdigit() else port
port_mappings[port] = int(stat)
setattr(descriptor, attribute, port_mappings)
def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries):
# "" CC=N,CC=N,...
#
# The maxmind geoip database (https://www.maxmind.com/app/iso3166) has
# special locale codes for some values, for instance...
# A1,"Anonymous Proxy"
# A2,"Satellite Provider"
# ??,"Unknown"
value, locale_usage = _value(keyword, entries), {}
for locale, count in _mappings_for(keyword, value, divider = ','):
if not _locale_re.match(locale) or not count.isdigit():
raise ValueError('Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value))
locale_usage[locale] = int(count)
setattr(descriptor, attribute, locale_usage)
def _parse_bridge_ip_versions_line(descriptor, entries):
value, ip_versions = _value('bridge-ip-versions', entries), {}
for protocol, count in _mappings_for('bridge-ip-versions', value, divider = ','):
if not count.isdigit():
raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value))
ip_versions[protocol] = int(count)
descriptor.ip_versions = ip_versions
def _parse_bridge_ip_transports_line(descriptor, entries):
value, ip_transports = _value('bridge-ip-transports', entries), {}
for protocol, count in _mappings_for('bridge-ip-transports', value, divider = ','):
if not count.isdigit():
raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value))
ip_transports[protocol] = int(count)
descriptor.ip_transports = ip_transports
def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entries):
# "" num key=val key=val...
value, stat, extra = _value(keyword, entries), None, {}
if value is None:
pass # not in the descriptor
elif value == '':
raise ValueError("'%s' line was blank" % keyword)
else:
if ' ' in value:
stat_value, remainder = value.split(' ', 1)
else:
stat_value, remainder = value, None
try:
stat = int(stat_value)
except ValueError:
raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, stat_value, keyword, value))
for key, val in _mappings_for(keyword, remainder):
extra[key] = val
setattr(descriptor, stat_attribute, stat)
setattr(descriptor, extra_attribute, extra)
_parse_identity_ed25519_line = _parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest')
_parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest')
_parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown')
_parse_dirreq_v3_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-resp', 'dir_v3_responses', 'dir_v3_responses_unknown')
_parse_dirreq_v2_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-direct-dl', 'dir_v2_direct_dl', 'dir_v2_direct_dl_unknown')
_parse_dirreq_v3_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-direct-dl', 'dir_v3_direct_dl', 'dir_v3_direct_dl_unknown')
_parse_dirreq_v2_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-tunneled-dl', 'dir_v2_tunneled_dl', 'dir_v2_tunneled_dl_unknown')
_parse_dirreq_v3_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-tunneled-dl', 'dir_v3_tunneled_dl', 'dir_v3_tunneled_dl_unknown')
_parse_dirreq_v2_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v2-share', 'dir_v2_share')
_parse_dirreq_v3_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v3-share', 'dir_v3_share')
_parse_cell_processed_cells_line = functools.partial(_parse_cell_line, 'cell-processed-cells', 'cell_processed_cells')
_parse_cell_queued_cells_line = functools.partial(_parse_cell_line, 'cell-queued-cells', 'cell_queued_cells')
_parse_cell_time_in_queue_line = functools.partial(_parse_cell_line, 'cell-time-in-queue', 'cell_time_in_queue')
_parse_cell_circuits_per_decile_line = _parse_int_line('cell-circuits-per-decile', 'cell_circuits_per_decile', allow_negative = False)
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_geoip_start_time_line = _parse_timestamp_line('geoip-start-time', 'geoip_start_time')
_parse_cell_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'cell-stats-end', 'cell_stats_end', 'cell_stats_interval')
_parse_entry_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'entry-stats-end', 'entry_stats_end', 'entry_stats_interval')
_parse_exit_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'exit-stats-end', 'exit_stats_end', 'exit_stats_interval')
_parse_bridge_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'bridge-stats-end', 'bridge_stats_end', 'bridge_stats_interval')
_parse_dirreq_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'dirreq-stats-end', 'dir_stats_end', 'dir_stats_interval')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_dirreq_read_history_line = functools.partial(_parse_history_line, 'dirreq-read-history', 'dir_read_history_end', 'dir_read_history_interval', 'dir_read_history_values')
_parse_dirreq_write_history_line = functools.partial(_parse_history_line, 'dirreq-write-history', 'dir_write_history_end', 'dir_write_history_interval', 'dir_write_history_values')
_parse_exit_kibibytes_written_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-written', 'exit_kibibytes_written')
_parse_exit_kibibytes_read_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-read', 'exit_kibibytes_read')
_parse_exit_streams_opened_line = functools.partial(_parse_port_count_line, 'exit-streams-opened', 'exit_streams_opened')
_parse_hidden_service_stats_end_line = _parse_timestamp_line('hidserv-stats-end', 'hs_stats_end')
_parse_hidden_service_rend_relayed_cells_line = functools.partial(_parse_hs_stats, 'hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr')
_parse_hidden_service_dir_onions_seen_line = functools.partial(_parse_hs_stats, 'hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr')
_parse_dirreq_v2_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-ips', 'dir_v2_ips')
_parse_dirreq_v3_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-ips', 'dir_v3_ips')
_parse_dirreq_v2_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-reqs', 'dir_v2_requests')
_parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-reqs', 'dir_v3_requests')
_parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins')
_parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips')
_parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
class ExtraInfoDescriptor(Descriptor):
"""
Extra-info descriptor document.
:var str nickname: **\\*** relay's nickname
:var str fingerprint: **\\*** identity key fingerprint
:var datetime published: **\\*** time in UTC when this descriptor was made
:var str geoip_db_digest: sha1 of the geoIP database file for IPv4 addresses
:var str geoip6_db_digest: sha1 of the geoIP database file for IPv6 addresses
:var dict transport: **\\*** mapping of transport methods to their (address,
port, args) tuple; these usually appear on bridges, in which case all of
those are **None**
**Bi-directional connection usage:**
:var datetime conn_bi_direct_end: end of the sampling interval
:var int conn_bi_direct_interval: seconds per interval
:var int conn_bi_direct_below: connections that read/wrote less than 20 KiB
:var int conn_bi_direct_read: connections that read at least 10x more than wrote
:var int conn_bi_direct_write: connections that wrote at least 10x more than read
:var int conn_bi_direct_both: remaining connections
**Bytes read/written for relayed traffic:**
:var datetime read_history_end: end of the sampling interval
:var int read_history_interval: seconds per interval
:var list read_history_values: bytes read during each interval
:var datetime write_history_end: end of the sampling interval
:var int write_history_interval: seconds per interval
:var list write_history_values: bytes written during each interval
**Cell relaying statistics:**
:var datetime cell_stats_end: end of the period when stats were gathered
:var int cell_stats_interval: length in seconds of the interval
:var list cell_processed_cells: measurement of processed cells per circuit
:var list cell_queued_cells: measurement of queued cells per circuit
:var list cell_time_in_queue: mean enqueued time in milliseconds for cells
:var int cell_circuits_per_decile: mean number of circuits in a decile
**Directory Mirror Attributes:**
:var datetime dir_stats_end: end of the period when stats were gathered
:var int dir_stats_interval: length in seconds of the interval
:var dict dir_v2_ips: mapping of locales to rounded count of requester ips
:var dict dir_v3_ips: mapping of locales to rounded count of requester ips
:var float dir_v2_share: percent of total directory traffic it expects to serve
:var float dir_v3_share: percent of total directory traffic it expects to serve
:var dict dir_v2_requests: mapping of locales to rounded count of requests
:var dict dir_v3_requests: mapping of locales to rounded count of requests
:var dict dir_v2_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
:var dict dir_v3_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
:var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count
:var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count
:var dict dir_v2_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
:var dict dir_v3_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
:var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v2_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
:var dict dir_v3_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
:var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
**Bytes read/written for directory mirroring:**
:var datetime dir_read_history_end: end of the sampling interval
:var int dir_read_history_interval: seconds per interval
:var list dir_read_history_values: bytes read during each interval
:var datetime dir_write_history_end: end of the sampling interval
:var int dir_write_history_interval: seconds per interval
:var list dir_write_history_values: bytes written during each interval
**Guard Attributes:**
:var datetime entry_stats_end: end of the period when stats were gathered
:var int entry_stats_interval: length in seconds of the interval
:var dict entry_ips: mapping of locales to rounded count of unique user ips
**Exit Attributes:**
:var datetime exit_stats_end: end of the period when stats were gathered
:var int exit_stats_interval: length in seconds of the interval
:var dict exit_kibibytes_written: traffic per port (keys are ints or 'other')
:var dict exit_kibibytes_read: traffic per port (keys are ints or 'other')
:var dict exit_streams_opened: streams per port (keys are ints or 'other')
**Hidden Service Attributes:**
:var datetime hs_stats_end: end of the sampling interval
:var int hs_rend_cells: rounded count of the RENDEZVOUS1 cells seen
:var dict hs_rend_cells_attr: **\\*** attributes provided for the hs_rend_cells
:var int hs_dir_onions_seen: rounded count of the identities seen
:var dict hs_dir_onions_seen_attr: **\\*** attributes provided for the hs_dir_onions_seen
**Padding Count Attributes:**
:var dict padding_counts: **\\*** padding parameters
:var datetime padding_counts_end: end of the period when padding data is being collected
:var int padding_counts_interval: length in seconds of the interval
**Bridge Attributes:**
:var datetime bridge_stats_end: end of the period when stats were gathered
:var int bridge_stats_interval: length in seconds of the interval
:var dict bridge_ips: mapping of locales to rounded count of unique user ips
:var datetime geoip_start_time: replaced by bridge_stats_end (deprecated)
:var dict geoip_client_origins: replaced by bridge_ips (deprecated)
:var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
:var dict ip_transports: mapping of ip transports to a rounded count for the number of users
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.4.0
Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr,
hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes.
.. versionchanged:: 1.6.0
Added the padding_counts, padding_counts_end, and padding_counts_interval
attributes.
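For example, a minimal parsing sketch (the cached descriptor path below is
hypothetical and depends on your tor data directory)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('/home/atagar/.tor/cached-extrainfo', 'extra-info 1.0'):
    print('%s published %s' % (desc.nickname, desc.published))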
"""
ATTRIBUTES = {
'nickname': (None, _parse_extra_info_line),
'fingerprint': (None, _parse_extra_info_line),
'published': (None, _parse_published_line),
'geoip_db_digest': (None, _parse_geoip_db_digest_line),
'geoip6_db_digest': (None, _parse_geoip6_db_digest_line),
'transport': ({}, _parse_transport_line),
'conn_bi_direct_end': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_interval': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_below': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_read': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_write': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_both': (None, _parse_conn_bi_direct_line),
'read_history_end': (None, _parse_read_history_line),
'read_history_interval': (None, _parse_read_history_line),
'read_history_values': (None, _parse_read_history_line),
'write_history_end': (None, _parse_write_history_line),
'write_history_interval': (None, _parse_write_history_line),
'write_history_values': (None, _parse_write_history_line),
'cell_stats_end': (None, _parse_cell_stats_end_line),
'cell_stats_interval': (None, _parse_cell_stats_end_line),
'cell_processed_cells': (None, _parse_cell_processed_cells_line),
'cell_queued_cells': (None, _parse_cell_queued_cells_line),
'cell_time_in_queue': (None, _parse_cell_time_in_queue_line),
'cell_circuits_per_decile': (None, _parse_cell_circuits_per_decile_line),
'dir_stats_end': (None, _parse_dirreq_stats_end_line),
'dir_stats_interval': (None, _parse_dirreq_stats_end_line),
'dir_v2_ips': (None, _parse_dirreq_v2_ips_line),
'dir_v3_ips': (None, _parse_dirreq_v3_ips_line),
'dir_v2_share': (None, _parse_dirreq_v2_share_line),
'dir_v3_share': (None, _parse_dirreq_v3_share_line),
'dir_v2_requests': (None, _parse_dirreq_v2_reqs_line),
'dir_v3_requests': (None, _parse_dirreq_v3_reqs_line),
'dir_v2_responses': (None, _parse_dirreq_v2_resp_line),
'dir_v3_responses': (None, _parse_dirreq_v3_resp_line),
'dir_v2_responses_unknown': (None, _parse_dirreq_v2_resp_line),
'dir_v3_responses_unknown': (None, _parse_dirreq_v3_resp_line),
'dir_v2_direct_dl': (None, _parse_dirreq_v2_direct_dl_line),
'dir_v3_direct_dl': (None, _parse_dirreq_v3_direct_dl_line),
'dir_v2_direct_dl_unknown': (None, _parse_dirreq_v2_direct_dl_line),
'dir_v3_direct_dl_unknown': (None, _parse_dirreq_v3_direct_dl_line),
'dir_v2_tunneled_dl': (None, _parse_dirreq_v2_tunneled_dl_line),
'dir_v3_tunneled_dl': (None, _parse_dirreq_v3_tunneled_dl_line),
'dir_v2_tunneled_dl_unknown': (None, _parse_dirreq_v2_tunneled_dl_line),
'dir_v3_tunneled_dl_unknown': (None, _parse_dirreq_v3_tunneled_dl_line),
'dir_read_history_end': (None, _parse_dirreq_read_history_line),
'dir_read_history_interval': (None, _parse_dirreq_read_history_line),
'dir_read_history_values': (None, _parse_dirreq_read_history_line),
'dir_write_history_end': (None, _parse_dirreq_write_history_line),
'dir_write_history_interval': (None, _parse_dirreq_write_history_line),
'dir_write_history_values': (None, _parse_dirreq_write_history_line),
'entry_stats_end': (None, _parse_entry_stats_end_line),
'entry_stats_interval': (None, _parse_entry_stats_end_line),
'entry_ips': (None, _parse_entry_ips_line),
'exit_stats_end': (None, _parse_exit_stats_end_line),
'exit_stats_interval': (None, _parse_exit_stats_end_line),
'exit_kibibytes_written': (None, _parse_exit_kibibytes_written_line),
'exit_kibibytes_read': (None, _parse_exit_kibibytes_read_line),
'exit_streams_opened': (None, _parse_exit_streams_opened_line),
'hs_stats_end': (None, _parse_hidden_service_stats_end_line),
'hs_rend_cells': (None, _parse_hidden_service_rend_relayed_cells_line),
'hs_rend_cells_attr': ({}, _parse_hidden_service_rend_relayed_cells_line),
'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line),
'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line),
'padding_counts': ({}, _parse_padding_counts_line),
'padding_counts_end': (None, _parse_padding_counts_line),
'padding_counts_interval': (None, _parse_padding_counts_line),
'bridge_stats_end': (None, _parse_bridge_stats_end_line),
'bridge_stats_interval': (None, _parse_bridge_stats_end_line),
'bridge_ips': (None, _parse_bridge_ips_line),
'geoip_start_time': (None, _parse_geoip_start_time_line),
'geoip_client_origins': (None, _parse_geoip_client_origins_line),
'ip_versions': (None, _parse_bridge_ip_versions_line),
'ip_transports': (None, _parse_bridge_ip_transports_line),
}
PARSER_FOR_LINE = {
'extra-info': _parse_extra_info_line,
'geoip-db-digest': _parse_geoip_db_digest_line,
'geoip6-db-digest': _parse_geoip6_db_digest_line,
'transport': _parse_transport_line,
'cell-circuits-per-decile': _parse_cell_circuits_per_decile_line,
'dirreq-v2-resp': _parse_dirreq_v2_resp_line,
'dirreq-v3-resp': _parse_dirreq_v3_resp_line,
'dirreq-v2-direct-dl': _parse_dirreq_v2_direct_dl_line,
'dirreq-v3-direct-dl': _parse_dirreq_v3_direct_dl_line,
'dirreq-v2-tunneled-dl': _parse_dirreq_v2_tunneled_dl_line,
'dirreq-v3-tunneled-dl': _parse_dirreq_v3_tunneled_dl_line,
'dirreq-v2-share': _parse_dirreq_v2_share_line,
'dirreq-v3-share': _parse_dirreq_v3_share_line,
'cell-processed-cells': _parse_cell_processed_cells_line,
'cell-queued-cells': _parse_cell_queued_cells_line,
'cell-time-in-queue': _parse_cell_time_in_queue_line,
'published': _parse_published_line,
'geoip-start-time': _parse_geoip_start_time_line,
'cell-stats-end': _parse_cell_stats_end_line,
'entry-stats-end': _parse_entry_stats_end_line,
'exit-stats-end': _parse_exit_stats_end_line,
'bridge-stats-end': _parse_bridge_stats_end_line,
'dirreq-stats-end': _parse_dirreq_stats_end_line,
'conn-bi-direct': _parse_conn_bi_direct_line,
'read-history': _parse_read_history_line,
'write-history': _parse_write_history_line,
'dirreq-read-history': _parse_dirreq_read_history_line,
'dirreq-write-history': _parse_dirreq_write_history_line,
'exit-kibibytes-written': _parse_exit_kibibytes_written_line,
'exit-kibibytes-read': _parse_exit_kibibytes_read_line,
'exit-streams-opened': _parse_exit_streams_opened_line,
'hidserv-stats-end': _parse_hidden_service_stats_end_line,
'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line,
'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line,
'padding-counts': _parse_padding_counts_line,
'dirreq-v2-ips': _parse_dirreq_v2_ips_line,
'dirreq-v3-ips': _parse_dirreq_v3_ips_line,
'dirreq-v2-reqs': _parse_dirreq_v2_reqs_line,
'dirreq-v3-reqs': _parse_dirreq_v3_reqs_line,
'geoip-client-origins': _parse_geoip_client_origins_line,
'entry-ips': _parse_entry_ips_line,
'bridge-ips': _parse_bridge_ips_line,
'bridge-ip-versions': _parse_bridge_ip_versions_line,
'bridge-ip-transports': _parse_bridge_ip_transports_line,
}
def __init__(self, raw_contents, validate = False):
"""
Extra-info descriptor constructor. By default this validates the
descriptor's content as it's parsed. This validation can be disabled to
either improve performance or be accepting of malformed data.
:param str raw_contents: extra-info content provided by the relay
:param bool validate: checks the validity of the extra-info descriptor if
**True**, skips these checks otherwise
:raises: **ValueError** if the contents are malformed and validate is True
"""
super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate)
entries = _descriptor_components(raw_contents, validate)
if validate:
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword)
for keyword in self._required_fields() + SINGLE_FIELDS:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)
expected_first_keyword = self._first_keyword()
if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword)
expected_last_keyword = self._last_keyword()
if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
self._parse(entries, validate)
else:
self._entries = entries
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
"""
Digest of this descriptor's content. These are referenced by...
* **Server Descriptors**
* Referrer: :class:`~stem.descriptor.server_descriptor.ServerDescriptor` **extra_info_digest** attribute
* Format: **SHA1/HEX**
* **Server Descriptors**
* Referrer: :class:`~stem.descriptor.server_descriptor.ServerDescriptor` **extra_info_sha256_digest** attribute
* Format: **SHA256/BASE64**
.. versionchanged:: 1.8.0
Added the hash_type and encoding arguments.
:param stem.descriptor.DigestHash hash_type: digest hashing algorithm
:param stem.descriptor.DigestEncoding encoding: digest encoding
:returns: **hashlib.HASH** or **str** based on our encoding argument
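For instance, a sketch assuming 'desc' is a parsed
:class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor`...

::

  print(desc.digest(DigestHash.SHA256, DigestEncoding.BASE64))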
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the ExtraInfoDescriptor subclass')
def _required_fields(self):
return REQUIRED_FIELDS
def _first_keyword(self):
return 'extra-info'
def _last_keyword(self):
return 'router-signature'
class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Relay extra-info descriptor, constructed from data such as that provided by
'GETINFO extra-info/digest/\\*', cached descriptors, and metrics
(`specification <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).
:var str ed25519_certificate: base64 encoded ed25519 certificate
:var str ed25519_signature: signature of this document using ed25519
:var str signature: **\\*** signature for this extrainfo descriptor
**\\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate and ed25519_signature attributes.
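For example, fabricating a descriptor for testing with this class's create
helper (the nickname and fingerprint here are arbitrary)...

::

  desc = RelayExtraInfoDescriptor.create({'extra-info': 'caerSidi 4F0C867DF0EF68160568C826838F482CEA7CFE44'})
  print(desc.nickname)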
"""
TYPE_ANNOTATION_NAME = 'extra-info'
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'router-signature': _parse_router_signature_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None):
base_header = (
('extra-info', '%s %s' % (_random_nickname(), _random_fingerprint())),
('published', _random_date()),
)
if signing_key:
sign = True
if sign:
if attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None):
return cls(cls.content(attr, exclude, sign, signing_key), validate = validate)
@lru_cache()
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
if hash_type == DigestHash.SHA1:
# our digest is calculated from everything except our signature
content = self._content_range(end = '\nrouter-signature\n')
return stem.descriptor._encode_digest(hashlib.sha1(content), encoding)
elif hash_type == DigestHash.SHA256:
# Due to a tor bug sha256 digests are calculated from the
# whole descriptor rather than omitting the signature...
#
# https://trac.torproject.org/projects/tor/ticket/28415
return stem.descriptor._encode_digest(hashlib.sha256(self.get_bytes()), encoding)
else:
raise NotImplementedError('Extrainfo descriptor digests are only available in sha1 and sha256, not %s' % hash_type)
class BridgeExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Bridge extra-info descriptor (`bridge descriptor specification
<https://metrics.torproject.org/collector.html#bridge-descriptors>`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
"""
TYPE_ANNOTATION_NAME = 'bridge-extra-info'
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('extra-info', 'ec2bridgereaac65a3 %s' % _random_fingerprint()),
('published', _random_date()),
), (
('router-digest', _random_fingerprint()),
))
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
if hash_type == DigestHash.SHA1 and encoding == DigestEncoding.HEX:
return self._digest
elif hash_type == DigestHash.SHA256 and encoding == DigestEncoding.BASE64:
return self.router_digest_sha256
else:
raise NotImplementedError('Bridge extrainfo digests are only available as sha1/hex and sha256/base64, not %s/%s' % (hash_type, encoding))
def _required_fields(self):
excluded_fields = [
'router-signature',
]
included_fields = [
'router-digest',
]
return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])
def _last_keyword(self):
return None
stem-1.8.0/stem/descriptor/hidden_service_descriptor.py 0000664 0001750 0001750 00000000261 13530351613 024122 0 ustar atagar atagar 0000000 0000000 # TODO: This module (hidden_service_descriptor) is a temporary alias for
# hidden_service. This alias will be removed in Stem 2.x.
from stem.descriptor.hidden_service import *
stem-1.8.0/stem/descriptor/collector.py 0000664 0001750 0001750 00000064725 13601507121 020712 0 ustar atagar atagar 0000000 0000000 # Copyright 2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Descriptor archives are available from `CollecTor
<https://collector.torproject.org/>`_. If you need Tor's topology
at a prior point in time this is the place to go!
With CollecTor you can either read descriptors directly...
.. literalinclude:: /_static/example/collector_reading.py
:language: python
... or download the descriptors to disk and read them later.
.. literalinclude:: /_static/example/collector_caching.py
:language: python
::
get_instance - Provides a singleton CollecTor used for...
|- get_server_descriptors - published server descriptors
|- get_extrainfo_descriptors - published extrainfo descriptors
|- get_microdescriptors - published microdescriptors
|- get_consensus - published router status entries
|
|- get_key_certificates - authority key certificates
|- get_bandwidth_files - bandwidth authority heuristics
+- get_exit_lists - TorDNSEL exit list
File - Individual file residing within CollecTor
|- read - provides descriptors from this file
+- download - download this file to disk
CollecTor - Downloader for descriptors from CollecTor
|- get_server_descriptors - published server descriptors
|- get_extrainfo_descriptors - published extrainfo descriptors
|- get_microdescriptors - published microdescriptors
|- get_consensus - published router status entries
|
|- get_key_certificates - authority key certificates
|- get_bandwidth_files - bandwidth authority heuristics
|- get_exit_lists - TorDNSEL exit list
|
|- index - metadata for content available from CollecTor
+- files - files available from CollecTor
.. versionadded:: 1.8.0
"""
import base64
import binascii
import datetime
import hashlib
import json
import os
import re
import shutil
import tempfile
import time
import stem.descriptor
import stem.util.connection
import stem.util.str_tools
from stem.descriptor import Compression, DocumentHandler
COLLECTOR_URL = 'https://collector.torproject.org/'
REFRESH_INDEX_RATE = 3600 # get new index if cached copy is an hour old
SINGLETON_COLLECTOR = None
YEAR_DATE = re.compile('-(\\d{4})-(\\d{2})\\.')
SEC_DATE = re.compile('(\\d{4}-\\d{2}-\\d{2}-\\d{2}-\\d{2}-\\d{2})')
# distant future date so we can sort files without a timestamp at the end
FUTURE = datetime.datetime(9999, 1, 1)
def get_instance():
"""
Provides the singleton :class:`~stem.descriptor.collector.CollecTor`
used for this module's shorthand functions.
:returns: singleton :class:`~stem.descriptor.collector.CollecTor` instance
"""
global SINGLETON_COLLECTOR
if SINGLETON_COLLECTOR is None:
SINGLETON_COLLECTOR = CollecTor()
return SINGLETON_COLLECTOR
def get_server_descriptors(start = None, end = None, cache_to = None, bridge = False, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_server_descriptors`
on our singleton instance.
"""
for desc in get_instance().get_server_descriptors(start, end, cache_to, bridge, timeout, retries):
yield desc
def get_extrainfo_descriptors(start = None, end = None, cache_to = None, bridge = False, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_extrainfo_descriptors`
on our singleton instance.
"""
for desc in get_instance().get_extrainfo_descriptors(start, end, cache_to, bridge, timeout, retries):
yield desc
def get_microdescriptors(start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_microdescriptors`
on our singleton instance.
"""
for desc in get_instance().get_microdescriptors(start, end, cache_to, timeout, retries):
yield desc
def get_consensus(start = None, end = None, cache_to = None, document_handler = DocumentHandler.ENTRIES, version = 3, microdescriptor = False, bridge = False, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_consensus`
on our singleton instance.
"""
for desc in get_instance().get_consensus(start, end, cache_to, document_handler, version, microdescriptor, bridge, timeout, retries):
yield desc
def get_key_certificates(start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_key_certificates`
on our singleton instance.
"""
for desc in get_instance().get_key_certificates(start, end, cache_to, timeout, retries):
yield desc
def get_bandwidth_files(start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_bandwidth_files`
on our singleton instance.
"""
for desc in get_instance().get_bandwidth_files(start, end, cache_to, timeout, retries):
yield desc
def get_exit_lists(start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Shorthand for
:func:`~stem.descriptor.collector.CollecTor.get_exit_lists`
on our singleton instance.
"""
for desc in get_instance().get_exit_lists(start, end, cache_to, timeout, retries):
yield desc
class File(object):
"""
File within CollecTor.
:var str path: file path within collector
:var tuple types: descriptor types contained within this file
:var stem.descriptor.Compression compression: file compression, **None** if
this cannot be determined
:var int size: size of the file
:var str sha256: file's sha256 checksum
:var datetime start: first publication within the file, **None** if this
cannot be determined
:var datetime end: last publication within the file, **None** if this cannot
be determined
:var datetime last_modified: when the file was last modified
"""
def __init__(self, path, types, size, sha256, first_published, last_published, last_modified):
self.path = path
self.types = tuple(types) if types else ()
self.compression = File._guess_compression(path)
self.size = size
self.sha256 = sha256
self.last_modified = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M')
self._downloaded_to = None # location we last downloaded to
# Most descriptor types have publication time fields, but microdescriptors
# don't because these files lack timestamps to parse.
if first_published and last_published:
self.start = datetime.datetime.strptime(first_published, '%Y-%m-%d %H:%M')
self.end = datetime.datetime.strptime(last_published, '%Y-%m-%d %H:%M')
else:
self.start, self.end = File._guess_time_range(path)
def read(self, directory = None, descriptor_type = None, start = None, end = None, document_handler = DocumentHandler.ENTRIES, timeout = None, retries = 3):
"""
Provides descriptors from this archive. Descriptors are downloaded or read
from disk as follows...
* If this file has already been downloaded through
:func:`~stem.descriptor.collector.File.download` these descriptors
are read from disk.
* If a **directory** argument is provided and the file is already present
these descriptors are read from disk.
* If a **directory** argument is provided and the file is not present the
file is downloaded to this location then read.
* If the file has not been downloaded and no **directory** argument
is provided then the file is downloaded to a temporary directory that's
deleted after it is read.
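For example, a sketch that caches archives into a './descriptors' directory
of our choosing...

::

  import stem.descriptor.collector

  collector = stem.descriptor.collector.CollecTor()

  for f in collector.files('server-descriptor'):
    for desc in f.read('./descriptors'):
      print(desc.fingerprint)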
:param str directory: destination to download into
:param str descriptor_type: `descriptor type
<https://metrics.torproject.org/collector.html#data-formats>`_, this is
guessed if not provided
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param int timeout: timeout when connection becomes idle, no timeout
applied if **None**
:param int retries: maximum attempts to impose
:returns: iterator for :class:`~stem.descriptor.__init__.Descriptor`
instances in the file
:raises:
* **ValueError** if unable to determine the descriptor type
* **TypeError** if we cannot parse this descriptor type
* :class:`~stem.DownloadFailed` if the download fails
"""
if descriptor_type is None:
# If archive contains multiple descriptor types the caller must provide a
# 'descriptor_type' argument so we can disambiguate. However, if only the
# version number varies we can probably simply pick one.
base_types = set([t.split(' ')[0] for t in self.types])
if not self.types:
raise ValueError("Unable to determine this file's descriptor type")
elif len(base_types) > 1:
raise ValueError("Unable to disambiguate file's descriptor type from among %s" % ', '.join(self.types))
else:
descriptor_type = self.types[0]
if directory is None:
if self._downloaded_to and os.path.exists(self._downloaded_to):
directory = os.path.dirname(self._downloaded_to)
else:
# TODO: The following can be replaced with simpler usage of
# tempfile.TemporaryDirectory when we drop python 2.x support.
tmp_directory = tempfile.mkdtemp()
for desc in self.read(tmp_directory, descriptor_type, start, end, document_handler, timeout, retries):
yield desc
shutil.rmtree(tmp_directory)
return
path = self.download(directory, True, timeout, retries)
# Archives can contain multiple descriptor types, so we parse everything
# and filter to what we're after.
for desc in stem.descriptor.parse_file(path, document_handler = document_handler):
if descriptor_type is None or descriptor_type.startswith(desc.type_annotation().name):
# TODO: This can filter server and extrainfo times, but other
# descriptor types may use other attribute names.
published = getattr(desc, 'published', None)
if published:
if start and published < start:
continue
elif end and published > end:
continue
yield desc
def download(self, directory, decompress = True, timeout = None, retries = 3, overwrite = False):
"""
Downloads this file to the given location. If a file already exists with
the expected checksum this is a no-op.
:param str directory: destination to download into
:param bool decompress: decompress written file
:param int timeout: timeout when connection becomes idle, no timeout
applied if **None**
:param int retries: maximum attempts to impose
:param bool overwrite: if this file exists but mismatches CollecTor's
checksum then overwrites if **True**, otherwise raises an exception
:returns: **str** with the path we downloaded to
:raises:
* :class:`~stem.DownloadFailed` if the download fails
* **IOError** if a mismatching file exists and **overwrite** is **False**
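For instance, a sketch that downloads into '~/descriptor_archive' (a path
of our choosing)...

::

  for f in collector.files('server-descriptor'):
    print('downloaded to %s' % f.download('~/descriptor_archive'))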
"""
filename = self.path.split('/')[-1]
if self.compression != Compression.PLAINTEXT and decompress:
filename = filename.rsplit('.', 1)[0]
directory = os.path.expanduser(directory)
path = os.path.join(directory, filename)
if not os.path.exists(directory):
os.makedirs(directory)
# check if this file already exists with the correct checksum
if os.path.exists(path):
# read as bytes and compare hex strings so this works under python 3
with open(path, 'rb') as prior_file:
expected_hash = binascii.hexlify(base64.b64decode(self.sha256)).decode('utf-8')
actual_hash = hashlib.sha256(prior_file.read()).hexdigest()
if expected_hash == actual_hash:
return path # nothing to do, we already have the file
elif not overwrite:
raise IOError("%s already exists but mismatches CollecTor's checksum (expected: %s, actual: %s)" % (path, expected_hash, actual_hash))
response = stem.util.connection.download(COLLECTOR_URL + self.path, timeout, retries)
if decompress:
response = self.compression.decompress(response)
with open(path, 'wb') as output_file:
output_file.write(response)
self._downloaded_to = path
return path
@staticmethod
def _guess_compression(path):
"""
Determine file compression from CollecTor's filename.
"""
for compression in (Compression.LZMA, Compression.BZ2, Compression.GZIP):
if path.endswith(compression.extension):
return compression
return Compression.PLAINTEXT
@staticmethod
def _guess_time_range(path):
"""
Attempt to determine the (start, end) time range from CollecTor's filename.
This provides (None, None) if this cannot be determined.
"""
year_match = YEAR_DATE.search(path)
if year_match:
year, month = map(int, year_match.groups())
start = datetime.datetime(year, month, 1)
if month < 12:
return (start, datetime.datetime(year, month + 1, 1))
else:
return (start, datetime.datetime(year + 1, 1, 1))
sec_match = SEC_DATE.search(path)
if sec_match:
# Descriptors in the 'recent/*' section have filenames with second level
# granularity. Not quite sure why, but since consensus documents are
# published hourly we'll use that as the delta here.
start = datetime.datetime.strptime(sec_match.group(1), '%Y-%m-%d-%H-%M-%S')
return (start, start + datetime.timedelta(seconds = 3600))
return (None, None)
class CollecTor(object):
"""
Downloader for descriptors from CollecTor. The contents of CollecTor are
provided in `an index <https://collector.torproject.org/index/index.json>`_
that's fetched as required.
:var int retries: number of times to attempt the request if downloading it
fails
:var float timeout: duration before we'll time out our request
"""
def __init__(self, retries = 2, timeout = None):
self.retries = retries
self.timeout = timeout
self._cached_index = None
self._cached_files = None
self._cached_index_at = 0
def get_server_descriptors(self, start = None, end = None, cache_to = None, bridge = False, timeout = None, retries = 3):
"""
Provides server descriptors published during the given time range, sorted
oldest to newest.
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param bool bridge: standard descriptors if **False**, bridge if **True**
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.server_descriptor.ServerDescriptor` for the
given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
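For example, a sketch fetching the last day of descriptors (the time range
here is arbitrary)...

::

  import datetime
  import stem.descriptor.collector

  collector = stem.descriptor.collector.CollecTor()
  yesterday = datetime.datetime.utcnow() - datetime.timedelta(days = 1)

  for desc in collector.get_server_descriptors(start = yesterday):
    print(desc.fingerprint)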
"""
desc_type = 'server-descriptor' if not bridge else 'bridge-server-descriptor'
for f in self.files(desc_type, start, end):
for desc in f.read(cache_to, desc_type, start, end, timeout = timeout, retries = retries):
yield desc
def get_extrainfo_descriptors(self, start = None, end = None, cache_to = None, bridge = False, timeout = None, retries = 3):
"""
Provides extrainfo descriptors published during the given time range,
sorted oldest to newest.
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param bool bridge: standard descriptors if **False**, bridge if **True**
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor`
for the given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
"""
desc_type = 'extra-info' if not bridge else 'bridge-extra-info'
for f in self.files(desc_type, start, end):
for desc in f.read(cache_to, desc_type, start, end, timeout = timeout, retries = retries):
yield desc
def get_microdescriptors(self, start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Provides microdescriptors estimated to be published during the given time
range, sorted oldest to newest. Unlike server/extrainfo descriptors,
microdescriptors change very infrequently...
::
"Microdescriptors are expected to be relatively static and only change
about once per week." -dir-spec section 3.3
CollecTor archives only contain microdescriptors that *change*, so hourly
tarballs often contain very few. Microdescriptors also do not contain
their publication timestamp, so this is estimated.
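For example, a sketch caching archives into './descriptors' (a directory of
our choosing)...

::

  for md in collector.get_microdescriptors(cache_to = './descriptors'):
    print(md.onion_key)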
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.microdescriptor.Microdescriptor`
for the given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
"""
for f in self.files('microdescriptor', start, end):
for desc in f.read(cache_to, 'microdescriptor', start, end, timeout = timeout, retries = retries):
yield desc
def get_consensus(self, start = None, end = None, cache_to = None, document_handler = DocumentHandler.ENTRIES, version = 3, microdescriptor = False, bridge = False, timeout = None, retries = 3):
"""
Provides consensus router status entries published during the given time
range, sorted oldest to newest.
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param int version: consensus variant to retrieve (versions 2 or 3)
:param bool microdescriptor: provides the microdescriptor consensus if
**True**, standard consensus otherwise
:param bool bridge: standard descriptors if **False**, bridge if **True**
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.router_status_entry.RouterStatusEntry`
for the given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
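For example, a sketch fetching microdescriptor consensus entries...

::

  for entry in collector.get_consensus(microdescriptor = True):
    print(entry.fingerprint)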
"""
if version == 3 and not microdescriptor and not bridge:
desc_type = 'network-status-consensus-3'
elif version == 3 and microdescriptor and not bridge:
desc_type = 'network-status-microdesc-consensus-3'
elif version == 2 and not microdescriptor and not bridge:
desc_type = 'network-status-2'
elif bridge:
desc_type = 'bridge-network-status'
else:
if microdescriptor and version != 3:
raise ValueError('Only v3 microdescriptors are available (not version %s)' % version)
else:
raise ValueError('Only v2 and v3 router status entries are available (not version %s)' % version)
for f in self.files(desc_type, start, end):
for desc in f.read(cache_to, desc_type, start, end, document_handler, timeout = timeout, retries = retries):
yield desc
def get_key_certificates(self, start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Directory authority key certificates for the given time range,
sorted oldest to newest.
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.networkstatus.KeyCertificate`
for the given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
"""
for f in self.files('dir-key-certificate-3', start, end):
for desc in f.read(cache_to, 'dir-key-certificate-3', start, end, timeout = timeout, retries = retries):
yield desc
def get_bandwidth_files(self, start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
Bandwidth authority heuristics for the given time range, sorted oldest to
newest.
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.bandwidth_file.BandwidthFile`
for the given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
"""
for f in self.files('bandwidth-file', start, end):
for desc in f.read(cache_to, 'bandwidth-file', start, end, timeout = timeout, retries = retries):
yield desc
def get_exit_lists(self, start = None, end = None, cache_to = None, timeout = None, retries = 3):
"""
`TorDNSEL exit lists <https://www.torproject.org/projects/tordnsel.html.en>`_
for the given time range, sorted oldest to newest.
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:param str cache_to: directory to cache archives into, if an archive is
available here it is not downloaded
:param int timeout: timeout for downloading each individual archive when
the connection becomes idle, no timeout applied if **None**
:param int retries: maximum attempts to impose on a per-archive basis
:returns: **iterator** of
:class:`~stem.descriptor.tordnsel.TorDNSEL`
for the given time range
:raises: :class:`~stem.DownloadFailed` if the download fails
"""
for f in self.files('tordnsel', start, end):
for desc in f.read(cache_to, 'tordnsel', start, end, timeout = timeout, retries = retries):
yield desc
def index(self, compression = 'best'):
"""
Provides the archives available in CollecTor.
:param descriptor.Compression compression: compression type to
download with, if undefined we'll use the best decompression available
:returns: **dict** with the archive contents
:raises:
If unable to retrieve the index this raises...
* **ValueError** if json is malformed
* **IOError** if unable to decompress
* :class:`~stem.DownloadFailed` if the download fails
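For instance, a sketch that simply lists the index's top level keys...

::

  collector = stem.descriptor.collector.CollecTor()
  print(sorted(collector.index().keys()))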
"""
if not self._cached_index or time.time() - self._cached_index_at >= REFRESH_INDEX_RATE:
if compression == 'best':
for option in (Compression.LZMA, Compression.BZ2, Compression.GZIP, Compression.PLAINTEXT):
if option.available:
compression = option
break
elif compression is None:
compression = Compression.PLAINTEXT
extension = compression.extension if compression != Compression.PLAINTEXT else ''
url = COLLECTOR_URL + 'index/index.json' + extension
response = compression.decompress(stem.util.connection.download(url, self.timeout, self.retries))
self._cached_index = json.loads(stem.util.str_tools._to_unicode(response))
self._cached_index_at = time.time()
return self._cached_index
def files(self, descriptor_type = None, start = None, end = None):
"""
Provides files CollecTor presently has, sorted oldest to newest.
:param str descriptor_type: descriptor type or prefix to retrieve
:param datetime.datetime start: publication time to begin with
:param datetime.datetime end: publication time to end with
:returns: **list** of :class:`~stem.descriptor.collector.File`
:raises:
If unable to retrieve the index this raises...
* **ValueError** if json is malformed
* **IOError** if unable to decompress
* :class:`~stem.DownloadFailed` if the download fails
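For example, a sketch listing server descriptor archives from a
hypothetical month...

::

  import datetime

  start, end = datetime.datetime(2019, 6, 1), datetime.datetime(2019, 7, 1)

  for f in collector.files('server-descriptor', start, end):
    print(f.path)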
"""
if not self._cached_files or time.time() - self._cached_index_at >= REFRESH_INDEX_RATE:
self._cached_files = sorted(CollecTor._files(self.index(), []), key = lambda x: x.start if x.start else FUTURE)
matches = []
for f in self._cached_files:
if start and (f.end is None or f.end < start):
continue # only contains descriptors before time range
elif end and (f.start is None or f.start > end):
continue # only contains descriptors after time range
if descriptor_type is None or any([desc_type.startswith(descriptor_type) for desc_type in f.types]):
matches.append(f)
return matches
@staticmethod
def _files(val, path):
"""
Recursively provides the files within the index.
:param dict val: index hash
:param list path: path we've traversed into
:returns: **list** of :class:`~stem.descriptor.collector.File`
"""
if not isinstance(val, dict):
return [] # leaf node without any files
files = []
for k, v in val.items():
if k == 'files':
for attr in v:
file_path = '/'.join(path + [attr.get('path')])
files.append(File(file_path, attr.get('types'), attr.get('size'), attr.get('sha256'), attr.get('first_published'), attr.get('last_published'), attr.get('last_modified')))
elif k == 'directories':
for attr in v:
files.extend(CollecTor._files(attr, path + [attr.get('path')]))
return files
stem-1.8.0/stem/descriptor/export.py 0000664 0001750 0001750 00000010104 13501272761 020233 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Toolkit for exporting descriptors to other formats.
**Module Overview:**
::
export_csv - Exports descriptors to a CSV
export_csv_file - Writes exported CSV output to a file
.. deprecated:: 1.7.0
This module will likely be removed in Stem 2.0 due to lack of usage. If you
use this module please `let me know <https://www.atagar.com/contact/>`_.
"""
import csv
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import stem.descriptor
import stem.prereq
class _ExportDialect(csv.excel):
lineterminator = '\n'
def export_csv(descriptors, included_fields = (), excluded_fields = (), header = True):
"""
Provides a newline separated CSV for one or more descriptors. If simply
provided with descriptors then the CSV contains all of its attributes,
labeled with a header row. Either 'included_fields' or 'excluded_fields' can
be used for more granular control over its attributes and the order.
:param Descriptor,list descriptors: either a
:class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
:param list included_fields: attributes to include in the csv
:param list excluded_fields: attributes to exclude from the csv
:param bool header: if **True** then the first line will be a comma separated
list of the attribute names (**only supported in python 2.7 and higher**)
:returns: **str** of the CSV for the descriptors, one per line
:raises: **ValueError** if descriptors contain more than one descriptor type
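For instance, a sketch using server descriptors read from disk (the cached
descriptor path is hypothetical)...

::

  from stem.descriptor import parse_file
  from stem.descriptor.export import export_csv

  descriptors = list(parse_file('/home/atagar/.tor/cached-descriptors'))
  print(export_csv(descriptors, included_fields = ('nickname', 'address')))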
"""
output_buffer = StringIO()
export_csv_file(output_buffer, descriptors, included_fields, excluded_fields, header)
return output_buffer.getvalue()
def export_csv_file(output_file, descriptors, included_fields = (), excluded_fields = (), header = True):
"""
Similar to :func:`stem.descriptor.export.export_csv`, except that the CSV is
written directly to a file.
:param file output_file: file to be written to
:param Descriptor,list descriptors: either a
:class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
:param list included_fields: attributes to include in the csv
:param list excluded_fields: attributes to exclude from the csv
:param bool header: if **True** then the first line will be a comma separated
list of the attribute names (**only supported in python 2.7 and higher**)
:returns: **str** of the CSV for the descriptors, one per line
:raises: **ValueError** if descriptors contain more than one descriptor type
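For instance, a sketch that writes the CSV to a file of our choosing...

::

  with open('descriptors.csv', 'w') as csv_file:
    export_csv_file(csv_file, descriptors)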
"""
if isinstance(descriptors, stem.descriptor.Descriptor):
descriptors = (descriptors,)
if not descriptors:
return
descriptor_type = type(descriptors[0])
descriptor_type_label = descriptor_type.__name__
included_fields = list(included_fields)
# If the user didn't specify the fields to include then export everything,
# ordered alphabetically. If they did specify fields then make sure that
# they exist.
desc_attr = sorted(vars(descriptors[0]).keys())
if included_fields:
for field in included_fields:
if field not in desc_attr:
raise ValueError("%s does not have a '%s' attribute, valid fields are: %s" % (descriptor_type_label, field, ', '.join(desc_attr)))
else:
included_fields = [attr for attr in desc_attr if not attr.startswith('_')]
for field in excluded_fields:
try:
included_fields.remove(field)
except ValueError:
pass
writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore')
if header and not stem.prereq._is_python_26():
writer.writeheader()
for desc in descriptors:
if not isinstance(desc, stem.descriptor.Descriptor):
raise ValueError('Unable to export a descriptor CSV since %s is not a descriptor.' % type(desc).__name__)
elif descriptor_type != type(desc):
raise ValueError('To export a descriptor CSV all of the descriptors must be of the same type. First descriptor was a %s but we later got a %s.' % (descriptor_type_label, type(desc).__name__))
writer.writerow(vars(desc))
stem-1.8.0/stem/descriptor/router_status_entry.py 0000664 0001750 0001750 00000060456 13564354230 023076 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for router status entries, the information for individual routers
within a network status document. This information is provided from a few
sources...
* control port via 'GETINFO ns/\\*' and 'GETINFO md/\\*' queries
* router entries in a network status document, like the cached-consensus
**Module Overview:**
::
RouterStatusEntry - Common parent for router status entries
|- RouterStatusEntryV2 - Entry for a network status v2 document
| +- RouterStatusEntryBridgeV2 - Entry for a bridge flavored v2 document
|
|- RouterStatusEntryV3 - Entry for a network status v3 document
+- RouterStatusEntryMicroV3 - Entry for a microdescriptor flavored v3 document
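For example, a minimal sketch that reads entries from a cached consensus
(the data directory path is an illustrative assumption)...
::
  import stem.descriptor
  for entry in stem.descriptor.parse_file('/home/atagar/.tor/cached-consensus'):
    print('%s has flags: %s' % (entry.nickname, ', '.join(entry.flags)))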
"""
import binascii
import io
import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor import (
KEYWORD_LINE,
Descriptor,
_descriptor_content,
_value,
_values,
_descriptor_components,
_parse_protocol_line,
_read_until_keywords,
_random_nickname,
_random_ipv4_address,
_random_date,
)
_parse_pr_line = _parse_protocol_line('pr', 'protocols')
def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()):
"""
Reads a range of the document_file containing some number of entry_class
instances. We delimit the entry_class entries by the keyword on their
first line (entry_keyword). When finished the document is left at the
end_position.
Either an end_position or section_end_keywords must be provided.
:param file document_file: file with network status document content
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param class entry_class: class to construct instance for
:param str entry_keyword: first keyword for the entry instances
:param int start_position: start of the section, default is the current position
:param int end_position: end of the section
:param tuple section_end_keywords: keyword(s) that delimit the end of the
section if no end_position was provided
:param tuple extra_args: extra arguments for the entry_class (after the
content and validate flag)
:returns: iterator over entry_class instances
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
if start_position:
document_file.seek(start_position)
else:
start_position = document_file.tell()
# check if we're starting at the end of the section (i.e. there are no entries to read)
if section_end_keywords:
first_keyword = None
line_match = KEYWORD_LINE.match(stem.util.str_tools._to_unicode(document_file.readline()))
if line_match:
first_keyword = line_match.groups()[0]
document_file.seek(start_position)
if first_keyword in section_end_keywords:
return
while end_position is None or document_file.tell() < end_position:
desc_lines, ending_keyword = _read_until_keywords(
(entry_keyword,) + section_end_keywords,
document_file,
ignore_first = True,
end_position = end_position,
include_ending_keyword = True
)
desc_content = bytes.join(b'', desc_lines)
if desc_content:
yield entry_class(desc_content, validate, *extra_args)
# check if we stopped at the end of the section
if ending_keyword in section_end_keywords:
break
else:
break
def _parse_r_line(descriptor, entries):
# Parses a RouterStatusEntry's 'r' line. They're very nearly identical for
# all current entry types (v2, v3, and microdescriptor v3) with one little
# wrinkle: only the microdescriptor flavor excludes a 'digest' field.
#
# For v2 and v3 router status entries:
# "r" nickname identity digest publication IP ORPort DirPort
# example: r mauer BD7xbfsCFku3+tgybEZsg8Yjhvw itcuKQ6PuPLJ7m/Oi928WjO2j8g 2012-06-22 13:19:32 80.101.105.103 9001 0
#
# For v3 microdescriptor router status entries:
# "r" nickname identity publication IP ORPort DirPort
# example: r Konata ARIJF2zbqirB9IwsW0mQznccWww 2012-09-24 13:40:40 69.64.48.168 9001 9030
value = _value('r', entries)
include_digest = not isinstance(descriptor, RouterStatusEntryMicroV3)
r_comp = value.split(' ')
# inject a None for the digest to normalize the field positioning
if not include_digest:
r_comp.insert(2, None)
if len(r_comp) < 8:
expected_field_count = 'eight' if include_digest else 'seven'
raise ValueError("%s 'r' line must have %s values: r %s" % (descriptor._name(), expected_field_count, value))
if not stem.util.tor_tools.is_valid_nickname(r_comp[0]):
raise ValueError("%s nickname isn't valid: %s" % (descriptor._name(), r_comp[0]))
elif not stem.util.connection.is_valid_ipv4_address(r_comp[5]):
raise ValueError("%s address isn't a valid IPv4 address: %s" % (descriptor._name(), r_comp[5]))
elif not stem.util.connection.is_valid_port(r_comp[6]):
raise ValueError('%s ORPort is invalid: %s' % (descriptor._name(), r_comp[6]))
elif not stem.util.connection.is_valid_port(r_comp[7], allow_zero = True):
raise ValueError('%s DirPort is invalid: %s' % (descriptor._name(), r_comp[7]))
descriptor.nickname = r_comp[0]
descriptor.fingerprint = _base64_to_hex(r_comp[1])
if include_digest:
descriptor.digest = _base64_to_hex(r_comp[2])
descriptor.address = r_comp[5]
descriptor.or_port = int(r_comp[6])
descriptor.dir_port = None if r_comp[7] == '0' else int(r_comp[7])
try:
published = '%s %s' % (r_comp[3], r_comp[4])
descriptor.published = stem.util.str_tools._parse_timestamp(published)
except ValueError:
raise ValueError("Publication time time wasn't parsable: r %s" % value)
def _parse_a_line(descriptor, entries):
# "a" SP address ":" portlist
# example: a [2001:888:2133:0:82:94:251:204]:9001
or_addresses = []
for value in _values('a', entries):
if ':' not in value:
raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value))
address, port = value.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError("%s 'a' line must start with an IPv6 address: a %s" % (descriptor._name(), value))
if stem.util.connection.is_valid_port(port):
or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
else:
raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value))
descriptor.or_addresses = or_addresses
def _parse_s_line(descriptor, entries):
# "s" Flags
# example: s Named Running Stable Valid
value = _value('s', entries)
flags = [] if value == '' else value.split(' ')
descriptor.flags = flags
for flag in flags:
if flags.count(flag) > 1:
raise ValueError('%s had duplicate flags: s %s' % (descriptor._name(), value))
elif flag == '':
raise ValueError("%s had extra whitespace on its 's' line: s %s" % (descriptor._name(), value))
def _parse_v_line(descriptor, entries):
# "v" version
# example: v Tor 0.2.2.35
#
# The spec says that if this starts with "Tor " then what follows is a
# tor version. If not then it has "upgraded to a more sophisticated
# protocol versioning system".
value = _value('v', entries)
descriptor.version_line = value
if value.startswith('Tor '):
try:
descriptor.version = stem.version._get_version(value[4:])
except ValueError as exc:
raise ValueError('%s has a malformed tor version (%s): v %s' % (descriptor._name(), exc, value))
def _parse_w_line(descriptor, entries):
# "w" "Bandwidth=" INT ["Measured=" INT] ["Unmeasured=1"]
# example: w Bandwidth=7980
value = _value('w', entries)
w_comp = value.split(' ')
if len(w_comp) < 1:
raise ValueError("%s 'w' line is blank: w %s" % (descriptor._name(), value))
elif not w_comp[0].startswith('Bandwidth='):
raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value))
bandwidth = None
measured = None
is_unmeasured = False
unrecognized_bandwidth_entries = []
for w_entry in w_comp:
if '=' in w_entry:
w_key, w_value = w_entry.split('=', 1)
else:
w_key, w_value = w_entry, None
if w_key == 'Bandwidth':
if not (w_value and w_value.isdigit()):
raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))
bandwidth = int(w_value)
elif w_key == 'Measured':
if not (w_value and w_value.isdigit()):
raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))
measured = int(w_value)
elif w_key == 'Unmeasured':
if w_value != '1':
raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value))
is_unmeasured = True
else:
unrecognized_bandwidth_entries.append(w_entry)
descriptor.bandwidth = bandwidth
descriptor.measured = measured
descriptor.is_unmeasured = is_unmeasured
descriptor.unrecognized_bandwidth_entries = unrecognized_bandwidth_entries
def _parse_p_line(descriptor, entries):
# "p" ("accept" / "reject") PortList
#
# examples:
#
# p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001
# p reject 1-65535
value = _value('p', entries)
try:
descriptor.exit_policy = stem.exit_policy.MicroExitPolicy(value)
except ValueError as exc:
raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value))
def _parse_id_line(descriptor, entries):
# "id" "ed25519" ed25519-identity
#
# examples:
#
# id ed25519 none
# id ed25519 8RH34kO07Pp+XYwzdoATVyCibIvmbslUjRkAm7J4IA8
value = _value('id', entries)
if value:
if descriptor.document and not descriptor.document.is_vote:
raise ValueError("%s 'id' line should only appear in votes: id %s" % (descriptor._name(), value))
value_comp = value.split()
if len(value_comp) >= 2:
descriptor.identifier_type = value_comp[0]
descriptor.identifier = value_comp[1]
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)
def _parse_m_line(descriptor, entries):
# "m" methods 1*(algorithm "=" digest)
# example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs
all_hashes = []
for value in _values('m', entries):
m_comp = value.split(' ')
if not (descriptor.document and descriptor.document.is_vote):
vote_status = 'consensus' if descriptor.document else '<undefined document>'
raise ValueError("%s 'm' line should only appear in votes (appeared in a %s): m %s" % (descriptor._name(), vote_status, value))
elif len(m_comp) < 1:
raise ValueError("%s 'm' line needs to start with a series of methods: m %s" % (descriptor._name(), value))
try:
methods = [int(entry) for entry in m_comp[0].split(',')]
except ValueError:
raise ValueError('%s microdescriptor methods should be a series of comma separated integers: m %s' % (descriptor._name(), value))
hashes = {}
for entry in m_comp[1:]:
if '=' not in entry:
raise ValueError("%s can only have a series of 'algorithm=digest' mappings after the methods: m %s" % (descriptor._name(), value))
hash_name, digest = entry.split('=', 1)
hashes[hash_name] = digest
all_hashes.append((methods, hashes))
descriptor.microdescriptor_hashes = all_hashes
def _parse_microdescriptor_m_line(descriptor, entries):
# "m" digest
# example: m aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70
descriptor.microdescriptor_digest = _value('m', entries)
# TODO: drop the following in stem 2.x
descriptor.digest = _base64_to_hex(_value('m', entries), check_if_fingerprint = False)
def _base64_to_hex(identity, check_if_fingerprint = True):
"""
Decodes a base64 value to hex. For example...
::
>>> _base64_to_hex('p1aag7VwarGxqctS7/fS0y5FU+s')
'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'
:param str identity: encoded fingerprint from the consensus
:param bool check_if_fingerprint: asserts that the result is a fingerprint if **True**
:returns: **str** with the uppercase hex encoding of the relay's fingerprint
:raises: **ValueError** if the result isn't a valid fingerprint
"""
try:
identity_decoded = stem.util.str_tools._decode_b64(stem.util.str_tools._to_bytes(identity))
except (TypeError, binascii.Error):
raise ValueError("Unable to decode identity string '%s'" % identity)
fingerprint = binascii.hexlify(identity_decoded).upper()
if stem.prereq.is_python_3():
fingerprint = stem.util.str_tools._to_unicode(fingerprint)
if check_if_fingerprint:
if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
raise ValueError("Decoded '%s' to be '%s', which isn't a valid fingerprint" % (identity, fingerprint))
return fingerprint
class RouterStatusEntry(Descriptor):
"""
Information about an individual router stored within a network status
document. This is the common parent for concrete status entry types.
:var stem.descriptor.networkstatus.NetworkStatusDocument document: **\\*** document that this descriptor came from
:var str nickname: **\\*** router's nickname
:var str fingerprint: **\\*** router's fingerprint
:var datetime published: **\\*** router's publication
:var str address: **\\*** router's IP address
:var int or_port: **\\*** router's ORPort
:var int dir_port: **\\*** router's DirPort
:var list flags: **\\*** list of :data:`~stem.Flag` associated with the relay
:var stem.version.Version version: parsed version of tor, this is **None** if
the relay's using a new versioning scheme
:var str version_line: versioning information reported by the relay
"""
ATTRIBUTES = {
'nickname': (None, _parse_r_line),
'fingerprint': (None, _parse_r_line),
'published': (None, _parse_r_line),
'address': (None, _parse_r_line),
'or_port': (None, _parse_r_line),
'dir_port': (None, _parse_r_line),
'flags': (None, _parse_s_line),
'version_line': (None, _parse_v_line),
'version': (None, _parse_v_line),
}
PARSER_FOR_LINE = {
'r': _parse_r_line,
's': _parse_s_line,
'v': _parse_v_line,
}
@classmethod
def from_str(cls, content, **kwargs):
# Router status entries don't have their own @type annotation, so to make
# our subclass from_str() work we need to do the type inferencing ourselves.
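#
# For instance, a minimal usage sketch (assuming 'content' holds a single
# v3 router status entry)...
#
#   entry = RouterStatusEntryV3.from_str(content)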
if cls == RouterStatusEntry:
raise NotImplementedError('Please use the from_str() method from RouterStatusEntry subclasses, not RouterStatusEntry itself')
elif 'descriptor_type' in kwargs:
raise ValueError("Router status entries don't have their own @type annotation. As such providing a 'descriptor_type' argument with RouterStatusEntry.from_str() does not work. Please drop the 'descriptor_type' argument when using this these subclasses' from_str() method.")
is_multiple = kwargs.pop('multiple', False)
validate = kwargs.pop('validate', False)
results = list(_parse_file(io.BytesIO(stem.util.str_tools._to_bytes(content)), validate, cls, **kwargs))
if is_multiple:
return results
elif len(results) == 1:
return results[0]
else:
raise ValueError("Descriptor.from_str() expected a single descriptor, but had %i instead. Please include 'multiple = True' if you want a list of results instead." % len(results))
def __init__(self, content, validate = False, document = None):
"""
Parse a router descriptor in a network status document.
:param str content: router descriptor content to be parsed
:param NetworkStatusDocument document: document this descriptor came from
:param bool validate: checks the validity of the content if **True**, skips
these checks otherwise
:raises: **ValueError** if the descriptor data is invalid
"""
super(RouterStatusEntry, self).__init__(content, lazy_load = not validate)
self.document = document
entries = _descriptor_components(content, validate)
if validate:
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self)))
for keyword in self._single_fields():
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self)))
if 'r' != list(entries.keys())[0]:
raise ValueError("%s are expected to start with a 'r' line:\n%s" % (self._name(True), str(self)))
self._parse(entries, validate)
else:
self._entries = entries
def _name(self, is_plural = False):
"""
Name for this descriptor type.
"""
return 'Router status entries' if is_plural else 'Router status entry'
def _required_fields(self):
"""
Provides lines that must appear in the descriptor.
"""
return ()
def _single_fields(self):
"""
Provides lines that can only appear in the descriptor once.
"""
return ()
class RouterStatusEntryV2(RouterStatusEntry):
"""
Information about an individual router stored within a version 2 network
status document.
:var str digest: **\\*** router's upper-case hex digest
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
TYPE_ANNOTATION_NAME = 'network-status-consensus-2'
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'digest': (None, _parse_r_line),
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())),
))
def _name(self, is_plural = False):
return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)'
def _required_fields(self):
return ('r',)
def _single_fields(self):
return ('r', 's', 'v')
class RouterStatusEntryBridgeV2(RouterStatusEntryV2):
"""
Information about an individual router stored within a bridge flavored
version 2 network status document.
.. versionadded:: 1.8.0
"""
TYPE_ANNOTATION_NAME = 'bridge-network-status'
class RouterStatusEntryV3(RouterStatusEntry):
"""
Information about an individual router stored within a version 3 network
status document.
:var list or_addresses: **\\*** relay's OR addresses, this is a list of
tuples of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
:var str identifier_type: identity digest key type
:var str identifier: base64 encoded identity digest
:var str digest: **\\*** router's upper-case hex digest
:var int bandwidth: bandwidth measured to be available by the relay, this is
a heuristic in arbitrary units (currently kilobytes per second) generated
by the bandwidth authorities to weight relay selection
:var int measured: *bandwidth* vote provided by a bandwidth authority
:var bool is_unmeasured: *bandwidth* measurement isn't based on three or more
measurements
:var list unrecognized_bandwidth_entries: **\\*** bandwidth weighting
information that isn't yet recognized
:var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy
:var dict protocols: mapping of protocols to their supported versions
:var list microdescriptor_hashes: **\\*** tuples of two values, the list of
consensus methods for generating a set of digests and the 'algorithm =>
digest' mappings
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.5.0
Added the identifier and identifier_type attributes.
.. versionchanged:: 1.6.0
Added the protocols attribute.
"""
TYPE_ANNOTATION_NAME = 'network-status-consensus-3'
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'digest': (None, _parse_r_line),
'or_addresses': ([], _parse_a_line),
'identifier_type': (None, _parse_id_line),
'identifier': (None, _parse_id_line),
'bandwidth': (None, _parse_w_line),
'measured': (None, _parse_w_line),
'is_unmeasured': (False, _parse_w_line),
'unrecognized_bandwidth_entries': ([], _parse_w_line),
'exit_policy': (None, _parse_p_line),
'protocols': ({}, _parse_pr_line),
'microdescriptor_hashes': ([], _parse_m_line),
})
PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
'a': _parse_a_line,
'w': _parse_w_line,
'p': _parse_p_line,
'pr': _parse_pr_line,
'id': _parse_id_line,
'm': _parse_m_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())),
('s', 'Fast Named Running Stable Valid'),
))
def _name(self, is_plural = False):
return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)'
def _required_fields(self):
return ('r', 's')
def _single_fields(self):
return ('r', 's', 'v', 'w', 'p', 'pr')
class RouterStatusEntryMicroV3(RouterStatusEntry):
"""
Information about an individual router stored within a microdescriptor
flavored network status document.
:var list or_addresses: **\\*** relay's OR addresses, this is a list of
tuples of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
:var int bandwidth: bandwidth claimed by the relay (in kb/s)
:var int measured: bandwidth measured to be available by the relay
:var bool is_unmeasured: bandwidth measurement isn't based on three or more
measurements
:var list unrecognized_bandwidth_entries: **\\*** bandwidth weighting
information that isn't yet recognized
:var dict protocols: mapping of protocols to their supported versions
:var str digest: **\\*** router's hex encoded digest of our corresponding
microdescriptor (**deprecated**, use microdescriptor_digest instead)
:var str microdescriptor_digest: **\\*** router's base64 encoded digest of our corresponding microdescriptor
.. versionchanged:: 1.6.0
Added the protocols attribute.
.. versionchanged:: 1.7.0
Added the or_addresses attribute.
.. versionchanged:: 1.7.0
Added the microdescriptor_digest attribute to replace our now deprecated digest attribute.
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
TYPE_ANNOTATION_NAME = 'network-status-microdesc-consensus-3'
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'or_addresses': ([], _parse_a_line),
'bandwidth': (None, _parse_w_line),
'measured': (None, _parse_w_line),
'is_unmeasured': (False, _parse_w_line),
'unrecognized_bandwidth_entries': ([], _parse_w_line),
'protocols': ({}, _parse_pr_line),
'microdescriptor_digest': (None, _parse_microdescriptor_m_line),
'digest': (None, _parse_microdescriptor_m_line),
})
PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
'a': _parse_a_line,
'w': _parse_w_line,
'm': _parse_microdescriptor_m_line,
'pr': _parse_pr_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s ARIJF2zbqirB9IwsW0mQznccWww %s %s 9001 9030' % (_random_nickname(), _random_date(), _random_ipv4_address())),
('m', 'aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70'),
('s', 'Fast Guard HSDir Named Running Stable V2Dir Valid'),
))
def _name(self, is_plural = False):
return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)'
def _required_fields(self):
return ('r', 's', 'm')
def _single_fields(self):
return ('r', 's', 'v', 'w', 'm', 'pr')
stem-1.8.0/stem/descriptor/tordnsel.py 0000664 0001750 0001750 00000007620 13501272761 020555 0 ustar atagar atagar 0000000 0000000 # Copyright 2013-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `TorDNSEL `_
exit list files.
::
TorDNSEL - Exit list provided by TorDNSEL
"""
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.descriptor import (
Descriptor,
_read_until_keywords,
_descriptor_components,
)
def _parse_file(tordnsel_file, validate = False, **kwargs):
"""
Iterates over a tordnsel file.
:returns: iterator for :class:`~stem.descriptor.tordnsel.TorDNSEL`
instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
# skip content prior to the first ExitNode
_read_until_keywords('ExitNode', tordnsel_file, skip = True)
while True:
contents = _read_until_keywords('ExitAddress', tordnsel_file)
contents += _read_until_keywords('ExitNode', tordnsel_file)
if contents:
yield TorDNSEL(bytes.join(b'', contents), validate, **kwargs)
else:
break # done parsing file
class TorDNSEL(Descriptor):
"""
TorDNSEL descriptor (`exitlist specification
`_)
:var str fingerprint: **\\*** relay's fingerprint
:var datetime published: **\\*** time in UTC when this descriptor was made
:var datetime last_status: **\\*** time in UTC when the relay was seen in a v2 network status
:var list exit_addresses: **\\*** list of (str address, datetime date) tuples consisting of the found IPv4 exit address and the time
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
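For example, a minimal sketch (assuming 'exit-addresses' is a downloaded
TorDNSEL exit list file)...
::
  import stem.descriptor
  for desc in stem.descriptor.parse_file('exit-addresses', 'tordnsel 1.0'):
    for address, timestamp in desc.exit_addresses:
      print('%s exited from %s at %s' % (desc.fingerprint, address, timestamp))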
"""
TYPE_ANNOTATION_NAME = 'tordnsel'
def __init__(self, raw_contents, validate):
super(TorDNSEL, self).__init__(raw_contents)
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = _descriptor_components(raw_contents, validate)
self.fingerprint = None
self.published = None
self.last_status = None
self.exit_addresses = []
self._parse(entries, validate)
def _parse(self, entries, validate):
for keyword, values in list(entries.items()):
value, block_type, block_content = values[0]
if validate and block_content:
raise ValueError('Unexpected block content: %s' % block_content)
if keyword == 'ExitNode':
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)
self.fingerprint = value
elif keyword == 'Published':
try:
self.published = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("Published time wasn't parsable: %s" % value)
elif keyword == 'LastStatus':
try:
self.last_status = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("LastStatus time wasn't parsable: %s" % value)
elif keyword == 'ExitAddress':
for value, block_type, block_content in values:
address, date = value.split(' ', 1)
if validate:
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("ExitAddress isn't a valid IPv4 address: %s" % address)
elif block_content:
raise ValueError('Unexpected block content: %s' % block_content)
try:
date = stem.util.str_tools._parse_timestamp(date)
self.exit_addresses.append((address, date))
except ValueError:
if validate:
raise ValueError("ExitAddress found time wasn't parsable: %s" % value)
elif validate:
raise ValueError('Unrecognized keyword: %s' % keyword)
stem-1.8.0/stem/descriptor/__init__.py 0000664 0001750 0001750 00000156516 13565322274 020500 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Package for parsing and processing descriptor data.
**Module Overview:**
::
parse_file - Parses the descriptors in a file.
create_signing_key - Creates a signing key that can be used for creating descriptors.
Compression - method of descriptor decompression
Descriptor - Common parent for all descriptor file types.
| |- content - creates the text of a new descriptor
| |- create - creates a new descriptor
| +- from_str - provides a parsed descriptor for the given string
|
|- type_annotation - provides our @type annotation
|- get_path - location of the descriptor on disk if it came from a file
|- get_archive_path - location of the descriptor within the archive it came from
|- get_bytes - similar to str(), but provides our original bytes content
|- get_unrecognized_lines - unparsed descriptor content
+- __str__ - string that the descriptor was made from
.. data:: DigestHash (enum)
.. versionadded:: 1.8.0
Hash function used by tor for descriptor digests.
=========== ===========
DigestHash Description
=========== ===========
SHA1 SHA1 hash
SHA256 SHA256 hash
=========== ===========
.. data:: DigestEncoding (enum)
.. versionadded:: 1.8.0
Encoding of descriptor digests.
================= ===========
DigestEncoding Description
================= ===========
RAW hash object
HEX uppercase hexadecimal encoding
BASE64 base64 encoding `without trailing '=' padding `_
================= ===========
.. data:: DocumentHandler (enum)
Ways in which we can parse a
:class:`~stem.descriptor.networkstatus.NetworkStatusDocument`.
Both **ENTRIES** and **BARE_DOCUMENT** have a 'thin' document, which doesn't
have a populated **routers** attribute. This allows for lower memory usage
and less upfront runtime. However, if read time and memory aren't a concern then
**DOCUMENT** can provide you with a fully populated document.
Handlers don't change the fact that most methods that provide
descriptors return an iterator. In the case of **DOCUMENT** and
**BARE_DOCUMENT** that iterator would have just a single item -
the document itself.
A simple way to handle this is to call **next()** to get the iterator's one and
only value...
::
import stem.descriptor.remote
from stem.descriptor import DocumentHandler
consensus = next(stem.descriptor.remote.get_consensus(
document_handler = DocumentHandler.BARE_DOCUMENT,
))
=================== ===========
DocumentHandler Description
=================== ===========
**ENTRIES** Iterates over the contained :class:`~stem.descriptor.router_status_entry.RouterStatusEntry`. Each has a reference to the bare document it came from (through its **document** attribute).
**DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` with the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` it contains (through its **routers** attribute).
**BARE_DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` **without** a reference to its contents (the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` are unread).
=================== ===========
"""
import base64
import codecs
import collections
import copy
import io
import os
import random
import re
import string
import tarfile
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.str_tools
import stem.util.system
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
__all__ = [
'bandwidth_file',
'certificate',
'collector',
'export',
'extrainfo_descriptor',
'hidden_service',
'microdescriptor',
'networkstatus',
'reader',
'remote',
'router_status_entry',
'server_descriptor',
'tordnsel',
'Descriptor',
'parse_file',
]
UNSEEKABLE_MSG = """\
File object isn't seekable. Try using Descriptor.from_str() instead:
content = my_file.read()
parsed_descriptors = stem.descriptor.Descriptor.from_str(content)
"""
KEYWORD_CHAR = 'a-zA-Z0-9-'
WHITESPACE = ' \t'
KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE))
SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE
PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE))
PGP_BLOCK_END = '-----END %s-----'
EMPTY_COLLECTION = ([], {}, set())
DIGEST_TYPE_INFO = b'\x00\x01'
DIGEST_PADDING = b'\xFF'
DIGEST_SEPARATOR = b'\x00'
CRYPTO_BLOB = """
MIGJAoGBAJv5IIWQ+WDWYUdyA/0L8qbIkEVH/cwryZWoIaPAzINfrw1WfNZGtBmg
skFtXhOHHqTRN4GPPrZsAIUOQGzQtGb66IQgT4tO/pj+P6QmSCCdTfhvGfgTCsC+
WPi4Fl2qryzTb3QO5r5x7T8OsG2IBUET1bLQzmtbC560SYR49IvVAgMBAAE=
"""
DigestHash = stem.util.enum.UppercaseEnum(
'SHA1',
'SHA256',
)
DigestEncoding = stem.util.enum.UppercaseEnum(
'RAW',
'HEX',
'BASE64',
)
DocumentHandler = stem.util.enum.UppercaseEnum(
'ENTRIES',
'DOCUMENT',
'BARE_DOCUMENT',
)
class _Compression(object):
"""
Compression method supported by CollecTor.
:var bool available: **True** if this method of decompression is available,
**False** otherwise
:var str encoding: `http 'Accept-Encoding' parameter `_
:var str extension: file extension of this compression
.. versionadded:: 1.8.0
"""
def __init__(self, name, module, encoding, extension, decompression_func):
if module is None:
self._module = None
self.available = True
else:
# Compression modules are optional. Usually gzip and bz2 are available,
# but they might be missing if compiling python yourself. As for lzma it
# was added in python 3.3.
try:
self._module = __import__(module)
self.available = True
except ImportError:
self._module = None
self.available = False
self.extension = extension
self.encoding = encoding
self._name = name
self._module_name = module
self._decompression_func = decompression_func
def decompress(self, content):
"""
Decompresses the given content via this method.
:param bytes content: content to be decompressed
:returns: **bytes** with the decompressed content
:raises:
If unable to decompress, this raises...
* **IOError** if the content isn't compressed with this method
* **ImportError** if this method of decompression is unavailable
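For instance, a minimal sketch that round-trips gzip content (output shown
for python 3)...
::
  >>> import zlib
  >>> from stem.descriptor import Compression
  >>> Compression.GZIP.decompress(zlib.compress(b'hello world'))
  b'hello world'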
"""
if not self.available:
if self._name == 'zstd':
raise ImportError('Decompressing zstd data requires https://pypi.org/project/zstandard/')
elif self._name == 'lzma':
raise ImportError('Decompressing lzma data requires https://docs.python.org/3/library/lzma.html')
else:
raise ImportError("'%s' decompression module is unavailable" % self._module_name)
try:
return self._decompression_func(self._module, content)
except Exception as exc:
raise IOError('Failed to decompress as %s: %s' % (self, exc))
def __str__(self):
return self._name
def _zstd_decompress(module, content):
output_buffer = io.BytesIO()
with module.ZstdDecompressor().write_to(output_buffer) as decompressor:
decompressor.write(content)
return output_buffer.getvalue()
Compression = stem.util.enum.Enum(
('PLAINTEXT', _Compression('plaintext', None, 'identity', '.txt', lambda module, content: content)),
('GZIP', _Compression('gzip', 'zlib', 'gzip', '.gz', lambda module, content: module.decompress(content, module.MAX_WBITS | 32))),
('BZ2', _Compression('bzip2', 'bz2', 'bzip2', '.bz2', lambda module, content: module.decompress(content))),
('LZMA', _Compression('lzma', 'lzma', 'x-tor-lzma', '.xz', lambda module, content: module.decompress(content))),
('ZSTD', _Compression('zstd', 'zstd', 'x-zstd', '.zst', _zstd_decompress)),
)
class TypeAnnotation(collections.namedtuple('TypeAnnotation', ['name', 'major_version', 'minor_version'])):
"""
`Tor metrics type annotation
`_. The
string representation is the header annotation, for example "@type
server-descriptor 1.0".
.. versionadded:: 1.8.0
:var str name: name of the descriptor type
:var int major_version: major version number
:var int minor_version: minor version number
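For example...
::
  >>> str(TypeAnnotation('server-descriptor', 1, 0))
  '@type server-descriptor 1.0'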
"""
def __str__(self):
return '@type %s %s.%s' % (self.name, self.major_version, self.minor_version)
class SigningKey(collections.namedtuple('SigningKey', ['private', 'public', 'public_digest'])):
"""
Key used by relays to sign their server and extrainfo descriptors.
.. versionadded:: 1.6.0
:var cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private: private key
:var cryptography.hazmat.backends.openssl.rsa._RSAPublicKey public: public key
:var bytes public_digest: block that can be used for a server descriptor's 'signing-key' field
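For instance, a minimal sketch...
::
  import stem.descriptor
  signing_key = stem.descriptor.create_signing_key()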
"""
def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, normalize_newlines = None, **kwargs):
"""
Simple function to read the descriptor contents from a file, providing an
iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents.
If you don't provide a **descriptor_type** argument then this automatically
tries to determine the descriptor type based on the following...
* The @type annotation on the first line. These are generally only found in
the `CollecTor archives `_.
* The filename if it matches something from tor's data directory. For
instance, tor's 'cached-descriptors' contains server descriptors.
This is a handy function for simple usage, but if you're reading multiple
descriptor files you might want to consider the
:class:`~stem.descriptor.reader.DescriptorReader`.
Descriptor types include the following, as well as further minor versions (i.e.
if we support 1.1 then we also support everything from 1.0 and most things
from 1.2, but not 2.0)...
========================================= =====
Descriptor Type Class
========================================= =====
server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.RelayDescriptor`
extra-info 1.0 :class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor`
microdescriptor 1.0 :class:`~stem.descriptor.microdescriptor.Microdescriptor`
directory 1.0 **unsupported**
network-status-2 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV2`)
dir-key-certificate-3 1.0 :class:`~stem.descriptor.networkstatus.KeyCertificate`
network-status-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
network-status-vote-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
network-status-microdesc-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
bridge-network-status 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.BridgeNetworkStatusDocument`)
bridge-server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.BridgeDescriptor`
bridge-extra-info 1.1 or 1.2 :class:`~stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor`
torperf 1.0 **unsupported**
bridge-pool-assignment 1.0 **unsupported**
tordnsel 1.0 :class:`~stem.descriptor.tordnsel.TorDNSEL`
hidden-service-descriptor 1.0 :class:`~stem.descriptor.hidden_service.HiddenServiceDescriptorV2`
========================================= =====
If you're using **python 3** then beware that the open() function defaults to
using text mode. **Binary mode** is strongly suggested because it's both
faster (by my testing by about 33x) and doesn't do universal newline
translation which can make us misparse the document.
::
my_descriptor_file = open(descriptor_path, 'rb')
:param str,file,tarfile descriptor_file: path or opened file with the descriptor contents
:param str descriptor_type: `descriptor type `_, this is guessed if not provided
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param bool normalize_newlines: converts windows newlines (CRLF), this is the
default when reading data directories on windows
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **TypeError** if we can't match the contents of the file to a descriptor type
* **IOError** if unable to read from the descriptor_file
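For example, a minimal sketch (the data directory path is an illustrative
assumption)...
::
  import stem.descriptor
  with open('/home/atagar/.tor/cached-consensus', 'rb') as consensus_file:
    for router in stem.descriptor.parse_file(consensus_file):
      print('%s (%s)' % (router.nickname, router.fingerprint))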
"""
# Delegate to a helper if this is a path or tarfile.
handler = None
if stem.util._is_str(descriptor_file):
if stem.util.system.is_tarfile(descriptor_file):
handler = _parse_file_for_tar_path
else:
handler = _parse_file_for_path
elif isinstance(descriptor_file, tarfile.TarFile):
handler = _parse_file_for_tarfile
if handler:
for desc in handler(descriptor_file, descriptor_type, validate, document_handler, **kwargs):
yield desc
return
# Not all files are seekable. If unseekable then we advise the user.
#
# Python 3.x adds an io.seekable() method, but python 2.x lacks an
# equivalent, so we make a trial call to tell() to determine this.
try:
descriptor_file.tell()
except IOError:
raise IOError(UNSEEKABLE_MSG)
# The tor descriptor specifications do not provide a reliable method for
# identifying a descriptor file's type and version so we need to guess
# based on its filename. Metrics descriptors, however, can be identified
# by an annotation on their first line...
# https://trac.torproject.org/5651
initial_position = descriptor_file.tell()
first_line = stem.util.str_tools._to_unicode(descriptor_file.readline().strip())
metrics_header_match = re.match('^@type (\\S+) (\\d+)\\.(\\d+)$', first_line)
if not metrics_header_match:
descriptor_file.seek(initial_position)
descriptor_path = getattr(descriptor_file, 'name', None)
filename = '' if descriptor_path is None else os.path.basename(descriptor_file.name)
def parse(descriptor_file):
if normalize_newlines:
descriptor_file = NewlineNormalizer(descriptor_file)
if descriptor_type is not None:
descriptor_type_match = re.match('^(\\S+) (\\d+)\\.(\\d+)$', descriptor_type)
if descriptor_type_match:
desc_type, major_version, minor_version = descriptor_type_match.groups()
return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs)
else:
raise ValueError("The descriptor_type must be of the form ' .'")
elif metrics_header_match:
# Metrics descriptor handling
desc_type, major_version, minor_version = metrics_header_match.groups()
return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs)
else:
# Cached descriptor handling. These contain multiple descriptors per file.
if normalize_newlines is None and stem.util.system.is_windows():
descriptor_file = NewlineNormalizer(descriptor_file)
if filename == 'cached-descriptors' or filename == 'cached-descriptors.new':
return stem.descriptor.server_descriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new':
return stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new':
return stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-consensus':
return stem.descriptor.networkstatus._parse_file(descriptor_file, validate = validate, document_handler = document_handler, **kwargs)
elif filename == 'cached-microdesc-consensus':
return stem.descriptor.networkstatus._parse_file(descriptor_file, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs)
else:
raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))
for desc in parse(descriptor_file):
if descriptor_path is not None:
desc._set_path(os.path.abspath(descriptor_path))
yield desc
def _parse_file_for_path(descriptor_file, *args, **kwargs):
with open(descriptor_file, 'rb') as desc_file:
for desc in parse_file(desc_file, *args, **kwargs):
yield desc
def _parse_file_for_tar_path(descriptor_file, *args, **kwargs):
# TODO: use 'with' for tarfile after dropping python 2.6 support
tar_file = tarfile.open(descriptor_file)
try:
for desc in parse_file(tar_file, *args, **kwargs):
desc._set_path(os.path.abspath(descriptor_file))
yield desc
finally:
if tar_file:
tar_file.close()
def _parse_file_for_tarfile(descriptor_file, *args, **kwargs):
for tar_entry in descriptor_file:
if tar_entry.isfile():
entry = descriptor_file.extractfile(tar_entry)
if tar_entry.size == 0:
continue
try:
for desc in parse_file(entry, *args, **kwargs):
desc._set_archive_path(entry.name)
yield desc
finally:
entry.close()
def _parse_metrics_file(descriptor_type, major_version, minor_version, descriptor_file, validate, document_handler, **kwargs):
# Parses descriptor files from metrics, yielding individual descriptors. This
# throws a TypeError if the descriptor_type or version isn't recognized.
if descriptor_type == stem.descriptor.server_descriptor.RelayDescriptor.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.server_descriptor.BridgeDescriptor.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.microdescriptor.Microdescriptor.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor.TYPE_ANNOTATION_NAME and major_version == 1:
# version 1.1 introduced a 'transport' field...
# https://trac.torproject.org/6257
for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.networkstatus.NetworkStatusDocumentV2.TYPE_ANNOTATION_NAME and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV2
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.networkstatus.KeyCertificate.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.networkstatus._parse_file_key_certs(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type in ('network-status-consensus-3', 'network-status-vote-3') and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'network-status-microdesc-consensus-3' and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.networkstatus.BridgeNetworkStatusDocument.TYPE_ANNOTATION_NAME and major_version == 1:
document_type = stem.descriptor.networkstatus.BridgeNetworkStatusDocument
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.networkstatus.DetachedSignature.TYPE_ANNOTATION_NAME and major_version == 1:
document_type = stem.descriptor.networkstatus.DetachedSignature
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.tordnsel.TorDNSEL.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.tordnsel._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.hidden_service.HiddenServiceDescriptorV2.TYPE_ANNOTATION_NAME and major_version == 1:
desc_type = stem.descriptor.hidden_service.HiddenServiceDescriptorV2
for desc in stem.descriptor.hidden_service._parse_file(descriptor_file, desc_type, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.hidden_service.HiddenServiceDescriptorV3.TYPE_ANNOTATION_NAME and major_version == 1:
desc_type = stem.descriptor.hidden_service.HiddenServiceDescriptorV3
for desc in stem.descriptor.hidden_service._parse_file(descriptor_file, desc_type, validate = validate, **kwargs):
yield desc
elif descriptor_type == stem.descriptor.bandwidth_file.BandwidthFile.TYPE_ANNOTATION_NAME and major_version == 1:
for desc in stem.descriptor.bandwidth_file._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
else:
raise TypeError("Unrecognized metrics descriptor format. type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version))
def _descriptor_content(attr = None, exclude = (), header_template = (), footer_template = ()):
"""
Constructs a minimal descriptor with the given attributes. The content we
provide back is of the form...
* header_template (with matching attr filled in)
* unused attr entries
* footer_template (with matching attr filled in)
So for instance...
::
_descriptor_content(
attr = {'nickname': 'caerSidi', 'contact': 'atagar'},
header_template = (
('nickname', 'foobar'),
('fingerprint', '12345'),
),
)
... would result in...
::
nickname caerSidi
fingerprint 12345
contact atagar
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor
:param tuple header_template: key/value pairs for mandatory fields before unrecognized content
:param tuple footer_template: key/value pairs for mandatory fields after unrecognized content
:returns: bytes with the requested descriptor content
"""
header_content, footer_content = [], []
attr = {} if attr is None else OrderedDict(attr) # shallow copy since we're destructive
for content, template in ((header_content, header_template),
(footer_content, footer_template)):
for keyword, value in template:
if keyword in exclude:
continue
value = stem.util.str_tools._to_unicode(attr.pop(keyword, value))
if value is None:
continue
elif isinstance(value, (tuple, list)):
for v in value:
content.append('%s %s' % (keyword, v))
elif value == '':
content.append(keyword)
elif value.startswith('\n'):
# some values like crypto follow the line instead
content.append('%s%s' % (keyword, value))
else:
content.append('%s %s' % (keyword, value))
remainder = []
for k, v in attr.items():
if isinstance(v, (tuple, list)):
remainder += ['%s %s' % (k, entry) for entry in v]
else:
remainder.append('%s %s' % (k, v))
return stem.util.str_tools._to_bytes('\n'.join(header_content + remainder + footer_content))
def _value(line, entries):
return entries[line][0][0]
def _values(line, entries):
return [entry[0] for entry in entries[line]]
def _parse_simple_line(keyword, attribute, func = None):
def _parse(descriptor, entries):
value = _value(keyword, entries)
setattr(descriptor, attribute, func(value) if func else value)
return _parse
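# For example, creating a parser for a hypothetical 'contact' line...
#
#   _parse_contact_line = _parse_simple_line('contact', 'contact')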
def _parse_if_present(keyword, attribute):
return lambda descriptor, entries: setattr(descriptor, attribute, keyword in entries)
def _parse_bytes_line(keyword, attribute):
def _parse(descriptor, entries):
line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE)
result = None
if line_match:
value = line_match.groups()[1]
result = b'' if value is None else value
setattr(descriptor, attribute, result)
return _parse
def _parse_int_line(keyword, attribute, allow_negative = True):
def _parse(descriptor, entries):
value = _value(keyword, entries)
try:
int_val = int(value)
except ValueError:
raise ValueError('%s must have a numeric value: %s' % (keyword, value))
if not allow_negative and int_val < 0:
raise ValueError('%s must have a non-negative value: %s' % (keyword, value))
setattr(descriptor, attribute, int_val)
return _parse
def _parse_timestamp_line(keyword, attribute):
# "" YYYY-MM-DD HH:MM:SS
def _parse(descriptor, entries):
value = _value(keyword, entries)
try:
setattr(descriptor, attribute, stem.util.str_tools._parse_timestamp(value))
except ValueError:
raise ValueError("Timestamp on %s line wasn't parsable: %s %s" % (keyword, keyword, value))
return _parse
def _parse_forty_character_hex(keyword, attribute):
# format of fingerprints, sha1 digests, etc
def _parse(descriptor, entries):
value = _value(keyword, entries)
if not stem.util.tor_tools.is_hex_digits(value, 40):
raise ValueError('%s line had an invalid value (should be 40 hex characters): %s %s' % (keyword, keyword, value))
setattr(descriptor, attribute, value)
return _parse
def _parse_protocol_line(keyword, attribute):
def _parse(descriptor, entries):
# parses 'protocol' entries like: Cons=1-2 Desc=1-2 DirCache=1 HSDir=1
value = _value(keyword, entries)
protocols = OrderedDict()
for k, v in _mappings_for(keyword, value):
versions = []
if not v:
continue
for entry in v.split(','):
if '-' in entry:
min_value, max_value = entry.split('-', 1)
else:
min_value = max_value = entry
if not min_value.isdigit() or not max_value.isdigit():
raise ValueError('Protocol values should be a number or number range, but was: %s %s' % (keyword, value))
versions += range(int(min_value), int(max_value) + 1)
protocols[k] = versions
setattr(descriptor, attribute, protocols)
return _parse
def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None):
def _parse(descriptor, entries):
value, block_type, block_contents = entries[keyword][0]
if not block_contents or block_type != expected_block_type:
raise ValueError("'%s' should be followed by a %s block, but was a %s" % (keyword, expected_block_type, block_type))
setattr(descriptor, attribute, block_contents)
if value_attribute:
setattr(descriptor, value_attribute, value)
return _parse
def _mappings_for(keyword, value, require_value = False, divider = ' '):
"""
Parses an attribute as a series of 'key=value' mappings. Unlike _parse_*
functions this is a helper, returning the attribute value rather than setting
a descriptor field. This way parsers can perform additional validations.
:param str keyword: descriptor field being parsed
:param str value: 'attribute => values' mappings to parse
:param bool require_value: validates that values are not empty
:param str divider: separator between the key/value mappings
:returns: **generator** with the key/value of the map attribute
:raises: **ValueError** if descriptor content is invalid
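For example...
::
  >>> list(_mappings_for('pr', 'Cons=1-2 Desc=1-2 HSDir=1'))
  [('Cons', '1-2'), ('Desc', '1-2'), ('HSDir', '1')]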
"""
if value is None:
return  # no descriptor value to process
elif value == '':
return # descriptor field was present, but blank
for entry in value.split(divider):
if '=' not in entry:
raise ValueError("'%s' should be a series of 'key=value' pairs but was: %s" % (keyword, value))
k, v = entry.split('=', 1)
if require_value and not v:
raise ValueError("'%s' line's %s mapping had a blank value: %s" % (keyword, k, value))
yield k, v
def _copy(default):
if default is None or isinstance(default, (bool, stem.exit_policy.ExitPolicy)):
return default # immutable
elif default in EMPTY_COLLECTION:
return type(default)() # collection construction tad faster than copy
else:
return copy.copy(default)
def _encode_digest(hash_value, encoding):
"""
Encodes a hash value with the given DigestEncoding.
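For instance, a minimal sketch that hashes arbitrary content...
::
  import hashlib
  hex_digest = _encode_digest(hashlib.sha1(b'content'), DigestEncoding.HEX)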
"""
if encoding == DigestEncoding.RAW:
return hash_value
elif encoding == DigestEncoding.HEX:
return stem.util.str_tools._to_unicode(hash_value.hexdigest().upper())
elif encoding == DigestEncoding.BASE64:
return stem.util.str_tools._to_unicode(base64.b64encode(hash_value.digest()).rstrip(b'='))
elif encoding not in DigestEncoding:
raise ValueError('Digest encodings should be among our DigestEncoding enumeration (%s), not %s' % (', '.join(DigestEncoding), encoding))
else:
raise NotImplementedError('BUG: stem.descriptor._encode_digest should recognize all DigestEncoding, lacked %s' % encoding)
class Descriptor(object):
"""
Common parent for all types of descriptors.
"""
ATTRIBUTES = {} # mapping of 'attribute' => (default_value, parsing_function)
PARSER_FOR_LINE = {} # line keyword to its associated parsing function
TYPE_ANNOTATION_NAME = None
def __init__(self, contents, lazy_load = False):
self._path = None
self._archive_path = None
self._raw_contents = contents
self._lazy_loading = lazy_load
self._entries = {}
self._hash = None
self._unrecognized_lines = []
@classmethod
def from_str(cls, content, **kwargs):
"""
Provides a :class:`~stem.descriptor.__init__.Descriptor` for the given content.
To parse a descriptor we must know its type. There are three ways to
convey this...
::
# use a descriptor_type argument
desc = Descriptor.from_str(content, descriptor_type = 'server-descriptor 1.0')
# prefix the content with a "@type" annotation
desc = Descriptor.from_str('@type server-descriptor 1.0\\n' + content)
# use this method from a subclass
desc = stem.descriptor.server_descriptor.RelayDescriptor.from_str(content)
.. versionadded:: 1.8.0
:param str,bytes content: string to construct the descriptor from
:param bool multiple: if provided with **True** this provides a list of
descriptors rather than a single one
:param dict kwargs: additional arguments for :func:`~stem.descriptor.__init__.parse_file`
:returns: :class:`~stem.descriptor.__init__.Descriptor` subclass for the
given content, or a **list** of descriptors if **multiple = True** is
provided
:raises:
* **ValueError** if the contents is malformed and validate is True
* **TypeError** if we can't match the contents of the file to a descriptor type
* **IOError** if unable to read the content
"""
if 'descriptor_type' not in kwargs and cls.TYPE_ANNOTATION_NAME is not None:
kwargs['descriptor_type'] = str(TypeAnnotation(cls.TYPE_ANNOTATION_NAME, 1, 0))[6:]
is_multiple = kwargs.pop('multiple', False)
results = list(parse_file(io.BytesIO(stem.util.str_tools._to_bytes(content)), **kwargs))
if is_multiple:
return results
elif len(results) == 1:
return results[0]
else:
raise ValueError("Descriptor.from_str() expected a single descriptor, but had %i instead. Please include 'multiple = True' if you want a list of results instead." % len(results))
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
"""
Creates descriptor content with the given attributes. Mandatory fields are
filled with dummy information unless data is supplied. This doesn't yet
create a valid signature.
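For example, stubbing out unsigned content might look like this (a minimal
sketch, the 'contact' value is made up)...
::
  print(stem.descriptor.server_descriptor.RelayDescriptor.content({'contact': 'alice'}))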
.. versionadded:: 1.6.0
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor, this
results in an invalid descriptor
:param bool sign: includes cryptographic signatures and digests if True
:returns: **str** with the content of a descriptor
:raises:
* **ImportError** if cryptography is unavailable and sign is True
* **NotImplementedError** if not implemented for this descriptor type
"""
# TODO: drop the 'sign' argument in stem 2.x (only a few subclasses use this)
raise NotImplementedError("The create and content methods haven't been implemented for %s" % cls.__name__)
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False):
"""
Creates a descriptor with the given attributes. Mandatory fields are filled
with dummy information unless data is supplied. This doesn't yet create a
valid signature.
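For example, making a descriptor for tests might look like this (a minimal
sketch, the 'contact' value is made up)...
::
  desc = stem.descriptor.server_descriptor.RelayDescriptor.create({'contact': 'alice'})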
.. versionadded:: 1.6.0
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor, this
results in an invalid descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param bool sign: includes cryptographic signatures and digests if True
:returns: :class:`~stem.descriptor.Descriptor` subclass
:raises:
* **ValueError** if the contents is malformed and validate is True
* **ImportError** if cryptography is unavailable and sign is True
* **NotImplementedError** if not implemented for this descriptor type
"""
return cls(cls.content(attr, exclude, sign), validate = validate)
def type_annotation(self):
"""
Provides the `Tor metrics annotation
`_ of this
descriptor type. For example, "@type server-descriptor 1.0" for server
descriptors.
Please note that the version number component is specific to CollecTor,
and for the moment hardcoded as 1.0. This may change in the future.
.. versionadded:: 1.8.0
:returns: :class:`~stem.descriptor.TypeAnnotation` with our type information
"""
# TODO: populate this from the archive instead if available (so we have correct version numbers)
if self.TYPE_ANNOTATION_NAME is not None:
return TypeAnnotation(self.TYPE_ANNOTATION_NAME, 1, 0)
else:
raise NotImplementedError('%s does not have a @type annotation' % type(self).__name__)
def get_path(self):
"""
Provides the absolute path that we loaded this descriptor from.
:returns: **str** with the absolute path of the descriptor source
"""
return self._path
def get_archive_path(self):
"""
If this descriptor came from an archive then provides its path within the
archive. This is only set if the descriptor came from a
:class:`~stem.descriptor.reader.DescriptorReader`, and is **None** if this
descriptor didn't come from an archive.
:returns: **str** with the descriptor's path within the archive
"""
return self._archive_path
def get_bytes(self):
"""
Provides the ASCII **bytes** of the descriptor. This only differs from
**str()** if you're running python 3.x, in which case **str()** provides a
**unicode** string.
:returns: **bytes** for the descriptor's contents
"""
return stem.util.str_tools._to_bytes(self._raw_contents)
def get_unrecognized_lines(self):
"""
Provides a list of lines that were either ignored or had data that we did
not know how to process. This is most commonly due to new descriptor fields
that this library does not yet know how to process. Patches welcome!
:returns: **list** of lines of unrecognized content
"""
if self._lazy_loading:
# we need to go ahead and parse the whole document to figure this out
self._parse(self._entries, False)
self._lazy_loading = False
return list(self._unrecognized_lines)
def _parse(self, entries, validate, parser_for_line = None):
"""
Parses a series of 'keyword => (value, pgp block)' mappings and applies
them as attributes.
:param dict entries: descriptor contents to be applied
:param bool validate: checks the validity of descriptor content if True
:param dict parser_for_line: mapping of line keywords to their parsing function
:raises: **ValueError** if an error occurs in validation
"""
if parser_for_line is None:
parser_for_line = self.PARSER_FOR_LINE
for keyword, values in list(entries.items()):
try:
if keyword in parser_for_line:
parser_for_line[keyword](self, entries)
else:
for value, block_type, block_contents in values:
line = '%s %s' % (keyword, value)
if block_contents:
line += '\n%s' % block_contents
self._unrecognized_lines.append(line)
except ValueError:
if validate:
raise
def _set_path(self, path):
self._path = path
def _set_archive_path(self, path):
self._archive_path = path
def _name(self, is_plural = False):
return str(type(self))
def _digest_for_signature(self, signing_key, signature):
"""
Provides the signed digest we should have given this key and signature.
:param str signing_key: key block used to make this signature
:param str signature: signed digest for this descriptor content
:returns: the digest string encoded in uppercase hex
:raises: ValueError if unable to provide a validly signed digest
"""
if not stem.prereq.is_crypto_available():
raise ValueError('Generating the signed digest requires the cryptography module')
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.utils import int_to_bytes, int_from_bytes
key = load_der_public_key(_bytes_for_block(signing_key), default_backend())
modulus = key.public_numbers().n
public_exponent = key.public_numbers().e
sig_as_bytes = _bytes_for_block(signature)
sig_as_long = int_from_bytes(sig_as_bytes, byteorder='big') # convert signature to an int
blocksize = len(sig_as_bytes) # 256B for NetworkStatusDocuments, 128B for others
# use the public exponent[e] & the modulus[n] to decrypt the int
decrypted_int = pow(sig_as_long, public_exponent, modulus)
# convert the int to a byte array
decrypted_bytes = int_to_bytes(decrypted_int, blocksize)
############################################################################
# The decrypted bytes should have a structure exactly along these lines.
# 1 byte - [null '\x00']
# 1 byte - [block type identifier '\x01'] - Should always be 1
# N bytes - [padding '\xFF' ]
# 1 byte - [separator '\x00' ]
# M bytes - [message]
# Total - blocksize bytes (128 for most descriptors, 256 for network status documents)
# More info here: http://www.ietf.org/rfc/rfc2313.txt
# especially the notes in section 8.1
############################################################################
try:
if decrypted_bytes.index(DIGEST_TYPE_INFO) != 0:
raise ValueError('Verification failed, identifier missing')
except ValueError:
raise ValueError('Verification failed, malformed data')
try:
identifier_offset = 2
# find the separator
separator_index = decrypted_bytes.index(DIGEST_SEPARATOR, identifier_offset)
except ValueError:
raise ValueError('Verification failed, separator not found')
digest_hex = codecs.encode(decrypted_bytes[separator_index + 1:], 'hex_codec')
return stem.util.str_tools._to_unicode(digest_hex.upper())
def _content_range(self, start = None, end = None):
"""
Provides the descriptor content inclusively between two substrings.
:param bytes start: start of the content range to get
:param bytes end: end of the content range to get
:raises: ValueError if either the start or end substring are not within our content
"""
content = self.get_bytes()
start_index, end_index = None, None
if start is not None:
start_index = content.find(stem.util.str_tools._to_bytes(start))
if start_index == -1:
raise ValueError("'%s' is not present within our descriptor content" % start)
if end is not None:
end_index = content.find(stem.util.str_tools._to_bytes(end), start_index)
if end_index == -1:
raise ValueError("'%s' is not present within our descriptor content" % end)
end_index += len(end) # make the ending index inclusive
return content[start_index:end_index]
def __getattr__(self, name):
# We can't use standard hasattr() since it calls this function, recursing.
# Doing so works since it stops recursing after several dozen iterations
# (not sure why), but it's horrible in terms of performance.
def has_attr(attr):
try:
super(Descriptor, self).__getattribute__(attr)
return True
except:
return False
# If an attribute we should have isn't present it means either...
#
# a. we still need to lazy load this
# b. we read the whole descriptor but it wasn't present, so needs the default
if name in self.ATTRIBUTES and not has_attr(name):
default, parsing_function = self.ATTRIBUTES[name]
if self._lazy_loading:
try:
parsing_function(self, self._entries)
except (ValueError, KeyError):
# Set defaults for anything the parsing function should've covered.
# Despite having a validation failure some attributes might be set, in
# which case we keep them.
for attr_name, (attr_default, attr_parser) in self.ATTRIBUTES.items():
if parsing_function == attr_parser and not has_attr(attr_name):
setattr(self, attr_name, _copy(attr_default))
else:
setattr(self, name, _copy(default))
return super(Descriptor, self).__getattribute__(name)
def __str__(self):
if stem.prereq.is_python_3():
return stem.util.str_tools._to_unicode(self._raw_contents)
else:
return self._raw_contents
def _compare(self, other, method):
if type(self) != type(other):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
if self._hash is None:
self._hash = hash(str(self).strip())
return self._hash
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class NewlineNormalizer(object):
"""
File wrapper that normalizes CRLF line endings.
"""
def __init__(self, wrapped_file):
self._wrapped_file = wrapped_file
self.name = getattr(wrapped_file, 'name', None)
def read(self, *args):
return self._wrapped_file.read(*args).replace(b'\r\n', b'\n')
def readline(self, *args):
return self._wrapped_file.readline(*args).replace(b'\r\n', b'\n')
def readlines(self, *args):
return [line.rstrip(b'\r') for line in self._wrapped_file.readlines(*args)]
def seek(self, *args):
return self._wrapped_file.seek(*args)
def tell(self, *args):
return self._wrapped_file.tell(*args)
def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False):
"""
Reads from the descriptor file until we get to one of the given keywords or reach the
end of the file.
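For example, reading the annotations up to a 'router' line might look like
this (a minimal sketch, the content here is made up)...
::
  desc_file = io.BytesIO(b'@source "145.53.65.130"\\nrouter caerSidi 71.35.143.157 9001 0 0\\n')
  annotations = _read_until_keywords('router', desc_file)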
:param str,list keywords: keyword(s) we want to read until
:param file descriptor_file: file with the descriptor content
:param bool inclusive: includes the line with the keyword if True
:param bool ignore_first: doesn't check if the first line read has one of the
given keywords
:param bool skip: skips buffering content, returning None
:param int end_position: end if we reach this point in the file
:param bool include_ending_keyword: provides the keyword we broke on if **True**
:returns: **list** with the lines until we find one of the keywords; this is
instead a two-value tuple of (content, ending keyword) if
include_ending_keyword is **True**
"""
content = None if skip else []
ending_keyword = None
if stem.util._is_str(keywords):
keywords = (keywords,)
if ignore_first:
first_line = descriptor_file.readline()
if first_line and content is not None:
content.append(first_line)
keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords))
while True:
last_position = descriptor_file.tell()
if end_position and last_position >= end_position:
break
line = descriptor_file.readline()
if not line:
break # EOF
line_match = keyword_match.match(stem.util.str_tools._to_unicode(line))
if line_match:
ending_keyword = line_match.groups()[0]
if not inclusive:
descriptor_file.seek(last_position)
elif content is not None:
content.append(line)
break
elif content is not None:
content.append(line)
if include_ending_keyword:
return (content, ending_keyword)
else:
return content
def _bytes_for_block(content):
"""
Provides the base64 decoded content of a pgp-style block.
:param str content: block to be decoded
:returns: decoded block content
:raises: **TypeError** if this isn't base64 encoded content
"""
# strip the '-----BEGIN RSA PUBLIC KEY-----' header and footer
content = ''.join(content.split('\n')[1:-1])
return base64.b64decode(stem.util.str_tools._to_bytes(content))
def _get_pseudo_pgp_block(remaining_contents):
"""
Checks if given contents begins with a pseudo-Open-PGP-style block and, if
so, pops it off and provides it back to the caller.
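For example (a minimal sketch, the block's base64 content is made up)...
::
  lines = ['-----BEGIN SIGNATURE-----', 'dGVzdA==', '-----END SIGNATURE-----']
  block_type, block = _get_pseudo_pgp_block(lines)  # ('SIGNATURE', all three lines joined by newlines)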
:param list remaining_contents: lines to be checked for a public key block
:returns: **tuple** of the (block_type, content) or None if it doesn't exist
:raises: **ValueError** if the contents starts with a key block but it's
malformed (for instance, if it lacks an ending line)
"""
if not remaining_contents:
return None # nothing left
block_match = PGP_BLOCK_START.match(remaining_contents[0])
if block_match:
block_type = block_match.groups()[0]
block_lines = []
end_line = PGP_BLOCK_END % block_type
while True:
if not remaining_contents:
raise ValueError("Unterminated pgp style block (looking for '%s'):\n%s" % (end_line, '\n'.join(block_lines)))
line = remaining_contents.pop(0)
block_lines.append(line)
if line == end_line:
return (block_type, '\n'.join(block_lines))
else:
return None
def create_signing_key(private_key = None):
"""
Serializes a signing key if we have one. Otherwise this creates a new signing
key we can use to create descriptors.
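For example, generating signed descriptor content might look like this (a
minimal sketch)...
::
  signing_key = create_signing_key()
  content = stem.descriptor.server_descriptor.RelayDescriptor.content(signing_key = signing_key)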
.. versionadded:: 1.6.0
:param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key: private key
:returns: :class:`~stem.descriptor.__init__.SigningKey` that can be used to
create descriptors
:raises: **ImportError** if the cryptography module is unavailable
"""
if not stem.prereq.is_crypto_available():
raise ImportError('Signing requires the cryptography module')
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
if private_key is None:
private_key = rsa.generate_private_key(
public_exponent = 65537,
key_size = 1024,
backend = default_backend(),
)
# When signing the cryptography module includes a constant indicating
# the hash algorithm used. Tor doesn't. This causes signature
# validation failures and unfortunately cryptography has no nice way
# of excluding these so we need to mock out part of their internals...
#
# https://github.com/pyca/cryptography/issues/3713
def no_op(*args, **kwargs):
return 1
private_key._backend._lib.EVP_PKEY_CTX_set_signature_md = no_op
private_key._backend.openssl_assert = no_op
public_key = private_key.public_key()
public_digest = b'\n' + public_key.public_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PublicFormat.PKCS1,
).strip()
return SigningKey(private_key, public_key, public_digest)
def _append_router_signature(content, private_key):
"""
Appends a router signature to a server or extrainfo descriptor.
:param bytes content: descriptor content up through 'router-signature\\n'
:param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key:
private relay signing key
:returns: **bytes** with the signed descriptor content
"""
if not stem.prereq.is_crypto_available():
raise ImportError('Signing requires the cryptography module')
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
signature = base64.b64encode(private_key.sign(content, padding.PKCS1v15(), hashes.SHA1()))
return content + b'\n'.join([b'-----BEGIN SIGNATURE-----'] + stem.util.str_tools._split_by_length(signature, 64) + [b'-----END SIGNATURE-----\n'])
def _random_nickname():
return ('Unnamed%i' % random.randint(0, 100000000000000))[:19]
def _random_fingerprint():
return ('%040x' % random.randrange(16 ** 40)).upper()
def _random_ipv4_address():
return '%i.%i.%i.%i' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def _random_date():
return '%i-%02i-%02i %02i:%02i:%02i' % (random.randint(2000, 2015), random.randint(1, 12), random.randint(1, 20), random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))
def _random_crypto_blob(block_type = None):
"""
Provides a random string that can be used for crypto blocks.
"""
random_base64 = stem.util.str_tools._to_unicode(base64.b64encode(os.urandom(140)))
crypto_blob = '\n'.join(stem.util.str_tools._split_by_length(random_base64, 64))
if block_type:
return '\n-----BEGIN %s-----\n%s\n-----END %s-----' % (block_type, crypto_blob, block_type)
else:
return crypto_blob
def _descriptor_components(raw_contents, validate, extra_keywords = (), non_ascii_fields = ()):
"""
Initial breakup of the server descriptor contents to make parsing easier.
A descriptor contains a series of 'keyword lines' which are simply a keyword
followed by an optional value. Lines can also be followed by a signature
block.
To get a sub-listing with just certain keywords use extra_keywords. This can
be useful if we care about their relative ordering with respect to each
other. For instance, we care about the ordering of 'accept' and 'reject'
entries because this influences the resulting exit policy, but for everything
else in server descriptors the order does not matter.
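For example (a minimal sketch, the 'uptime 37\\ncontact alice' content is
made up)...
::
  entries = _descriptor_components('uptime 37\\ncontact alice', False)
  # OrderedDict([('uptime', [('37', None, None)]), ('contact', [('alice', None, None)])])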
:param str raw_contents: descriptor content provided by the relay
:param bool validate: checks the validity of the descriptor's content if
True, skips these checks otherwise
:param list extra_keywords: entity keywords to put into a separate listing
with ordering intact
:param list non_ascii_fields: fields containing non-ascii content
:returns:
**collections.OrderedDict** with the 'keyword => (value, pgp key) entries'
mappings. If extra_keywords was provided then this instead provides a
two-value tuple, the second being a list of those entries.
"""
if isinstance(raw_contents, bytes):
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = OrderedDict()
extra_entries = [] # entries with a keyword in extra_keywords
remaining_lines = raw_contents.split('\n')
while remaining_lines:
line = remaining_lines.pop(0)
# V2 network status documents explicitly can contain blank lines...
#
# "Implementations MAY insert blank lines for clarity between sections;
# these blank lines are ignored."
#
# ... and server descriptors end with an extra newline. But other documents
# don't say how blank lines should be handled, so we globally ignore them.
if not line:
continue
# Some lines have an 'opt ' for backward compatibility. They should be
# ignored. This prefix is being removed in...
# https://trac.torproject.org/projects/tor/ticket/5124
if line.startswith('opt '):
line = line[4:]
line_match = KEYWORD_LINE.match(line)
if not line_match:
if not validate:
continue
raise ValueError('Line contains invalid characters: %s' % line)
keyword, value = line_match.groups()
if value is None:
value = ''
try:
block_attr = _get_pseudo_pgp_block(remaining_lines)
if block_attr:
block_type, block_contents = block_attr
else:
block_type, block_contents = None, None
except ValueError:
if not validate:
continue
raise
if validate and keyword not in non_ascii_fields:
try:
value.encode('ascii')
except UnicodeError:
replaced = ''.join([(char if char in string.printable else '?') for char in value])
raise ValueError("'%s' line had non-ascii content: %s" % (keyword, replaced))
if keyword in extra_keywords:
extra_entries.append('%s %s' % (keyword, value))
else:
entries.setdefault(keyword, []).append((value, block_type, block_contents))
if extra_keywords:
return entries, extra_entries
else:
return entries
# importing at the end to avoid circular dependencies on our Descriptor class
import stem.descriptor.bandwidth_file
import stem.descriptor.extrainfo_descriptor
import stem.descriptor.hidden_service
import stem.descriptor.microdescriptor
import stem.descriptor.networkstatus
import stem.descriptor.server_descriptor
import stem.descriptor.tordnsel
stem-1.8.0/stem/descriptor/server_descriptor.py 0000664 0001750 0001750 00000126641 13564354230 022475 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor server descriptors, which contains the infrequently changing
information about a Tor relay (contact information, exit policy, public keys,
etc). This information is provided from a few sources...
* The control port via 'GETINFO desc/\\*' queries.
* The 'cached-descriptors' file in Tor's data directory.
* Archived descriptors provided by `CollecTor `_.
* Directory authorities and mirrors via their DirPort.
**Module Overview:**
::
ServerDescriptor - Tor server descriptor.
|- RelayDescriptor - Server descriptor for a relay.
| +- make_router_status_entry - Creates a router status entry for this descriptor.
|
|- BridgeDescriptor - Scrubbed server descriptor for a bridge.
| |- is_scrubbed - checks if our content has been properly scrubbed
| +- get_scrubbing_issues - description of issues with our scrubbing
|
|- digest - calculates the upper-case hex digest value for our content
|- get_annotations - dictionary of content prior to the descriptor entry
+- get_annotation_lines - lines that provided the annotations
.. data:: BridgeDistribution (enum)
Preferred method of distributing this relay if a bridge.
.. versionadded:: 1.6.0
===================== ===========
BridgeDistribution Description
===================== ===========
**ANY** No preference, BridgeDB will pick how the bridge is distributed.
**HTTPS** Provided via the `web interface `_.
**EMAIL** Provided in response to emails to bridges@torproject.org.
**MOAT** Provided in interactive menus within Tor Browser.
**HYPHAE** Provided via a cryptographic invitation-based system.
===================== ===========
"""
import base64
import binascii
import functools
import hashlib
import re
import stem.descriptor.certificate
import stem.descriptor.extrainfo_descriptor
import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor.router_status_entry import RouterStatusEntryV3
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
DigestHash,
DigestEncoding,
create_signing_key,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_bytes_for_block,
_value,
_values,
_parse_simple_line,
_parse_int_line,
_parse_if_present,
_parse_bytes_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_append_router_signature,
_random_nickname,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# relay descriptors must have exactly one of each of the following
REQUIRED_FIELDS = (
'router',
'bandwidth',
'published',
'onion-key',
'signing-key',
'router-signature',
)
# optional entries that can appear at most once
SINGLE_FIELDS = (
'identity-ed25519',
'master-key-ed25519',
'platform',
'fingerprint',
'hibernating',
'uptime',
'contact',
'read-history',
'write-history',
'eventdns',
'bridge-distribution-request',
'family',
'caches-extra-info',
'extra-info-digest',
'hidden-service-dir',
'protocols',
'allow-single-hop-exits',
'tunnelled-dir-server',
'proto',
'onion-key-crosscert',
'ntor-onion-key',
'ntor-onion-key-crosscert',
'router-sig-ed25519',
)
BridgeDistribution = stem.util.enum.Enum(
('ANY', 'any'),
('HTTPS', 'https'),
('EMAIL', 'email'),
('MOAT', 'moat'),
('HYPHAE', 'hyphae'),
)
DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535')
REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*')
DEFAULT_BRIDGE_DISTRIBUTION = 'any'
def _truncated_b64encode(content):
return stem.util.str_tools._to_unicode(base64.b64encode(content).rstrip(b'='))
def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
"""
Iterates over the server descriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool is_bridge: parses the file as being a bridge descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for ServerDescriptor instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **IOError** if the file can't be read
"""
# Handler for relay descriptors
#
# Cached descriptors consist of annotations followed by the descriptor
# itself. For instance...
#
# @downloaded-at 2012-03-14 16:31:05
# @source "145.53.65.130"
# router caerSidi 71.35.143.157 9001 0 0
# platform Tor 0.2.1.30 on Linux x86_64
#
# router-signature
# -----BEGIN SIGNATURE-----
#
# -----END SIGNATURE-----
#
# Metrics descriptor files are the same, but lack any annotations. The
# following simply does the following...
#
# - parse as annotations until we get to 'router'
# - parse as descriptor content until we get to 'router-signature' followed
# by the end of the signature block
# - construct a descriptor and provide it back to the caller
#
# Any annotations after the last server descriptor are ignored (never provided
# to the caller).
while True:
annotations = _read_until_keywords('router', descriptor_file)
annotations = map(bytes.strip, annotations) # strip newlines
annotations = map(stem.util.str_tools._to_unicode, annotations) # convert to unicode
annotations = list(filter(lambda x: x != '', annotations)) # drop any blanks
if not is_bridge:
descriptor_content = _read_until_keywords('router-signature', descriptor_file)
# we've reached the 'router-signature', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
else:
descriptor_content = _read_until_keywords('router-digest', descriptor_file, True)
if descriptor_content:
if descriptor_content[0].startswith(b'@type'):
descriptor_content = descriptor_content[1:]
descriptor_text = bytes.join(b'', descriptor_content)
if is_bridge:
yield BridgeDescriptor(descriptor_text, validate, annotations, **kwargs)
else:
yield RelayDescriptor(descriptor_text, validate, annotations, **kwargs)
else:
if validate and annotations:
raise ValueError("Content doesn't conform to being a server descriptor:\n%s" % '\n'.join(annotations))
break # done parsing descriptors
def _parse_router_line(descriptor, entries):
# "router" nickname address ORPort SocksPort DirPort
value = _value('router', entries)
router_comp = value.split()
if len(router_comp) < 5:
raise ValueError('Router line must have five values: router %s' % value)
elif not stem.util.tor_tools.is_valid_nickname(router_comp[0]):
raise ValueError("Router line entry isn't a valid nickname: %s" % router_comp[0])
elif not stem.util.connection.is_valid_ipv4_address(router_comp[1]):
raise ValueError("Router line entry isn't a valid IPv4 address: %s" % router_comp[1])
elif not stem.util.connection.is_valid_port(router_comp[2], allow_zero = True):
raise ValueError("Router line's ORPort is invalid: %s" % router_comp[2])
elif not stem.util.connection.is_valid_port(router_comp[3], allow_zero = True):
raise ValueError("Router line's SocksPort is invalid: %s" % router_comp[3])
elif not stem.util.connection.is_valid_port(router_comp[4], allow_zero = True):
raise ValueError("Router line's DirPort is invalid: %s" % router_comp[4])
descriptor.nickname = router_comp[0]
descriptor.address = router_comp[1]
descriptor.or_port = int(router_comp[2])
descriptor.socks_port = None if router_comp[3] == '0' else int(router_comp[3])
descriptor.dir_port = None if router_comp[4] == '0' else int(router_comp[4])
def _parse_bandwidth_line(descriptor, entries):
# "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed
value = _value('bandwidth', entries)
bandwidth_comp = value.split()
if len(bandwidth_comp) < 3:
raise ValueError('Bandwidth line must have three values: bandwidth %s' % value)
elif not bandwidth_comp[0].isdigit():
raise ValueError("Bandwidth line's average rate isn't numeric: %s" % bandwidth_comp[0])
elif not bandwidth_comp[1].isdigit():
raise ValueError("Bandwidth line's burst rate isn't numeric: %s" % bandwidth_comp[1])
elif not bandwidth_comp[2].isdigit():
raise ValueError("Bandwidth line's observed rate isn't numeric: %s" % bandwidth_comp[2])
descriptor.average_bandwidth = int(bandwidth_comp[0])
descriptor.burst_bandwidth = int(bandwidth_comp[1])
descriptor.observed_bandwidth = int(bandwidth_comp[2])
def _parse_platform_line(descriptor, entries):
# "platform" string
_parse_bytes_line('platform', 'platform')(descriptor, entries)
# The platform attribute was set earlier. This line can contain any
# arbitrary data, but tor seems to report its version followed by the
# os like the following...
#
# platform Tor 0.2.2.35 (git-73ff13ab3cc9570d) on Linux x86_64
#
# There's no guarantee that we'll be able to pick out the
# version, but we might as well try to save our caller the effort.
value = _value('platform', entries)
platform_match = re.match('^(?:node-)?Tor (\\S*).* on (.*)$', value)
if platform_match:
version_str, descriptor.operating_system = platform_match.groups()
try:
descriptor.tor_version = stem.version._get_version(version_str)
except ValueError:
pass
def _parse_fingerprint_line(descriptor, entries):
# This is forty hex digits split into space separated groups of four.
# Checking that we match this pattern.
value = _value('fingerprint', entries)
fingerprint = value.replace(' ', '')
for grouping in value.split(' '):
if len(grouping) != 4:
raise ValueError('Fingerprint line should have groupings of four hex digits: %s' % value)
if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)
descriptor.fingerprint = fingerprint
def _parse_extrainfo_digest_line(descriptor, entries):
value = _value('extra-info-digest', entries)
digest_comp = value.split(' ')
if not stem.util.tor_tools.is_hex_digits(digest_comp[0], 40):
raise ValueError('extra-info-digest should be 40 hex characters: %s' % digest_comp[0])
descriptor.extra_info_digest = digest_comp[0]
descriptor.extra_info_sha256_digest = digest_comp[1] if len(digest_comp) >= 2 else None
def _parse_hibernating_line(descriptor, entries):
# "hibernating" 0|1 (in practice only set if one)
value = _value('hibernating', entries)
if value not in ('0', '1'):
raise ValueError('Hibernating line had an invalid value, must be zero or one: %s' % value)
descriptor.hibernating = value == '1'
def _parse_protocols_line(descriptor, entries):
value = _value('protocols', entries)
protocols_match = re.match('^Link (.*) Circuit (.*)$', value)
if not protocols_match:
raise ValueError('Protocols line did not match the expected pattern: protocols %s' % value)
link_versions, circuit_versions = protocols_match.groups()
descriptor.link_protocols = link_versions.split(' ')
descriptor.circuit_protocols = circuit_versions.split(' ')
def _parse_or_address_line(descriptor, entries):
all_values = _values('or-address', entries)
or_addresses = []
for entry in all_values:
line = 'or-address %s' % entry
if ':' not in entry:
raise ValueError('or-address line missing a colon: %s' % line)
address, port = entry.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError('or-address line has a malformed address: %s' % line)
if not stem.util.connection.is_valid_port(port):
raise ValueError('or-address line has a malformed port: %s' % line)
or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
descriptor.or_addresses = or_addresses
def _parse_history_line(keyword, history_end_attribute, history_interval_attribute, history_values_attribute, descriptor, entries):
value = _value(keyword, entries)
timestamp, interval, remainder = stem.descriptor.extrainfo_descriptor._parse_timestamp_and_interval(keyword, value)
try:
if remainder:
history_values = [int(entry) for entry in remainder.split(',')]
else:
history_values = []
except ValueError:
raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))
setattr(descriptor, history_end_attribute, timestamp)
setattr(descriptor, history_interval_attribute, interval)
setattr(descriptor, history_values_attribute, history_values)
def _parse_exit_policy(descriptor, entries):
if hasattr(descriptor, '_unparsed_exit_policy'):
if descriptor._unparsed_exit_policy and stem.util.str_tools._to_unicode(descriptor._unparsed_exit_policy[0]) == 'reject *:*':
descriptor.exit_policy = REJECT_ALL_POLICY
else:
descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy)
del descriptor._unparsed_exit_policy
def _parse_identity_ed25519_line(descriptor, entries):
# TODO: replace this with Ed25519Certificate._from_descriptor() in stem 2.x
_parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')(descriptor, entries)
if descriptor.ed25519_certificate:
descriptor.certificate = stem.descriptor.certificate.Ed25519Certificate.from_base64(descriptor.ed25519_certificate)
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_master_key')
_parse_master_key_ed25519_for_hash_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_contact_line = _parse_bytes_line('contact', 'contact')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_ipv6_policy_line = _parse_simple_line('ipv6-policy', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v))
_parse_allow_single_hop_exits_line = _parse_if_present('allow-single-hop-exits', 'allow_single_hop_exits')
_parse_tunneled_dir_server_line = _parse_if_present('tunnelled-dir-server', 'allow_tunneled_dir_requests')
_parse_proto_line = _parse_protocol_line('proto', 'protocols')
_parse_hidden_service_dir_line = _parse_if_present('hidden-service-dir', 'is_hidden_service_dir')
_parse_caches_extra_info_line = _parse_if_present('caches-extra-info', 'extra_info_cache')
_parse_bridge_distribution_request_line = _parse_simple_line('bridge-distribution-request', 'bridge_distribution')
_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: set(v.split(' ')))
_parse_eventdns_line = _parse_simple_line('eventdns', 'eventdns', func = lambda v: v == '1')
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_onion_key_crosscert_line = _parse_key_block('onion-key-crosscert', 'onion_key_crosscert', 'CROSSCERT')
_parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_ntor_onion_key_crosscert_line = _parse_key_block('ntor-onion-key-crosscert', 'ntor_onion_key_crosscert', 'ED25519 CERT', 'ntor_onion_key_crosscert_sign')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
# TODO: We need to be tolerant of negative uptimes to accommodate a past tor
# bug...
#
# Changes in version 0.1.2.7-alpha - 2007-02-06
# - If our system clock jumps back in time, don't publish a negative
# uptime in the descriptor. Also, don't let the global rate limiting
# buckets go absurdly negative.
#
# After parsing all of the attributes we'll double check that negative
# uptimes only occurred prior to this fix.
_parse_uptime_line = _parse_int_line('uptime', 'uptime', allow_negative = True)
class ServerDescriptor(Descriptor):
"""
Common parent for server descriptors.
:var str nickname: **\\*** relay's nickname
:var str fingerprint: identity key fingerprint
:var datetime published: **\\*** time in UTC when this descriptor was made
:var str address: **\\*** IPv4 address of the relay
:var int or_port: **\\*** port used for relaying
:var int socks_port: **\\*** port used as client (**deprecated**, always **None**)
:var int dir_port: **\\*** port used for descriptor mirroring
:var bytes platform: line with operating system and tor version
:var stem.version.Version tor_version: version of tor
:var str operating_system: operating system
:var int uptime: uptime when published in seconds
:var bytes contact: contact information
:var stem.exit_policy.ExitPolicy exit_policy: **\\*** stated exit policy
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\\*** exit policy for IPv6
:var BridgeDistribution bridge_distribution: **\\*** preferred method of providing this relay's
address if a bridge
:var set family: **\\*** nicknames or fingerprints of declared family
:var int average_bandwidth: **\\*** average rate it's willing to relay in bytes/s
:var int burst_bandwidth: **\\*** burst rate it's willing to relay in bytes/s
:var int observed_bandwidth: **\\*** estimated capacity based on usage in bytes/s
:var list link_protocols: link protocols supported by the relay
:var list circuit_protocols: circuit protocols supported by the relay
:var bool is_hidden_service_dir: **\\*** indicates if the relay serves hidden
service descriptors
:var bool hibernating: **\\*** hibernating when published
:var bool allow_single_hop_exits: **\\*** flag if single hop exiting is allowed
:var bool allow_tunneled_dir_requests: **\\*** flag if tunneled directory
requests are accepted
:var bool extra_info_cache: **\\*** flag if a mirror for extra-info documents
:var str extra_info_digest: upper-case hex encoded digest of our extra-info document
:var str extra_info_sha256_digest: base64 encoded sha256 digest of our extra-info document
:var bool eventdns: flag for evdns backend (**deprecated**, always unset)
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
:var list or_addresses: **\\*** alternative for our address/or_port
attributes, each entry is a tuple of the form (address (**str**), port
(**int**), is_ipv6 (**bool**))
:var dict protocols: mapping of protocols to their supported versions
**Deprecated**, moved to extra-info descriptor...
:var datetime read_history_end: end of the sampling interval
:var int read_history_interval: seconds per interval
:var list read_history_values: bytes read during each interval
:var datetime write_history_end: end of the sampling interval
:var int write_history_interval: seconds per interval
:var list write_history_values: bytes written during each interval
**\\*** attribute is either required when we're parsed with validation or has
a default value; others are left as **None** if undefined
.. versionchanged:: 1.5.0
Added the allow_tunneled_dir_requests attribute.
.. versionchanged:: 1.6.0
Added the extra_info_sha256_digest, protocols, and bridge_distribution
attributes.
.. versionchanged:: 1.7.0
Added the is_hidden_service_dir attribute.
.. versionchanged:: 1.7.0
Deprecated the hidden_service_dir field, it's never been populated
(:spec:`43c2f78`). This field will be removed in Stem 2.0.
"""
ATTRIBUTES = {
'nickname': (None, _parse_router_line),
'fingerprint': (None, _parse_fingerprint_line),
'contact': (None, _parse_contact_line),
'published': (None, _parse_published_line),
'exit_policy': (None, _parse_exit_policy),
'address': (None, _parse_router_line),
'or_port': (None, _parse_router_line),
'socks_port': (None, _parse_router_line),
'dir_port': (None, _parse_router_line),
'platform': (None, _parse_platform_line),
'tor_version': (None, _parse_platform_line),
'operating_system': (None, _parse_platform_line),
'uptime': (None, _parse_uptime_line),
'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line),
'bridge_distribution': (DEFAULT_BRIDGE_DISTRIBUTION, _parse_bridge_distribution_request_line),
'family': (set(), _parse_family_line),
'average_bandwidth': (None, _parse_bandwidth_line),
'burst_bandwidth': (None, _parse_bandwidth_line),
'observed_bandwidth': (None, _parse_bandwidth_line),
'link_protocols': (None, _parse_protocols_line),
'circuit_protocols': (None, _parse_protocols_line),
'is_hidden_service_dir': (False, _parse_hidden_service_dir_line),
'hibernating': (False, _parse_hibernating_line),
'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line),
'allow_tunneled_dir_requests': (False, _parse_tunneled_dir_server_line),
'protocols': ({}, _parse_proto_line),
'extra_info_cache': (False, _parse_caches_extra_info_line),
'extra_info_digest': (None, _parse_extrainfo_digest_line),
'extra_info_sha256_digest': (None, _parse_extrainfo_digest_line),
'eventdns': (None, _parse_eventdns_line),
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
'or_addresses': ([], _parse_or_address_line),
'read_history_end': (None, _parse_read_history_line),
'read_history_interval': (None, _parse_read_history_line),
'read_history_values': (None, _parse_read_history_line),
'write_history_end': (None, _parse_write_history_line),
'write_history_interval': (None, _parse_write_history_line),
'write_history_values': (None, _parse_write_history_line),
}
PARSER_FOR_LINE = {
'router': _parse_router_line,
'bandwidth': _parse_bandwidth_line,
'platform': _parse_platform_line,
'published': _parse_published_line,
'fingerprint': _parse_fingerprint_line,
'contact': _parse_contact_line,
'hibernating': _parse_hibernating_line,
'extra-info-digest': _parse_extrainfo_digest_line,
'hidden-service-dir': _parse_hidden_service_dir_line,
'uptime': _parse_uptime_line,
'protocols': _parse_protocols_line,
'ntor-onion-key': _parse_ntor_onion_key_line,
'or-address': _parse_or_address_line,
'read-history': _parse_read_history_line,
'write-history': _parse_write_history_line,
'ipv6-policy': _parse_ipv6_policy_line,
'allow-single-hop-exits': _parse_allow_single_hop_exits_line,
'tunnelled-dir-server': _parse_tunneled_dir_server_line,
'proto': _parse_proto_line,
'caches-extra-info': _parse_caches_extra_info_line,
'bridge-distribution-request': _parse_bridge_distribution_request_line,
'family': _parse_family_line,
'eventdns': _parse_eventdns_line,
}
def __init__(self, raw_contents, validate = False, annotations = None):
"""
Server descriptor constructor, created from an individual relay's
descriptor content (as provided by 'GETINFO desc/*', cached descriptors,
and metrics).
This can validate the descriptor's content as it's parsed. Validation can
be skipped to either improve performance or be accepting of malformed
data.
:param str raw_contents: descriptor content provided by the relay
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param list annotations: lines that appeared prior to the descriptor
:raises: **ValueError** if the contents is malformed and validate is True
"""
super(ServerDescriptor, self).__init__(raw_contents, lazy_load = not validate)
self._annotation_lines = annotations if annotations else []
# A descriptor contains a series of 'keyword lines' which are simply a
# keyword followed by an optional value. Lines can also be followed by a
# signature block.
#
# We care about the ordering of 'accept' and 'reject' entries because this
# influences the resulting exit policy, but for everything else the order
# does not matter so breaking it into key / value pairs.
entries, self._unparsed_exit_policy = _descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, extra_keywords = ('accept', 'reject'), non_ascii_fields = ('contact', 'platform'))
# TODO: Remove the following field in Stem 2.0. It has never been populated...
#
# https://gitweb.torproject.org/torspec.git/commit/?id=43c2f78
self.hidden_service_dir = ['2']
if validate:
self._parse(entries, validate)
_parse_exit_policy(self, entries)
# if we have a negative uptime and a tor version that shouldn't exhibit
# this bug then fail validation
if validate and self.uptime and self.tor_version:
if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'):
raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime))
self._check_constraints(entries)
else:
self._entries = entries
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
"""
Digest of this descriptor's content. These are referenced by...
* **Consensus**
* Referrer: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` **digest** attribute
* Format: **SHA1/BASE64**
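For instance, fetching the digest as it's referenced within the consensus
might look like this (a minimal sketch, assuming 'desc' is a parsed
descriptor)...
::
  desc.digest(DigestHash.SHA1, DigestEncoding.BASE64)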
.. versionchanged:: 1.8.0
Added the hash_type and encoding arguments.
:param stem.descriptor.DigestHash hash_type: digest hashing algorithm
:param stem.descriptor.DigestEncoding encoding: digest encoding
:returns: **hashlib.HASH** or **str** based on our encoding argument
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the ServerDescriptor subclass')
@lru_cache()
def get_annotations(self):
"""
Provides content that appeared prior to the descriptor. If this comes from
the cached-descriptors file then this commonly contains content like...
::
@downloaded-at 2012-03-18 21:18:29
@source "173.254.216.66"
.. deprecated:: 1.8.0
Users very rarely read from cached descriptor files any longer. This
method will be removed in Stem 2.x. If you have some need for us to keep
this please `let me know
`_.
:returns: **dict** with the key/value pairs in our annotations
"""
annotation_dict = {}
for line in self._annotation_lines:
if ' ' in line:
key, value = line.split(' ', 1)
annotation_dict[key] = value
else:
annotation_dict[line] = None
return annotation_dict
def get_annotation_lines(self):
"""
Provides the lines of content that appeared prior to the descriptor. This
is the same as the
:func:`~stem.descriptor.server_descriptor.ServerDescriptor.get_annotations`
results, but with the unparsed lines and ordering retained.
.. deprecated:: 1.8.0
Users very rarely read from cached descriptor files any longer. This
method will be removed in Stem 2.x. If you have some need for us to keep
this please `let me know
`_.
:returns: **list** with the lines of annotation that came before this descriptor
"""
return self._annotation_lines
def _check_constraints(self, entries):
"""
Does a basic check that the entries conform to this descriptor type's
constraints.
:param dict entries: keyword => (value, pgp key) entries
:raises: **ValueError** if an issue arises in validation
"""
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("Descriptor must have a '%s' entry" % keyword)
for keyword in self._single_fields():
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a descriptor" % keyword)
expected_first_keyword = self._first_keyword()
if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
raise ValueError("Descriptor must start with a '%s' entry" % expected_first_keyword)
expected_last_keyword = self._last_keyword()
if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
if 'identity-ed25519' in entries.keys():
if 'router-sig-ed25519' not in entries.keys():
raise ValueError('Descriptor must have router-sig-ed25519 entry to accompany identity-ed25519')
elif 'router-sig-ed25519' not in list(entries.keys())[-2:]:
raise ValueError("Descriptor must have 'router-sig-ed25519' as the next-to-last entry")
if not self.exit_policy:
raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry")
# Constraints that the descriptor must meet to be valid. These can be None if
# not applicable.
def _required_fields(self):
return REQUIRED_FIELDS
def _single_fields(self):
return REQUIRED_FIELDS + SINGLE_FIELDS
def _first_keyword(self):
return 'router'
def _last_keyword(self):
return 'router-signature'
class RelayDescriptor(ServerDescriptor):
"""
Server descriptor (`descriptor specification
`_)
:var stem.certificate.Ed25519Certificate certificate: ed25519 certificate
:var str ed25519_certificate: base64 encoded ed25519 certificate
:var str ed25519_master_key: base64 encoded master key for our ed25519 certificate
:var str ed25519_signature: signature of this document using ed25519
:var str onion_key: **\\*** key used to encrypt EXTEND cells
:var str onion_key_crosscert: signature generated using the onion_key
:var str ntor_onion_key_crosscert: signature generated using the ntor-onion-key
:var str ntor_onion_key_crosscert_sign: sign bit of the corresponding ed25519 public key
:var str signing_key: **\\*** relay's long-term identity key
:var str signature: **\\*** signature for this descriptor
**\\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate, ed25519_master_key, ed25519_signature,
onion_key_crosscert, ntor_onion_key_crosscert, and
ntor_onion_key_crosscert_sign attributes.
.. versionchanged:: 1.6.0
Moved from the deprecated `pycrypto
`_ module to `cryptography
`_ for validating signatures.
.. versionchanged:: 1.6.0
Added the certificate attribute.
.. deprecated:: 1.6.0
Our **ed25519_certificate** is deprecated in favor of our new
**certificate** attribute. The base64 encoded certificate is available via
the certificate's **encoded** attribute.
.. versionchanged:: 1.6.0
Added the **skip_crypto_validation** constructor argument.
"""
TYPE_ANNOTATION_NAME = 'server-descriptor'
ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
'certificate': (None, _parse_identity_ed25519_line),
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_master_key': (None, _parse_master_key_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'onion_key': (None, _parse_onion_key_line),
'onion_key_crosscert': (None, _parse_onion_key_crosscert_line),
'ntor_onion_key_crosscert': (None, _parse_ntor_onion_key_crosscert_line),
'ntor_onion_key_crosscert_sign': (None, _parse_ntor_onion_key_crosscert_line),
'signing_key': (None, _parse_signing_key_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'onion-key': _parse_onion_key_line,
'onion-key-crosscert': _parse_onion_key_crosscert_line,
'ntor-onion-key-crosscert': _parse_ntor_onion_key_crosscert_line,
'signing-key': _parse_signing_key_line,
'router-signature': _parse_router_signature_line,
})
def __init__(self, raw_contents, validate = False, annotations = None, skip_crypto_validation = False):
super(RelayDescriptor, self).__init__(raw_contents, validate, annotations)
if validate:
if self.fingerprint:
key_hash = hashlib.sha1(_bytes_for_block(self.signing_key)).hexdigest()
if key_hash != self.fingerprint.lower():
raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash))
if not skip_crypto_validation and stem.prereq.is_crypto_available():
signed_digest = self._digest_for_signature(self.signing_key, self.signature)
if signed_digest != self.digest():
raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest()))
if self.onion_key_crosscert and stem.prereq.is_crypto_available():
onion_key_crosscert_digest = self._digest_for_signature(self.onion_key, self.onion_key_crosscert)
if onion_key_crosscert_digest != self._onion_key_crosscert_digest():
raise ValueError('Decrypted onion-key-crosscert digest does not match local digest (calculated: %s, local: %s)' % (onion_key_crosscert_digest, self._onion_key_crosscert_digest()))
if stem.prereq.is_crypto_available(ed25519 = True) and self.certificate:
self.certificate.validate(self)
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None, exit_policy = None):
if signing_key:
sign = True
if attr is None:
attr = {}
if exit_policy is None:
exit_policy = REJECT_ALL_POLICY
base_header = [
('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())),
('published', _random_date()),
('bandwidth', '153600 256000 104590'),
] + [
tuple(line.split(' ', 1)) for line in str(exit_policy).splitlines()
] + [
('onion-key', _random_crypto_blob('RSA PUBLIC KEY')),
('signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
]
if sign:
if attr and 'signing-key' in attr:
raise ValueError('Cannot sign the descriptor if a signing-key has been provided')
elif attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
if 'fingerprint' not in attr:
fingerprint = hashlib.sha1(_bytes_for_block(stem.util.str_tools._to_unicode(signing_key.public_digest.strip()))).hexdigest().upper()
attr['fingerprint'] = ' '.join(stem.util.str_tools._split_by_length(fingerprint, 4))
attr['signing-key'] = signing_key.public_digest
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-sig-ed25519', None),
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None, exit_policy = None):
return cls(cls.content(attr, exclude, sign, signing_key, exit_policy), validate = validate, skip_crypto_validation = not sign)
@lru_cache()
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
"""
Provides the digest of our descriptor's content.
:returns: **hashlib.HASH** or **str** based on our hash_type and encoding arguments
:raises: ValueError if the digest cannot be calculated
"""
content = self._content_range(start = 'router', end = '\nrouter-signature\n')
if hash_type == DigestHash.SHA1:
return stem.descriptor._encode_digest(hashlib.sha1(content), encoding)
elif hash_type == DigestHash.SHA256:
return stem.descriptor._encode_digest(hashlib.sha256(content), encoding)
else:
raise NotImplementedError('Server descriptor digests are only available in sha1 and sha256, not %s' % hash_type)
def make_router_status_entry(self):
"""
Provides a RouterStatusEntryV3 for this descriptor content.
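For example (a minimal sketch, assuming 'desc' is a RelayDescriptor with a
fingerprint)...
::
  entry = desc.make_router_status_entry()
  print(entry.fingerprint)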
.. versionadded:: 1.6.0
:returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
that would be in the consensus
"""
if not self.fingerprint:
raise ValueError('Server descriptor lacks a fingerprint. This is an optional field, but required to make a router status entry.')
attr = {
'r': ' '.join([
self.nickname,
_truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.fingerprint))),
_truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.digest()))),
self.published.strftime('%Y-%m-%d %H:%M:%S'),
self.address,
str(self.or_port),
str(self.dir_port) if self.dir_port else '0',
]),
'w': 'Bandwidth=%i' % self.average_bandwidth,
'p': self.exit_policy.summary().replace(', ', ','),
}
if self.tor_version:
attr['v'] = 'Tor %s' % self.tor_version
if self.or_addresses:
attr['a'] = ['%s:%s' % (addr, port) for addr, port, _ in self.or_addresses]
if self.certificate:
attr['id'] = 'ed25519 %s' % _truncated_b64encode(self.certificate.key)
return RouterStatusEntryV3.create(attr)
@lru_cache()
def _onion_key_crosscert_digest(self):
"""
Provides the digest of the onion-key-crosscert data. This consists of the
sha1 digest of our RSA identity key, concatenated with our ed25519 master key.
:returns: **unicode** digest encoded in uppercase hex
:raises: ValueError if the digest cannot be calculated
"""
signing_key_digest = hashlib.sha1(_bytes_for_block(self.signing_key)).digest()
data = signing_key_digest + base64.b64decode(stem.util.str_tools._to_bytes(self.ed25519_master_key) + b'=')
return stem.util.str_tools._to_unicode(binascii.hexlify(data).upper())
def _check_constraints(self, entries):
super(RelayDescriptor, self)._check_constraints(entries)
if self.ed25519_certificate:
if not self.onion_key_crosscert:
raise ValueError("Descriptor must have a 'onion-key-crosscert' when identity-ed25519 is present")
elif not self.ed25519_signature:
raise ValueError("Descriptor must have a 'router-sig-ed25519' when identity-ed25519 is present")
class BridgeDescriptor(ServerDescriptor):
"""
Bridge descriptor (`bridge descriptor specification
`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
Also added ntor_onion_key (previously this only belonged to unsanitized
descriptors).
"""
TYPE_ANNOTATION_NAME = 'bridge-server-descriptor'
ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_for_hash_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_for_hash_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())),
('router-digest', '006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4'),
('published', _random_date()),
('bandwidth', '409600 819200 5120'),
('reject', '*:*'),
))
def digest(self, hash_type = DigestHash.SHA1, encoding = DigestEncoding.HEX):
if hash_type == DigestHash.SHA1 and encoding == DigestEncoding.HEX:
return self._digest
else:
raise NotImplementedError('Bridge server descriptor digests are only available as sha1/hex, not %s/%s' % (hash_type, encoding))
def is_scrubbed(self):
"""
Checks if we've been properly scrubbed in accordance with the `bridge
descriptor specification
`_.
Validation is a moving target so this may not be fully up to date.
:returns: **True** if we're scrubbed, **False** otherwise
"""
return self.get_scrubbing_issues() == []
@lru_cache()
def get_scrubbing_issues(self):
"""
Provides issues with our scrubbing.
:returns: **list** of strings which describe issues we have with our
scrubbing, this list is empty if we're properly scrubbed
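For instance, assuming 'desc' is a BridgeDescriptor we've parsed...
::
  for issue in desc.get_scrubbing_issues():
    print('scrubbing issue: %s' % issue)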
"""
issues = []
if not self.address.startswith('10.'):
issues.append("Router line's address should be scrubbed to be '10.x.x.x': %s" % self.address)
if self.contact and self.contact != 'somebody':
issues.append("Contact line should be scrubbed to be 'somebody', but instead had '%s'" % self.contact)
for address, _, is_ipv6 in self.or_addresses:
if not is_ipv6 and not address.startswith('10.'):
issues.append("or-address line's address should be scrubbed to be '10.x.x.x': %s" % address)
elif is_ipv6 and not address.startswith('fd9f:2e19:3bcf::'):
# TODO: this check isn't quite right because we aren't checking that
# the next grouping of hex digits contains 1-2 digits
issues.append("or-address line's address should be scrubbed to be 'fd9f:2e19:3bcf::xx:xxxx': %s" % address)
for line in self.get_unrecognized_lines():
if line.startswith('onion-key '):
issues.append('Bridge descriptors should have their onion-key scrubbed: %s' % line)
elif line.startswith('signing-key '):
issues.append('Bridge descriptors should have their signing-key scrubbed: %s' % line)
elif line.startswith('router-signature '):
issues.append('Bridge descriptors should have their signature scrubbed: %s' % line)
return issues
def _required_fields(self):
# bridge required fields are the same as a relay descriptor, minus items
# excluded according to the format page
excluded_fields = [
'onion-key',
'signing-key',
'router-signature',
]
included_fields = [
'router-digest',
]
return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])
def _single_fields(self):
return self._required_fields() + SINGLE_FIELDS
def _last_keyword(self):
return None
stem-1.8.0/stem/descriptor/bandwidth_file.py 0000664 0001750 0001750 00000026704 13501272761 021672 0 ustar atagar atagar 0000000 0000000 # Copyright 2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Bandwidth Authority metrics as described in Tor's
`bandwidth-file-spec `_.
**Module Overview:**
::
BandwidthFile - Tor bandwidth authority measurements.
.. versionadded:: 1.8.0
"""
import datetime
import io
import time
import stem.util.str_tools
from stem.descriptor import (
_mappings_for,
Descriptor,
)
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
# Four character dividers are allowed for backward compatibility, but five is
# preferred.
HEADER_DIV = b'====='
HEADER_DIV_ALT = b'===='
class RecentStats(object):
"""
Statistical information collected over the last 'data_period' (by default
five days).
:var int consensus_count: number of consensuses published during this period
:var int prioritized_relays: number of relays prioritized to be measured
:var int prioritized_relay_lists: number of times a set of relays were
prioritized to be measured
:var int measurement_attempts: number of relay measurements we attempted
:var int measurement_failures: number of measurement attempts that failed
:var RelayFailures relay_failures: number of relays we failed to measure
"""
def __init__(self):
self.consensus_count = None
self.prioritized_relays = None
self.prioritized_relay_lists = None
self.measurement_attempts = None
self.measurement_failures = None
self.relay_failures = RelayFailures()
class RelayFailures(object):
"""
Summary of the number of relays we were unable to measure.
:var int no_measurement: number of relays that did not have any successful
measurements
:var int insuffient_period: number of relays whose measurements were collected
over a period that was too small (1 day by default)
:var int insufficient_measurements: number of relays we did not collect
enough measurements for (2 by default)
:var int stale: number of relays whose latest measurement is too old (5 days
by default)
"""
def __init__(self):
self.no_measurement = None
self.insuffient_period = None
self.insufficient_measurements = None
self.stale = None
# Converts header attributes to a given type. Malformed fields should be
# ignored according to the spec.
def _str(val):
return val # already a str
def _int(val):
return int(val) if (val and val.isdigit()) else None
def _date(val):
try:
return stem.util.str_tools._parse_iso_timestamp(val)
except ValueError:
return None # not an iso formatted date
def _csv(val):
return list(map(lambda v: v.strip(), val.split(','))) if val is not None else None
# mapping of attributes => (header, type)
HEADER_ATTR = {
# version 1.1.0 introduced headers
'version': ('version', _str),
'software': ('software', _str),
'software_version': ('software_version', _str),
'earliest_bandwidth': ('earliest_bandwidth', _date),
'latest_bandwidth': ('latest_bandwidth', _date),
'created_at': ('file_created', _date),
'generated_at': ('generator_started', _date),
# version 1.2.0 additions
'consensus_size': ('number_consensus_relays', _int),
'eligible_count': ('number_eligible_relays', _int),
'eligible_percent': ('percent_eligible_relays', _int),
'min_count': ('minimum_number_eligible_relays', _int),
'min_percent': ('minimum_percent_eligible_relays', _int),
# version 1.3.0 additions
'scanner_country': ('scanner_country', _str),
'destinations_countries': ('destinations_countries', _csv),
# version 1.4.0 additions
'time_to_report_half_network': ('time_to_report_half_network', _int),
'recent_stats.consensus_count': ('recent_consensus_count', _int),
'recent_stats.prioritized_relay_lists': ('recent_priority_list_count', _int),
'recent_stats.prioritized_relays': ('recent_priority_relay_count', _int),
'recent_stats.measurement_attempts': ('recent_measurement_attempt_count', _int),
'recent_stats.measurement_failures': ('recent_measurement_failure_count', _int),
'recent_stats.relay_failures.no_measurement': ('recent_measurements_excluded_error_count', _int),
'recent_stats.relay_failures.insuffient_period': ('recent_measurements_excluded_near_count', _int),
'recent_stats.relay_failures.insufficient_measurements': ('recent_measurements_excluded_few_count', _int),
'recent_stats.relay_failures.stale': ('recent_measurements_excluded_old_count', _int),
}
HEADER_DEFAULT = {
'version': '1.0.0', # version field was added in 1.1.0
}
def _parse_file(descriptor_file, validate = False, **kwargs):
"""
Iterates over the bandwidth authority metrics in a file.
:param file descriptor_file: file with descriptor content
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: :class:`stem.descriptor.bandwidth_file.BandwidthFile` object
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
yield BandwidthFile(descriptor_file.read(), validate, **kwargs)
def _parse_header(descriptor, entries):
header = OrderedDict()
content = io.BytesIO(descriptor.get_bytes())
content.readline() # skip the first line, which should be the timestamp
index = 1
version_index = None
while True:
line = content.readline().strip()
if not line:
break # end of the content
elif line in (HEADER_DIV, HEADER_DIV_ALT):
break # end of header
elif not header and b'node_id=' in line:
break # version 1.0 doesn't have any headers
if b'=' in line:
key, value = stem.util.str_tools._to_unicode(line).split('=', 1)
header[key] = value
if key == 'version':
version_index = index
else:
raise ValueError("Header expected to be key=value pairs, but had '%s'" % line)
index += 1
descriptor.header = header
descriptor.recent_stats = RecentStats()
for full_attr, (keyword, cls) in HEADER_ATTR.items():
obj = descriptor
for attr in full_attr.split('.')[:-1]:
obj = getattr(obj, attr)
setattr(obj, full_attr.split('.')[-1], cls(header.get(keyword, HEADER_DEFAULT.get(full_attr))))
if version_index is not None and version_index != 1:
raise ValueError("The 'version' header must be in the second position")
def _parse_timestamp(descriptor, entries):
first_line = io.BytesIO(descriptor.get_bytes()).readline().strip()
if first_line.isdigit():
descriptor.timestamp = datetime.datetime.utcfromtimestamp(int(first_line))
else:
raise ValueError("First line should be a unix timestamp, but was '%s'" % first_line)
def _parse_body(descriptor, entries):
# In version 1.0.0 the body is everything after the first line. Otherwise
# it's everything after the header's divider.
content = io.BytesIO(descriptor.get_bytes())
if descriptor.version == '1.0.0':
content.readline() # skip the first line
else:
while content.readline().strip() not in (b'', HEADER_DIV, HEADER_DIV_ALT):
pass # skip the header
measurements = {}
for line in content.readlines():
line = stem.util.str_tools._to_unicode(line.strip())
attr = dict(_mappings_for('measurement', line))
fingerprint = attr.get('node_id', '').lstrip('$') # bwauths prefix fingerprints with '$'
if not fingerprint:
raise ValueError("Every meaurement must include 'node_id': %s" % line)
elif fingerprint in measurements:
raise ValueError('Relay %s is listed multiple times. It should only be present once.' % fingerprint)
measurements[fingerprint] = attr
descriptor.measurements = measurements
class BandwidthFile(Descriptor):
"""
Tor bandwidth authority measurements.
:var dict measurements: **\\*** mapping of relay fingerprints to their
bandwidth measurement metadata
:var dict header: **\\*** header metadata
:var datetime timestamp: **\\*** time when these metrics were published
:var str version: **\\*** document format version
:var str software: application that generated these metrics
:var str software_version: version of the application that generated these metrics
:var datetime earliest_bandwidth: time of the first sampling
:var datetime latest_bandwidth: time of the last sampling
:var datetime created_at: time when this file was created
:var datetime generated_at: time when collection of these metrics started
:var int consensus_size: number of relays in the consensus
:var int eligible_count: relays with enough measurements to be included
:var int eligible_percent: percentage of consensus with enough measurements
:var int min_count: minimum eligible relays for results to be provided
:var int min_percent: minimum measured percentage of the consensus
:var str scanner_country: country code where this scan took place
:var list destinations_countries: all country codes that were scanned
:var int time_to_report_half_network: estimated number of seconds required to
measure half the network, given recent measurements
:var RecentStats recent_stats: statistical information collected over the
last 'data_period' (by default five days)
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
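A hedged sketch of examining measurements, assuming a directory authority
is reachable (the 'bw' field here comes from the bandwidth-file spec)...
::
  import stem.descriptor.remote
  bandwidth_file = stem.descriptor.remote.get_bandwidth_file().run()[0]
  for fingerprint, measurement in bandwidth_file.measurements.items():
    print('%s measured %s' % (fingerprint, measurement.get('bw')))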
"""
TYPE_ANNOTATION_NAME = 'bandwidth-file'
ATTRIBUTES = {
'timestamp': (None, _parse_timestamp),
'header': ({}, _parse_header),
'measurements': ({}, _parse_body),
}
ATTRIBUTES.update(dict([(k, (None, _parse_header)) for k in HEADER_ATTR.keys()]))
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
"""
Creates descriptor content with the given attributes. This descriptor type
differs somewhat from others and treats our attr/exclude attributes as
follows...
* 'timestamp' is a reserved key for our mandatory header unix timestamp.
* 'content' is a reserved key for our bandwidth measurement lines.
* All other keys are treated as header fields.
For example...
::
BandwidthFile.content({
'timestamp': '12345',
'version': '1.2.0',
'content': [],
})
"""
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
header = OrderedDict(attr) if attr is not None else OrderedDict()
timestamp = header.pop('timestamp', str(int(time.time())))
content = header.pop('content', [])
version = header.get('version', HEADER_DEFAULT.get('version'))
lines = []
if 'timestamp' not in exclude:
lines.append(stem.util.str_tools._to_bytes(timestamp))
if version == '1.0.0' and header:
raise ValueError('Headers require BandwidthFile version 1.1 or later')
elif version != '1.0.0':
# ensure 'version' is the second header
if 'version' not in exclude:
lines.append(stem.util.str_tools._to_bytes('version=%s' % header.pop('version')))
for k, v in header.items():
lines.append(stem.util.str_tools._to_bytes('%s=%s' % (k, v)))
lines.append(HEADER_DIV)
for measurement in content:
lines.append(stem.util.str_tools._to_bytes(measurement))
return b'\n'.join(lines)
def __init__(self, raw_content, validate = False):
super(BandwidthFile, self).__init__(raw_content, lazy_load = not validate)
if validate:
_parse_timestamp(self, None)
_parse_header(self, None)
_parse_body(self, None)
stem-1.8.0/stem/descriptor/remote.py 0000664 0001750 0001750 00000121304 13526063314 020211 0 ustar atagar atagar 0000000 0000000 # Copyright 2013-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Module for remotely retrieving descriptors from directory authorities and
mirrors. This is the simplest method for getting current tor descriptor
information...
::
import stem.descriptor.remote
for desc in stem.descriptor.remote.get_server_descriptors():
if desc.exit_policy.is_exiting_allowed():
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
More custom downloading behavior can be done through the
:class:`~stem.descriptor.remote.DescriptorDownloader` class, which issues
:class:`~stem.descriptor.remote.Query` instances to get you descriptor
content. For example...
::
from stem.descriptor.remote import DescriptorDownloader
downloader = DescriptorDownloader(
use_mirrors = True,
timeout = 10,
)
query = downloader.get_server_descriptors()
print('Exit Relays:')
try:
for desc in query.run():
if desc.exit_policy.is_exiting_allowed():
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
print
print('Query took %0.2f seconds' % query.runtime)
except Exception as exc:
print('Unable to retrieve the server descriptors: %s' % exc)
::
get_instance - Provides a singleton DescriptorDownloader used for...
|- their_server_descriptor - provides the server descriptor of the relay we download from
|- get_server_descriptors - provides present server descriptors
|- get_extrainfo_descriptors - provides present extrainfo descriptors
|- get_microdescriptors - provides present microdescriptors with the given digests
|- get_consensus - provides the present consensus or router status entries
|- get_bandwidth_file - provides bandwidth heuristics used to make the next consensus
+- get_detached_signatures - authority signatures used to make the next consensus
Query - Asynchronous request to download tor descriptors
|- start - issues the query if it isn't already running
+- run - blocks until the request is finished and provides the results
DescriptorDownloader - Configurable class for issuing queries
|- use_directory_mirrors - use directory mirrors to download future descriptors
|- their_server_descriptor - provides the server descriptor of the relay we download from
|- get_server_descriptors - provides present server descriptors
|- get_extrainfo_descriptors - provides present extrainfo descriptors
|- get_microdescriptors - provides present microdescriptors with the given digests
|- get_consensus - provides the present consensus or router status entries
|- get_vote - provides an authority's vote for the next consensus
|- get_key_certificates - provides present authority key certificates
|- get_bandwidth_file - provides bandwidth heuristics used to make the next consensus
|- get_detached_signatures - authority signatures used to make the next consensus
+- query - request an arbitrary descriptor resource
.. versionadded:: 1.1.0
.. data:: MAX_FINGERPRINTS
Maximum number of descriptors that can be requested at a time by their
fingerprints.
.. data:: MAX_MICRODESCRIPTOR_HASHES
Maximum number of microdescriptors that can be requested at a time by their
hashes.
.. data:: Compression (enum)
Compression when downloading descriptors.
.. versionadded:: 1.7.0
=============== ===========
Compression Description
=============== ===========
**PLAINTEXT** Uncompressed data.
**GZIP** `GZip compression `_.
**ZSTD** `Zstandard compression `_, this requires the `zstandard module `_.
**LZMA** `LZMA compression `_, this requires the `lzma module `_.
=============== ===========
"""
import io
import random
import socket
import sys
import threading
import time
import stem
import stem.client
import stem.descriptor
import stem.descriptor.networkstatus
import stem.directory
import stem.prereq
import stem.util.enum
import stem.util.tor_tools
from stem.util import log, str_tools
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
# TODO: remove in stem 2.x, replaced with stem.descriptor.Compression
Compression = stem.util.enum.Enum(
('PLAINTEXT', 'identity'),
('GZIP', 'gzip'), # can also be 'deflate'
('ZSTD', 'x-zstd'),
('LZMA', 'x-tor-lzma'),
)
COMPRESSION_MIGRATION = {
'identity': stem.descriptor.Compression.PLAINTEXT,
'gzip': stem.descriptor.Compression.GZIP,
'x-zstd': stem.descriptor.Compression.ZSTD,
'x-tor-lzma': stem.descriptor.Compression.LZMA,
}
# Tor has a limited number of descriptors we can fetch explicitly by their
# fingerprint or hashes due to a limit on the url length by squid proxies.
MAX_FINGERPRINTS = 96
MAX_MICRODESCRIPTOR_HASHES = 90
SINGLETON_DOWNLOADER = None
# Detached signatures do *not* have a specified type annotation. But our
# parsers expect that all descriptors have a type. As such we make one up.
# This may change in the future if these ever get an official @type.
#
# https://trac.torproject.org/projects/tor/ticket/28615
DETACHED_SIGNATURE_TYPE = 'detached-signature'
# Some authorities intentionally break their DirPort to discourage DOS. In
# particular they throttle the rate to such a degree that requests can take
# hours to complete. Unfortunately Python's socket timeouts only kick in
# when we stop receiving data, so these 'sandtraps' cause our downloads to
# hang pretty much indefinitely.
#
# Best we can do is simply avoid attempting to use them in the first place.
DIR_PORT_BLACKLIST = ('tor26', 'Serge')
def get_instance():
"""
Provides the singleton :class:`~stem.descriptor.remote.DescriptorDownloader`
used for this module's shorthand functions.
.. versionadded:: 1.5.0
:returns: singleton :class:`~stem.descriptor.remote.DescriptorDownloader` instance
"""
global SINGLETON_DOWNLOADER
if SINGLETON_DOWNLOADER is None:
SINGLETON_DOWNLOADER = DescriptorDownloader()
return SINGLETON_DOWNLOADER
def their_server_descriptor(**query_args):
"""
Provides the server descriptor of the relay we're downloading from.
.. versionadded:: 1.7.0
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the server descriptors
"""
return get_instance().their_server_descriptor(**query_args)
def get_server_descriptors(fingerprints = None, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_server_descriptors`
on our singleton instance.
.. versionadded:: 1.5.0
"""
return get_instance().get_server_descriptors(fingerprints, **query_args)
def get_extrainfo_descriptors(fingerprints = None, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_extrainfo_descriptors`
on our singleton instance.
.. versionadded:: 1.5.0
"""
return get_instance().get_extrainfo_descriptors(fingerprints, **query_args)
def get_microdescriptors(hashes, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_microdescriptors`
on our singleton instance.
.. versionadded:: 1.8.0
"""
return get_instance().get_microdescriptors(hashes, **query_args)
def get_consensus(authority_v3ident = None, microdescriptor = False, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_consensus`
on our singleton instance.
.. versionadded:: 1.5.0
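For example...
::
  import stem.descriptor.remote
  try:
    for entry in stem.descriptor.remote.get_consensus().run():
      print('%s (%s)' % (entry.nickname, entry.fingerprint))
  except Exception as exc:
    print('Unable to download the consensus: %s' % exc)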
"""
return get_instance().get_consensus(authority_v3ident, microdescriptor, **query_args)
def get_bandwidth_file(**query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_bandwidth_file`
on our singleton instance.
.. versionadded:: 1.8.0
"""
return get_instance().get_bandwidth_file(**query_args)
def get_detached_signatures(**query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_detached_signatures`
on our singleton instance.
.. versionadded:: 1.8.0
"""
return get_instance().get_detached_signatures(**query_args)
class Query(object):
"""
Asynchronous request for descriptor content from a directory authority or
mirror. These can either be made through the
:class:`~stem.descriptor.remote.DescriptorDownloader` or directly for more
advanced usage.
To block on the response and get results either call
:func:`~stem.descriptor.remote.Query.run` or iterate over the Query. The
:func:`~stem.descriptor.remote.Query.run` method passes along any errors that
arise...
::
from stem.descriptor.remote import Query
query = Query(
'/tor/server/all',
timeout = 30,
)
print('Current relays:')
try:
for desc in Query('/tor/server/all', 'server-descriptor 1.0').run():
print(desc.fingerprint)
except Exception as exc:
print('Unable to retrieve the server descriptors: %s' % exc)
... while iterating fails silently...
::
print('Current relays:')
for desc in Query('/tor/server/all', 'server-descriptor 1.0'):
print(desc.fingerprint)
In either case exceptions are available via our 'error' attribute.
Tor provides quite a few different descriptor resources via its directory
protocol (see section 4.2 and later of the `dir-spec
`_).
Commonly useful ones include...
=============================================== ===========
Resource Description
=============================================== ===========
/tor/server/all                                 all present server descriptors
/tor/server/fp/<fp1>+<fp2>+<fp3>                server descriptors with the given fingerprints
/tor/extra/all                                  all present extrainfo descriptors
/tor/extra/fp/<fp1>+<fp2>+<fp3>                 extrainfo descriptors with the given fingerprints
/tor/micro/d/<hash1>-<hash2>                    microdescriptors with the given hashes
/tor/status-vote/current/consensus              present consensus
/tor/status-vote/current/consensus-microdesc    present microdescriptor consensus
/tor/status-vote/next/bandwidth                 bandwidth authority heuristics for the next consensus
/tor/status-vote/next/consensus-signatures      detached signatures, used for making the next consensus
/tor/keys/all                                   key certificates for the authorities
/tor/keys/fp/<v3ident1>+<v3ident2>              key certificates for specific authorities
=============================================== ===========
**ZSTD** compression requires `zstandard
`_, and **LZMA** requires the `lzma
module `_.
For legacy reasons if our resource has a '.z' suffix then our **compression**
argument is overwritten with Compression.GZIP.
.. versionchanged:: 1.7.0
Added support for downloading from ORPorts.
.. versionchanged:: 1.7.0
Added the compression argument.
.. versionchanged:: 1.7.0
Added the reply_headers attribute.
The class this provides changed between Python versions. In python2
this was called httplib.HTTPMessage, whereas in python3 the class was
renamed to http.client.HTTPMessage.
.. versionchanged:: 1.7.0
Endpoints are now expected to be :class:`~stem.DirPort` or
:class:`~stem.ORPort` instances. Usage of tuples for this
argument is deprecated and will be removed in the future.
.. versionchanged:: 1.7.0
Avoid downloading from tor26. This directory authority throttles its
DirPort to such an extent that requests either time out or take on the
order of minutes.
.. versionchanged:: 1.7.0
Avoid downloading from Bifroest. This is the bridge authority so it
doesn't vote in the consensus, and apparently times out frequently.
.. versionchanged:: 1.8.0
Serge has replaced Bifroest as our bridge authority. Avoiding descriptor
downloads from it instead.
.. versionchanged:: 1.8.0
Defaulting to gzip compression rather than plaintext downloads.
.. versionchanged:: 1.8.0
Using :class:`~stem.descriptor.__init__.Compression` for our compression
argument, usage of strings or this module's Compression enum is deprecated
and will be removed in stem 2.x.
:var str resource: resource being fetched, such as '/tor/server/all'
:var str descriptor_type: type of descriptors being fetched (for options see
:func:`~stem.descriptor.__init__.parse_file`), this is guessed from the
resource if **None**
:var list endpoints: :class:`~stem.DirPort` or :class:`~stem.ORPort` of the
authority or mirror we're querying, this uses authorities if undefined
:var list compression: list of :data:`stem.descriptor.Compression`
we're willing to accept, when none are mutually supported downloads fall
back to Compression.PLAINTEXT
:var int retries: number of times to attempt the request if downloading it
fails
:var bool fall_back_to_authority: when retrying request issues the last
request to a directory authority if **True**
:var str content: downloaded descriptor content
:var Exception error: exception if a problem occurred
:var bool is_done: flag that indicates if our request has finished
:var float start_time: unix timestamp when we first started running
:var http.client.HTTPMessage reply_headers: headers provided in the response,
**None** if we haven't yet made our request
:var float runtime: time our query took, this is **None** if it's not yet
finished
:var bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:var stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:var dict kwargs: additional arguments for the descriptor constructor
Following are only applicable when downloading from a
:class:`~stem.DirPort`...
:var float timeout: duration before we'll time out our request
:var str download_url: last url used to download the descriptor, this is
unset until we've actually made a download attempt
:param bool start: start making the request when constructed (default is **True**)
:param bool block: only return after the request has been completed, this is
the same as running **query.run(True)** (default is **False**)
"""
def __init__(self, resource, descriptor_type = None, endpoints = None, compression = (Compression.GZIP,), retries = 2, fall_back_to_authority = False, timeout = None, start = True, block = False, validate = False, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
if not resource.startswith('/'):
raise ValueError("Resources should start with a '/': %s" % resource)
if resource.endswith('.z'):
compression = [Compression.GZIP]
resource = resource[:-2]
elif not compression:
compression = [Compression.PLAINTEXT]
else:
if isinstance(compression, str):
compression = [compression] # caller provided only a single option
if Compression.ZSTD in compression and not stem.prereq.is_zstd_available():
compression.remove(Compression.ZSTD)
if Compression.LZMA in compression and not stem.prereq.is_lzma_available():
compression.remove(Compression.LZMA)
if not compression:
compression = [Compression.PLAINTEXT]
# TODO: Normalize from our old compression enum to
# stem.descriptor.Compression. This will get removed in Stem 2.x.
new_compression = []
for legacy_compression in compression:
if isinstance(legacy_compression, stem.descriptor._Compression):
new_compression.append(legacy_compression)
elif legacy_compression in COMPRESSION_MIGRATION:
new_compression.append(COMPRESSION_MIGRATION[legacy_compression])
else:
raise ValueError("'%s' (%s) is not a recognized type of compression" % (legacy_compression, type(legacy_compression).__name__))
if descriptor_type:
self.descriptor_type = descriptor_type
else:
self.descriptor_type = _guess_descriptor_type(resource)
self.endpoints = []
if endpoints:
for endpoint in endpoints:
if isinstance(endpoint, tuple) and len(endpoint) == 2:
self.endpoints.append(stem.DirPort(endpoint[0], endpoint[1])) # TODO: remove this in stem 2.0
elif isinstance(endpoint, (stem.ORPort, stem.DirPort)):
self.endpoints.append(endpoint)
else:
raise ValueError("Endpoints must be an stem.ORPort, stem.DirPort, or two value tuple. '%s' is a %s." % (endpoint, type(endpoint).__name__))
self.resource = resource
self.compression = new_compression
self.retries = retries
self.fall_back_to_authority = fall_back_to_authority
self.content = None
self.error = None
self.is_done = False
self.download_url = None
self.start_time = None
self.timeout = timeout
self.runtime = None
self.validate = validate
self.document_handler = document_handler
self.reply_headers = None
self.kwargs = kwargs
self._downloader_thread = None
self._downloader_thread_lock = threading.RLock()
if start:
self.start()
if block:
self.run(True)
def start(self):
"""
Starts downloading the descriptors if we haven't started already.
"""
with self._downloader_thread_lock:
if self._downloader_thread is None:
self._downloader_thread = threading.Thread(
name = 'Descriptor query',
target = self._download_descriptors,
args = (self.retries, self.timeout)
)
self._downloader_thread.setDaemon(True)
self._downloader_thread.start()
def run(self, suppress = False):
"""
Blocks until our request is complete then provides the descriptors. If we
haven't yet started our request then this does so.
:param bool suppress: avoids raising exceptions if **True**
:returns: list for the requested :class:`~stem.descriptor.__init__.Descriptor` instances
:raises:
Using the iterator can fail with the following if **suppress** is
**False**...
* **ValueError** if the descriptor contents is malformed
* :class:`~stem.DownloadTimeout` if our request timed out
* :class:`~stem.DownloadFailed` if our request fails
"""
return list(self._run(suppress))
def _run(self, suppress):
with self._downloader_thread_lock:
self.start()
self._downloader_thread.join()
if self.error:
if suppress:
return
raise self.error
else:
if self.content is None:
if suppress:
return
raise ValueError('BUG: _download_descriptors() finished without either results or an error')
try:
# TODO: special handling until we have an official detached
# signature @type...
#
# https://trac.torproject.org/projects/tor/ticket/28615
if self.descriptor_type.startswith(DETACHED_SIGNATURE_TYPE):
results = stem.descriptor.networkstatus._parse_file_detached_sigs(
io.BytesIO(self.content),
validate = self.validate,
)
else:
results = stem.descriptor.parse_file(
io.BytesIO(self.content),
self.descriptor_type,
validate = self.validate,
document_handler = self.document_handler,
**self.kwargs
)
for desc in results:
yield desc
except ValueError as exc:
self.error = exc # encountered a parsing error
if suppress:
return
raise self.error
def __iter__(self):
for desc in self._run(True):
yield desc
def _pick_endpoint(self, use_authority = False):
"""
Provides an endpoint to query. If we have multiple endpoints then one
is picked at random.
:param bool use_authority: ignores our endpoints and uses a directory
authority instead
:returns: :class:`stem.Endpoint` for the location to be downloaded
from by this request
"""
if use_authority or not self.endpoints:
picked = random.choice([auth for auth in stem.directory.Authority.from_cache().values() if auth.nickname not in DIR_PORT_BLACKLIST])
return stem.DirPort(picked.address, picked.dir_port)
else:
return random.choice(self.endpoints)
def _download_descriptors(self, retries, timeout):
try:
self.start_time = time.time()
endpoint = self._pick_endpoint(use_authority = retries == 0 and self.fall_back_to_authority)
if isinstance(endpoint, stem.ORPort):
downloaded_from = 'ORPort %s:%s (resource %s)' % (endpoint.address, endpoint.port, self.resource)
self.content, self.reply_headers = _download_from_orport(endpoint, self.compression, self.resource)
elif isinstance(endpoint, stem.DirPort):
self.download_url = 'http://%s:%i/%s' % (endpoint.address, endpoint.port, self.resource.lstrip('/'))
downloaded_from = self.download_url
self.content, self.reply_headers = _download_from_dirport(self.download_url, self.compression, timeout)
else:
raise ValueError("BUG: endpoints can only be ORPorts or DirPorts, '%s' was a %s" % (endpoint, type(endpoint).__name__))
self.runtime = time.time() - self.start_time
log.trace('Descriptors retrieved from %s in %0.2fs' % (downloaded_from, self.runtime))
except:
exc = sys.exc_info()[1]
if timeout is not None:
timeout -= time.time() - self.start_time
if retries > 0 and (timeout is None or timeout > 0):
log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc))
return self._download_descriptors(retries - 1, timeout)
else:
log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc))
self.error = exc
finally:
self.is_done = True
class DescriptorDownloader(object):
"""
Configurable class that issues :class:`~stem.descriptor.remote.Query`
instances on your behalf.
:param bool use_mirrors: downloads the present consensus and uses the directory
mirrors to fetch future requests, this fails silently if the consensus
cannot be downloaded
:param default_args: default arguments for the
:class:`~stem.descriptor.remote.Query` constructor
"""
def __init__(self, use_mirrors = False, **default_args):
self._default_args = default_args
self._endpoints = None
if use_mirrors:
try:
start_time = time.time()
self.use_directory_mirrors()
log.debug('Retrieved directory mirrors (took %0.2fs)' % (time.time() - start_time))
except Exception as exc:
log.debug('Unable to retrieve directory mirrors: %s' % exc)
def use_directory_mirrors(self):
"""
Downloads the present consensus and configures ourselves to use directory
mirrors, in addition to authorities.
:returns: :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`
from which we got the directory mirrors
:raises: **Exception** if unable to determine the directory mirrors
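A short sketch...
::
  from stem.descriptor.remote import DescriptorDownloader
  downloader = DescriptorDownloader()
  downloader.use_directory_mirrors()  # later queries can use mirrors rather than just the authorities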
"""
directories = [auth for auth in stem.directory.Authority.from_cache().values() if auth.nickname not in DIR_PORT_BLACKLIST]
new_endpoints = set([(directory.address, directory.dir_port) for directory in directories])
consensus = list(self.get_consensus(document_handler = stem.descriptor.DocumentHandler.DOCUMENT).run())[0]
for desc in consensus.routers.values():
if stem.Flag.V2DIR in desc.flags and desc.dir_port:
new_endpoints.add((desc.address, desc.dir_port))
# we need our endpoints to be a list rather than set for random.choice()
self._endpoints = list(new_endpoints)
return consensus
def their_server_descriptor(self, **query_args):
"""
Provides the server descriptor of the relay we're downloading from.
.. versionadded:: 1.7.0
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the server descriptors
"""
return self.query('/tor/server/authority', **query_args)
def get_server_descriptors(self, fingerprints = None, **query_args):
"""
Provides the server descriptors with the given fingerprints. If no
fingerprints are provided then this returns all descriptors known
by the relay.
:param str,list fingerprints: fingerprint or list of fingerprints to be
retrieved, gets all descriptors if **None**
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the server descriptors
:raises: **ValueError** if we request more than 96 descriptors by their
fingerprints (this is due to a limit on the url length by squid proxies).
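For example, fetching a single descriptor by fingerprint (the fingerprint
below is purely illustrative)...
::
  downloader = DescriptorDownloader()
  desc = downloader.get_server_descriptors('9695DFC35FFEB861329B9F1AB04C46397020CE31').run()[0]
  print('%s (%s)' % (desc.nickname, desc.fingerprint))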
"""
resource = '/tor/server/all'
if isinstance(fingerprints, str):
fingerprints = [fingerprints]
if fingerprints:
if len(fingerprints) > MAX_FINGERPRINTS:
raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS)
resource = '/tor/server/fp/%s' % '+'.join(fingerprints)
return self.query(resource, **query_args)
def get_extrainfo_descriptors(self, fingerprints = None, **query_args):
"""
Provides the extrainfo descriptors with the given fingerprints. If no
fingerprints are provided then this returns all descriptors in the present
consensus.
:param str,list fingerprints: fingerprint or list of fingerprints to be
retrieved, gets all descriptors if **None**
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the extrainfo descriptors
:raises: **ValueError** if we request more than 96 descriptors by their
fingerprints (this is due to a limit on the url length by squid proxies).
"""
resource = '/tor/extra/all'
if isinstance(fingerprints, str):
fingerprints = [fingerprints]
if fingerprints:
if len(fingerprints) > MAX_FINGERPRINTS:
raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS)
resource = '/tor/extra/fp/%s' % '+'.join(fingerprints)
return self.query(resource, **query_args)
def get_microdescriptors(self, hashes, **query_args):
"""
Provides the microdescriptors with the given hashes. To get these see the
**microdescriptor_digest** attribute of
:class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`.
Note that these are only provided via the **microdescriptor consensus**.
For example...
::
>>> import stem.descriptor.remote
>>> consensus = stem.descriptor.remote.get_consensus(microdescriptor = True).run()
>>> my_router_status_entry = list(filter(lambda desc: desc.nickname == 'caersidi', consensus))[0]
>>> print(my_router_status_entry.microdescriptor_digest)
IQI5X2A5p0WVN/MgwncqOaHF2f0HEGFEaxSON+uKRhU
>>> my_microdescriptor = stem.descriptor.remote.get_microdescriptors([my_router_status_entry.microdescriptor_digest]).run()[0]
>>> print(my_microdescriptor)
onion-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAOJo9yyVgG8ksEHQibqPIEbLieI6rh1EACRPiDiV21YObb+9QEHaR3Cf
FNAzDbGhbvADLBB7EzuViL8w+eXQUOaIsJRdymh/wuUJ78bv5oEIJhthKq/Uqa4P
wKHXSZixwAHfy8NASTX3kxu9dAHWU3Owb+4W4lR2hYM0ZpoYYkThAgMBAAE=
-----END RSA PUBLIC KEY-----
ntor-onion-key kWOHNd+2uBlMpcIUbbpFLiq/rry66Ep6MlwmNpwzcBg=
id ed25519 xE/GeYImYAIB0RbzJXFL8kDLpDrj/ydCuCdvOgC4F/4
:param str,list hashes: microdescriptor hash or list of hashes to be
retrieved
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the microdescriptors
:raises: **ValueError** if we request more than 90 microdescriptors by their
hashes (this is due to a limit on the url length by squid proxies).
"""
if isinstance(hashes, str):
hashes = [hashes]
if len(hashes) > MAX_MICRODESCRIPTOR_HASHES:
raise ValueError('Unable to request more than %i microdescriptors at a time by their hashes' % MAX_MICRODESCRIPTOR_HASHES)
return self.query('/tor/micro/d/%s' % '-'.join(hashes), **query_args)
def get_consensus(self, authority_v3ident = None, microdescriptor = False, **query_args):
"""
Provides the present router status entries.
.. versionchanged:: 1.5.0
Added the microdescriptor argument.
:param str authority_v3ident: fingerprint of the authority key for which
to get the consensus, see `'v3ident' in tor's config.c
`_
for the values.
:param bool microdescriptor: provides the microdescriptor consensus if
**True**, standard consensus otherwise
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the router status
entries
"""
if microdescriptor:
resource = '/tor/status-vote/current/consensus-microdesc'
else:
resource = '/tor/status-vote/current/consensus'
if authority_v3ident:
resource += '/%s' % authority_v3ident
consensus_query = self.query(resource, **query_args)
# if we're performing validation then check that it's signed by the
# authority key certificates
if consensus_query.validate and consensus_query.document_handler == stem.descriptor.DocumentHandler.DOCUMENT and stem.prereq.is_crypto_available():
consensus = list(consensus_query.run())[0]
key_certs = self.get_key_certificates(**query_args).run()
consensus.validate_signatures(key_certs)
return consensus_query
def get_vote(self, authority, **query_args):
"""
Provides the present vote for a given directory authority.
:param stem.directory.Authority authority: authority for which to retrieve a vote for
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the router status
entries
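For example, assuming 'moria1' is among our cached authorities...
::
  import stem.directory
  from stem.descriptor.remote import DescriptorDownloader
  downloader = DescriptorDownloader()
  authority = stem.directory.Authority.from_cache()['moria1']
  for entry in downloader.get_vote(authority).run():
    print(entry.fingerprint)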
"""
resource = '/tor/status-vote/current/authority'
if 'endpoint' not in query_args:
query_args['endpoints'] = [(authority.address, authority.dir_port)]
return self.query(resource, **query_args)
def get_key_certificates(self, authority_v3idents = None, **query_args):
"""
Provides the key certificates for authorities with the given fingerprints.
If no fingerprints are provided then this returns all present key
certificates.
:param str authority_v3idents: fingerprint or list of fingerprints of the
authority keys, see `'v3ident' in tor's config.c
`_
for the values.
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the key certificates
:raises: **ValueError** if we request more than 96 key certificates by
their identity fingerprints (this is due to a limit on the url length by
squid proxies).
"""
resource = '/tor/keys/all'
if isinstance(authority_v3idents, str):
authority_v3idents = [authority_v3idents]
if authority_v3idents:
if len(authority_v3idents) > MAX_FINGERPRINTS:
raise ValueError('Unable to request more than %i key certificates at a time by their identity fingerprints' % MAX_FINGERPRINTS)
resource = '/tor/keys/fp/%s' % '+'.join(authority_v3idents)
return self.query(resource, **query_args)
def get_bandwidth_file(self, **query_args):
"""
Provides the bandwidth authority heuristics used to make the next
consensus.
.. versionadded:: 1.8.0
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the bandwidth
authority heuristics
"""
return self.query('/tor/status-vote/next/bandwidth', **query_args)
def get_detached_signatures(self, **query_args):
"""
Provides the detached signatures that will be used to make the next
consensus. Please note that **these are only available during minutes 55-60
each hour**. If requested during minutes 0-55 tor will not service these
requests, and this will fail with a 404.
For example...
::
import stem.descriptor.remote
detached_sigs = stem.descriptor.remote.get_detached_signatures().run()[0]
for i, sig in enumerate(detached_sigs.signatures):
print('Signature %i is from %s' % (i + 1, sig.identity))
**When available (minutes 55-60 of the hour)**
::
% python demo.py
Signature 1 is from 0232AF901C31A04EE9848595AF9BB7620D4C5B2E
Signature 2 is from 14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4
Signature 3 is from 23D15D965BC35114467363C165C4F724B64B4F66
...
**When unavailable (minutes 0-55 of the hour)**
::
% python demo.py
Traceback (most recent call last):
File "demo.py", line 3, in
detached_sigs = stem.descriptor.remote.get_detached_signatures().run()[0]
File "/home/atagar/Desktop/stem/stem/descriptor/remote.py", line 533, in run
return list(self._run(suppress))
File "/home/atagar/Desktop/stem/stem/descriptor/remote.py", line 544, in _run
raise self.error
stem.DownloadFailed: Failed to download from http://154.35.175.225:80/tor/status-vote/next/consensus-signatures (HTTPError): Not found
.. versionadded:: 1.8.0
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the detached
signatures
"""
return self.query('/tor/status-vote/next/consensus-signatures', **query_args)
def query(self, resource, **query_args):
"""
Issues a request for the given resource.
.. versionchanged:: 1.7.0
The **fall_back_to_authority** default when using this method is now
**False**, like the :class:`~stem.descriptor.Query` class.
:param str resource: resource being fetched, such as '/tor/server/all'
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the descriptors
:raises: **ValueError** if resource is clearly invalid or the descriptor
type can't be determined when 'descriptor_type' is **None**
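For instance, fetching the authority key certificates...
::
  downloader = DescriptorDownloader()
  for cert in downloader.query('/tor/keys/all').run():
    print(cert.fingerprint)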
"""
args = dict(self._default_args)
args.update(query_args)
if 'endpoints' not in args:
args['endpoints'] = self._endpoints
return Query(resource, **args)
def _download_from_orport(endpoint, compression, resource):
"""
Downloads descriptors from the given orport. Payload is just like an http
response (headers and all)...
::
HTTP/1.0 200 OK
Date: Mon, 23 Apr 2018 18:43:47 GMT
Content-Type: text/plain
X-Your-Address-Is: 216.161.254.25
Content-Encoding: identity
Expires: Wed, 25 Apr 2018 18:43:47 GMT
router dannenberg 193.23.244.244 443 0 80
identity-ed25519
... rest of the descriptor content...
:param stem.ORPort endpoint: endpoint to download from
:param list compression: compression methods for the request
:param str resource: descriptor resource to download
:returns: two value tuple of the form (data, reply_headers)
:raises:
* :class:`stem.ProtocolError` if not a valid descriptor response
* :class:`stem.SocketError` if unable to establish a connection
"""
link_protocols = endpoint.link_protocols if endpoint.link_protocols else [3]
with stem.client.Relay.connect(endpoint.address, endpoint.port, link_protocols) as relay:
with relay.create_circuit() as circ:
request = '\r\n'.join((
'GET %s HTTP/1.0' % resource,
'Accept-Encoding: %s' % ', '.join(map(lambda c: c.encoding, compression)),
'User-Agent: %s' % stem.USER_AGENT,
)) + '\r\n\r\n'
response = circ.directory(request, stream_id = 1)
first_line, data = response.split(b'\r\n', 1)
header_data, body_data = data.split(b'\r\n\r\n', 1)
if not first_line.startswith(b'HTTP/1.0 2'):
raise stem.ProtocolError("Response should begin with HTTP success, but was '%s'" % str_tools._to_unicode(first_line))
headers = {}
for line in str_tools._to_unicode(header_data).splitlines():
if ': ' not in line:
raise stem.ProtocolError("'%s' is not a HTTP header:\n\n%s" % line)
key, value = line.split(': ', 1)
headers[key] = value
return _decompress(body_data, headers.get('Content-Encoding')), headers
def _download_from_dirport(url, compression, timeout):
"""
Downloads descriptors from the given url.
:param str url: dirport url from which to download from
:param list compression: compression methods for the request
:param float timeout: duration before we'll time out our request
:returns: two value tuple of the form (data, reply_headers)
:raises:
* :class:`~stem.DownloadTimeout` if our request timed out
* :class:`~stem.DownloadFailed` if our request fails
"""
try:
response = urllib.urlopen(
urllib.Request(
url,
headers = {
'Accept-Encoding': ', '.join(map(lambda c: c.encoding, compression)),
'User-Agent': stem.USER_AGENT,
}
),
timeout = timeout,
)
except socket.timeout as exc:
raise stem.DownloadTimeout(url, exc, sys.exc_info()[2], timeout)
except:
exc, stacktrace = sys.exc_info()[1:3]
raise stem.DownloadFailed(url, exc, stacktrace)
return _decompress(response.read(), response.headers.get('Content-Encoding')), response.headers
def _decompress(data, encoding):
"""
Decompresses descriptor data.
Tor doesn't include compression headers. As such when using gzip we
need to include '32' for automatic header detection...
https://stackoverflow.com/questions/3122145/zlib-error-error-3-while-decompressing-incorrect-header-check/22310760#22310760
... and with zstd we need to use the streaming API.
:param bytes data: data we received
:param str encoding: 'Content-Encoding' header of the response
:returns: **bytes** with the decompressed data
:raises:
* **ValueError** if encoding is unrecognized
* **ImportError** if missing the decompression module
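A minimal sketch, assuming 'body' holds gzip compressed response bytes...
::
  plaintext = _decompress(body, 'gzip')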
"""
if encoding == 'deflate':
return stem.descriptor.Compression.GZIP.decompress(data)
for compression in stem.descriptor.Compression:
if encoding == compression.encoding:
return compression.decompress(data)
raise ValueError("'%s' isn't a recognized type of encoding" % encoding)
def _guess_descriptor_type(resource):
# Attempts to determine the descriptor type based on the resource url. This
# raises a ValueError if the resource isn't recognized.
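#
# For instance, a couple illustrative inputs and their results...
#
#   _guess_descriptor_type('/tor/server/all')  # 'server-descriptor 1.0'
#   _guess_descriptor_type('/tor/keys/all')    # 'dir-key-certificate-3 1.0'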
if resource.startswith('/tor/server/'):
return 'server-descriptor 1.0'
elif resource.startswith('/tor/extra/'):
return 'extra-info 1.0'
elif resource.startswith('/tor/micro/'):
return 'microdescriptor 1.0'
elif resource.startswith('/tor/keys/'):
return 'dir-key-certificate-3 1.0'
elif resource.startswith('/tor/status-vote/'):
# The following resource urls can be for the present consensus
# (/tor/status-vote/current/*) or the next (/tor/status-vote/next/*).
if resource.endswith('/consensus') or resource.endswith('/authority'):
return 'network-status-consensus-3 1.0'
elif resource.endswith('/consensus-microdesc'):
return 'network-status-microdesc-consensus-3 1.0'
elif resource.endswith('/consensus-signatures'):
return '%s 1.0' % DETACHED_SIGNATURE_TYPE
elif stem.util.tor_tools.is_valid_fingerprint(resource.split('/')[-1]):
return 'network-status-consensus-3 1.0'
elif resource.endswith('/bandwidth'):
return 'bandwidth-file 1.0'
raise ValueError("Unable to determine the descriptor type for '%s'" % resource)
def get_authorities():
"""
Provides cached Tor directory authority information. The directory
information is hardcoded into Tor and occasionally changes, so the
information this provides might not necessarily match your version of tor.
.. deprecated:: 1.7.0
Use stem.directory.Authority.from_cache() instead.
:returns: **dict** of **str** nicknames to :class:`~stem.directory.Authority` instances
"""
return DirectoryAuthority.from_cache()
# TODO: drop aliases in stem 2.0
Directory = stem.directory.Directory
DirectoryAuthority = stem.directory.Authority
FallbackDirectory = stem.directory.Fallback
stem-1.8.0/stem/descriptor/hidden_service.py 0000664 0001750 0001750 00000157657 13601502033 021703 0 ustar atagar atagar 0000000 0000000 # Copyright 2015-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor hidden service descriptors as described in Tor's `version 2
`_ and
`version 3 `_
rend-spec.
Unlike other descriptor types these describe a hidden service rather than a
relay. They're created by the service, and can only be fetched via relays with
the HSDir flag.
These are only available through the Controller's
:func:`~stem.control.Controller.get_hidden_service_descriptor` method.
**Module Overview:**
::
BaseHiddenServiceDescriptor - Common parent for hidden service descriptors
|- HiddenServiceDescriptorV2 - Version 2 hidden service descriptor
+- HiddenServiceDescriptorV3 - Version 3 hidden service descriptor
|- address_from_identity_key - convert an identity key to address
|- identity_key_from_address - convert an address to identity key
+- decrypt - decrypt and parse encrypted layers
OuterLayer - First encrypted layer of a hidden service v3 descriptor
InnerLayer - Second encrypted layer of a hidden service v3 descriptor
.. versionadded:: 1.4.0
"""
import base64
import binascii
import collections
import datetime
import hashlib
import io
import os
import struct
import time
import stem.client.datatype
import stem.descriptor.certificate
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.client.datatype import CertType
from stem.descriptor.certificate import ExtensionType, Ed25519Extension, Ed25519Certificate, Ed25519CertificateV1
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_bytes_for_block,
_value,
_values,
_parse_simple_line,
_parse_if_present,
_parse_int_line,
_parse_timestamp_line,
_parse_key_block,
_random_date,
_random_crypto_blob,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
try:
from cryptography.hazmat.backends.openssl.backend import backend
X25519_AVAILABLE = hasattr(backend, 'x25519_supported') and backend.x25519_supported()
except ImportError:
X25519_AVAILABLE = False
REQUIRED_V2_FIELDS = (
'rendezvous-service-descriptor',
'version',
'permanent-key',
'secret-id-part',
'publication-time',
'protocol-versions',
'signature',
)
REQUIRED_V3_FIELDS = (
'hs-descriptor',
'descriptor-lifetime',
'descriptor-signing-key-cert',
'revision-counter',
'superencrypted',
'signature',
)
INTRODUCTION_POINTS_ATTR = {
'identifier': None,
'address': None,
'port': None,
'onion_key': None,
'service_key': None,
'intro_authentication': [],
}
# introduction-point fields that can only appear once
SINGLE_INTRODUCTION_POINT_FIELDS = [
'introduction-point',
'ip-address',
'onion-port',
'onion-key',
'service-key',
]
BASIC_AUTH = 1
STEALTH_AUTH = 2
CHECKSUM_CONSTANT = b'.onion checksum'
SALT_LEN = 16
MAC_LEN = 32
S_KEY_LEN = 32
S_IV_LEN = 16
class DecryptionFailure(Exception):
"""
Failure to decrypt the hidden service descriptor's introduction-points.
"""
# TODO: rename in stem 2.x (add 'V2' and drop plural)
class IntroductionPoints(collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys())):
"""
Introduction point for a v2 hidden service.
:var str identifier: hash of this introduction point's identity key
:var str address: address of this introduction point
:var int port: port where this introduction point is listening
:var str onion_key: public key for communicating with this introduction point
:var str service_key: public key for communicating with this hidden service
:var list intro_authentication: tuples of the form (auth_type, auth_data) for
establishing a connection
"""
class IntroductionPointV3(collections.namedtuple('IntroductionPointV3', ['link_specifiers', 'onion_key_raw', 'auth_key_cert', 'enc_key_raw', 'enc_key_cert', 'legacy_key_raw', 'legacy_key_cert'])):
"""
Introduction point for a v3 hidden service.
.. versionadded:: 1.8.0
:var list link_specifiers: :class:`~stem.client.datatype.LinkSpecifier` where this service is reachable
:var unicode onion_key_raw: base64 ntor introduction point public key
:var stem.descriptor.certificate.Ed25519Certificate auth_key_cert: cross-certifier of the signing key with the auth key
:var unicode enc_key_raw: base64 introduction request encryption key
:var stem.descriptor.certificate.Ed25519Certificate enc_key_cert: cross-certifier of the signing key by the encryption key
:var str legacy_key_raw: base64 legacy introduction point RSA public key
:var str legacy_key_cert: base64 cross-certifier of the signing key by the legacy key
"""
@staticmethod
def parse(content):
"""
Parses an introduction point from its descriptor content.
:param str content: descriptor content to parse
:returns: :class:`~stem.descriptor.hidden_service.IntroductionPointV3` for the descriptor content
:raises: **ValueError** if descriptor content is malformed
"""
entry = _descriptor_components(content, False)
link_specifiers = IntroductionPointV3._parse_link_specifiers(_value('introduction-point', entry))
onion_key_line = _value('onion-key', entry)
onion_key = onion_key_line[5:] if onion_key_line.startswith('ntor ') else None
_, block_type, auth_key_cert = entry['auth-key'][0]
auth_key_cert = Ed25519Certificate.from_base64(auth_key_cert)
if block_type != 'ED25519 CERT':
raise ValueError('Expected auth-key to have an ed25519 certificate, but was %s' % block_type)
enc_key_line = _value('enc-key', entry)
enc_key = enc_key_line[5:] if enc_key_line.startswith('ntor ') else None
_, block_type, enc_key_cert = entry['enc-key-cert'][0]
enc_key_cert = Ed25519Certificate.from_base64(enc_key_cert)
if block_type != 'ED25519 CERT':
raise ValueError('Expected enc-key-cert to have an ed25519 certificate, but was %s' % block_type)
legacy_key = entry['legacy-key'][0][2] if 'legacy-key' in entry else None
legacy_key_cert = entry['legacy-key-cert'][0][2] if 'legacy-key-cert' in entry else None
return IntroductionPointV3(link_specifiers, onion_key, auth_key_cert, enc_key, enc_key_cert, legacy_key, legacy_key_cert)
@staticmethod
def create_for_address(address, port, expiration = None, onion_key = None, enc_key = None, auth_key = None, signing_key = None):
"""
Simplified constructor for a single address/port link specifier.
:param str address: IPv4 or IPv6 address where the service is reachable
:param int port: port where the service is reachable
:param datetime.datetime expiration: when certificates should expire
:param str onion_key: encoded, X25519PublicKey, or X25519PrivateKey onion key
:param str enc_key: encoded, X25519PublicKey, or X25519PrivateKey encryption key
:param str auth_key: encoded, Ed25519PublicKey, or Ed25519PrivateKey authentication key
:param cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey signing_key: service signing key
:returns: :class:`~stem.descriptor.hidden_service.IntroductionPointV3` with these attributes
:raises: **ValueError** if the address, port, or keys are malformed
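For instance, a minimal sketch that lets every key fall back to a freshly
generated default (assumes the cryptography module with ed25519 support)...

::

  intro_point = IntroductionPointV3.create_for_address('1.2.3.4', 9001)
  print(intro_point.encode())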
"""
if not stem.prereq.is_crypto_available(ed25519 = True):
raise ImportError("Introduction point creation requires the cryptography module's ed25519 support")
elif not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' is an invalid port" % port)
if stem.util.connection.is_valid_ipv4_address(address):
link_specifiers = [stem.client.datatype.LinkByIPv4(address, port)]
elif stem.util.connection.is_valid_ipv6_address(address):
link_specifiers = [stem.client.datatype.LinkByIPv6(address, port)]
else:
raise ValueError("'%s' is not a valid IPv4 or IPv6 address" % address)
return IntroductionPointV3.create_for_link_specifiers(link_specifiers, expiration = expiration, onion_key = onion_key, enc_key = enc_key, auth_key = auth_key, signing_key = signing_key)
@staticmethod
def create_for_link_specifiers(link_specifiers, expiration = None, onion_key = None, enc_key = None, auth_key = None, signing_key = None):
"""
Simplified constructor. For more sophisticated use cases you can use this
as a template for how introduction points are properly created.
:param list link_specifiers: series of stem.client.datatype.LinkSpecifier where the service is reachable
:param datetime.datetime expiration: when certificates should expire
:param str onion_key: encoded, X25519PublicKey, or X25519PrivateKey onion key
:param str enc_key: encoded, X25519PublicKey, or X25519PrivateKey encryption key
:param str auth_key: encoded, Ed25519PublicKey, or Ed25519PrivateKey authentication key
:param cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey signing_key: service signing key
:returns: :class:`~stem.descriptor.hidden_service.IntroductionPointV3` with these attributes
:raises: **ValueError** if the address, port, or keys are malformed
"""
if not stem.prereq.is_crypto_available(ed25519 = True):
raise ImportError("Introduction point creation requires the cryptography module's ed25519 support")
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
if expiration is None:
expiration = datetime.datetime.utcnow() + datetime.timedelta(hours = stem.descriptor.certificate.DEFAULT_EXPIRATION_HOURS)
onion_key = stem.util.str_tools._to_unicode(base64.b64encode(stem.util._pubkey_bytes(onion_key if onion_key else X25519PrivateKey.generate())))
enc_key = stem.util.str_tools._to_unicode(base64.b64encode(stem.util._pubkey_bytes(enc_key if enc_key else X25519PrivateKey.generate())))
auth_key = stem.util._pubkey_bytes(auth_key if auth_key else Ed25519PrivateKey.generate())
signing_key = signing_key if signing_key else Ed25519PrivateKey.generate()
extensions = [Ed25519Extension(ExtensionType.HAS_SIGNING_KEY, None, stem.util._pubkey_bytes(signing_key))]
auth_key_cert = Ed25519CertificateV1(CertType.HS_V3_INTRO_AUTH, expiration, 1, auth_key, extensions, signing_key = signing_key)
enc_key_cert = Ed25519CertificateV1(CertType.HS_V3_NTOR_ENC, expiration, 1, auth_key, extensions, signing_key = signing_key)
return IntroductionPointV3(link_specifiers, onion_key, auth_key_cert, enc_key, enc_key_cert, None, None)
def encode(self):
"""
Descriptor representation of this introduction point.
:returns: **str** for our descriptor representation
"""
lines = []
link_count = stem.client.datatype.Size.CHAR.pack(len(self.link_specifiers))
link_specifiers = link_count + b''.join([l.pack() for l in self.link_specifiers])
lines.append('introduction-point %s' % stem.util.str_tools._to_unicode(base64.b64encode(link_specifiers)))
lines.append('onion-key ntor %s' % self.onion_key_raw)
lines.append('auth-key\n' + self.auth_key_cert.to_base64(pem = True))
if self.enc_key_raw:
lines.append('enc-key ntor %s' % self.enc_key_raw)
lines.append('enc-key-cert\n' + self.enc_key_cert.to_base64(pem = True))
if self.legacy_key_raw:
lines.append('legacy-key\n' + self.legacy_key_raw)
if self.legacy_key_cert:
lines.append('legacy-key-cert\n' + self.legacy_key_cert)
return '\n'.join(lines)
def onion_key(self):
"""
Provides our ntor introduction point public key.
:returns: ntor :class:`~cryptography.hazmat.primitives.asymmetric.x25519.X25519PublicKey`
:raises:
* **ImportError** if the required cryptography module is unavailable
* **EnvironmentError** if OpenSSL x25519 unsupported
"""
return IntroductionPointV3._key_as(self.onion_key_raw, x25519 = True)
def auth_key(self):
"""
Provides our authentication certificate's public key.
:returns: :class:`~cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey`
:raises:
* **ImportError** if the required cryptography module is unavailable
* **EnvironmentError** if cryptography's ed25519 support is unavailable
"""
return IntroductionPointV3._key_as(self.auth_key_cert.key, ed25519 = True)
def enc_key(self):
"""
Provides our encryption key.
:returns: encryption :class:`~cryptography.hazmat.primitives.asymmetric.x25519.X25519PublicKey`
:raises:
* **ImportError** if the required cryptography module is unavailable
* **EnvironmentError** if OpenSSL x25519 unsupported
"""
return IntroductionPointV3._key_as(self.enc_key_raw, x25519 = True)
def legacy_key(self):
"""
Provides our legacy introduction point public key.
:returns: legacy :class:`~cryptography.hazmat.primitives.asymmetric.x25519.X25519PublicKey`
:raises:
* **ImportError** if the required cryptography module is unavailable
* **EnvironmentError** if OpenSSL x25519 unsupported
"""
return IntroductionPointV3._key_as(self.legacy_key_raw, x25519 = True)
@staticmethod
def _key_as(value, x25519 = False, ed25519 = False):
if value is None or (not x25519 and not ed25519):
return value
elif not stem.prereq.is_crypto_available():
raise ImportError('cryptography module unavailable')
if x25519:
if not X25519_AVAILABLE:
# without this check the cryptography module raises...
# cryptography.exceptions.UnsupportedAlgorithm: X25519 is not supported by this version of OpenSSL.
raise EnvironmentError('OpenSSL x25519 unsupported')
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PublicKey
return X25519PublicKey.from_public_bytes(base64.b64decode(value))
if ed25519:
if not stem.prereq.is_crypto_available(ed25519 = True):
raise EnvironmentError('cryptography ed25519 unsupported')
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PublicKey
return Ed25519PublicKey.from_public_bytes(value)
@staticmethod
def _parse_link_specifiers(content):
try:
content = base64.b64decode(content)
except Exception as exc:
raise ValueError('Unable to base64 decode introduction point (%s): %s' % (exc, content))
link_specifiers = []
count, content = stem.client.datatype.Size.CHAR.pop(content)
for i in range(count):
link_specifier, content = stem.client.datatype.LinkSpecifier.pop(content)
link_specifiers.append(link_specifier)
if content:
raise ValueError('Introduction point had excessive data (%s)' % content)
return link_specifiers
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(self.encode())
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, IntroductionPointV3) else False
def __ne__(self, other):
return not self == other
class AuthorizedClient(object):
"""
Client authorized to use a v3 hidden service.
.. versionadded:: 1.8.0
:var str id: base64 encoded client id
:var str iv: base64 encoded randomized initialization vector
:var str cookie: base64 encoded authentication cookie
"""
def __init__(self, id = None, iv = None, cookie = None):
self.id = stem.util.str_tools._to_unicode(id if id else base64.b64encode(os.urandom(8)).rstrip(b'='))
self.iv = stem.util.str_tools._to_unicode(iv if iv else base64.b64encode(os.urandom(16)).rstrip(b'='))
self.cookie = stem.util.str_tools._to_unicode(cookie if cookie else base64.b64encode(os.urandom(16)).rstrip(b'='))
def __hash__(self):
return stem.util._hash_attr(self, 'id', 'iv', 'cookie', cache = True)
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, AuthorizedClient) else False
def __ne__(self, other):
return not self == other
def _parse_file(descriptor_file, desc_type = None, validate = False, **kwargs):
"""
Iterates over the hidden service descriptors in a file.
:param file descriptor_file: file with descriptor content
:param class desc_type: BaseHiddenServiceDescriptor subclass
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.hidden_service.BaseHiddenServiceDescriptor`
subclass instances in the file, matching the given **desc_type**
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
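For example, a sketch using :func:`stem.descriptor.parse_file` with a
hypothetical path (the '@type hidden-service-descriptor 1.0' annotation is
how v2 hidden service descriptors are typed)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('/path/to/cached/descriptor', 'hidden-service-descriptor 1.0'):
    print(desc.descriptor_id)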
"""
if desc_type is None:
desc_type = HiddenServiceDescriptorV2
# Hidden service v3 ends with a signature line, whereas v2 has a pgp style
# block following it.
while True:
descriptor_content = _read_until_keywords('signature', descriptor_file, True)
if desc_type == HiddenServiceDescriptorV2:
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
if descriptor_content:
if descriptor_content[0].startswith(b'@type'):
descriptor_content = descriptor_content[1:]
yield desc_type(bytes.join(b'', descriptor_content), validate, **kwargs)
else:
break # done parsing file
def _decrypt_layer(encrypted_block, constant, revision_counter, subcredential, blinded_key):
if encrypted_block.startswith('-----BEGIN MESSAGE-----\n') and encrypted_block.endswith('\n-----END MESSAGE-----'):
encrypted_block = encrypted_block[24:-22]
try:
encrypted = base64.b64decode(encrypted_block)
except:
raise ValueError('Unable to decode encrypted block as base64')
if len(encrypted) < SALT_LEN + MAC_LEN:
raise ValueError('Encrypted block malformed (only %i bytes)' % len(encrypted))
salt = encrypted[:SALT_LEN]
ciphertext = encrypted[SALT_LEN:-MAC_LEN]
expected_mac = encrypted[-MAC_LEN:]
cipher, mac_for = _layer_cipher(constant, revision_counter, subcredential, blinded_key, salt)
if expected_mac != mac_for(ciphertext):
raise ValueError('Malformed mac (expected %s, but was %s)' % (expected_mac, mac_for(ciphertext)))
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
return stem.util.str_tools._to_unicode(plaintext)
def _encrypt_layer(plaintext, constant, revision_counter, subcredential, blinded_key):
salt = os.urandom(16)
cipher, mac_for = _layer_cipher(constant, revision_counter, subcredential, blinded_key, salt)
encryptor = cipher.encryptor()
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
encoded = base64.b64encode(salt + ciphertext + mac_for(ciphertext))
return b'-----BEGIN MESSAGE-----\n%s\n-----END MESSAGE-----' % b'\n'.join(stem.util.str_tools._split_by_length(encoded, 64))
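# Both descriptor layers share one construction (per rend-spec-v3): derive
# the AES-CTR key, IV, and MAC key with a SHAKE-256 KDF over
# blinded_key | subcredential | INT_8(revision_counter) | salt | constant,
# then authenticate the ciphertext with SHA3-256 over the length-prefixed
# MAC key and salt followed by the ciphertext.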
def _layer_cipher(constant, revision_counter, subcredential, blinded_key, salt):
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
kdf = hashlib.shake_256(blinded_key + subcredential + struct.pack('>Q', revision_counter) + salt + constant)
keys = kdf.digest(S_KEY_LEN + S_IV_LEN + MAC_LEN)
secret_key = keys[:S_KEY_LEN]
secret_iv = keys[S_KEY_LEN:S_KEY_LEN + S_IV_LEN]
mac_key = keys[S_KEY_LEN + S_IV_LEN:]
cipher = Cipher(algorithms.AES(secret_key), modes.CTR(secret_iv), default_backend())
mac_prefix = struct.pack('>Q', len(mac_key)) + mac_key + struct.pack('>Q', len(salt)) + salt
return cipher, lambda ciphertext: hashlib.sha3_256(mac_prefix + ciphertext).digest()
def _parse_protocol_versions_line(descriptor, entries):
value = _value('protocol-versions', entries)
try:
versions = [int(entry) for entry in value.split(',')]
except ValueError:
raise ValueError('protocol-versions line has non-numeric versions: protocol-versions %s' % value)
for v in versions:
if v <= 0:
raise ValueError('protocol-versions must be positive integers: %s' % value)
descriptor.protocol_versions = versions
def _parse_introduction_points_line(descriptor, entries):
_, block_type, block_contents = entries['introduction-points'][0]
if not block_contents or block_type != 'MESSAGE':
raise ValueError("'introduction-points' should be followed by a MESSAGE block, but was a %s" % block_type)
descriptor.introduction_points_encoded = block_contents
descriptor.introduction_points_auth = [] # field was never implemented in tor (#15190)
try:
descriptor.introduction_points_content = _bytes_for_block(block_contents)
except TypeError:
raise ValueError("'introduction-points' isn't base64 encoded content:\n%s" % block_contents)
def _parse_v3_outer_clients(descriptor, entries):
# "auth-client" client-id iv encrypted-cookie
clients = {}
for value in _values('auth-client', entries):
value_comp = value.split()
if len(value_comp) < 3:
raise ValueError('auth-client should have a client-id, iv, and cookie: auth-client %s' % value)
clients[value_comp[0]] = AuthorizedClient(value_comp[0], value_comp[1], value_comp[2])
descriptor.clients = clients
def _parse_v3_inner_formats(descriptor, entries):
value, formats = _value('create2-formats', entries), []
for entry in value.split(' '):
if not entry.isdigit():
raise ValueError("create2-formats should only contain integers, but was '%s'" % value)
formats.append(int(entry))
descriptor.formats = formats
def _parse_v3_introduction_points(descriptor, entries):
if hasattr(descriptor, '_unparsed_introduction_points'):
introduction_points = []
remaining = descriptor._unparsed_introduction_points
while remaining:
div = remaining.find(b'\nintroduction-point ', 10)
content, remaining = (remaining[:div], remaining[div + 1:]) if div != -1 else (remaining, b'')
introduction_points.append(IntroductionPointV3.parse(content))
descriptor.introduction_points = introduction_points
del descriptor._unparsed_introduction_points
_parse_v2_version_line = _parse_int_line('version', 'version', allow_negative = False)
_parse_rendezvous_service_descriptor_line = _parse_simple_line('rendezvous-service-descriptor', 'descriptor_id')
_parse_permanent_key_line = _parse_key_block('permanent-key', 'permanent_key', 'RSA PUBLIC KEY')
_parse_secret_id_part_line = _parse_simple_line('secret-id-part', 'secret_id_part')
_parse_publication_time_line = _parse_timestamp_line('publication-time', 'published')
_parse_v2_signature_line = _parse_key_block('signature', 'signature', 'SIGNATURE')
_parse_v3_version_line = _parse_int_line('hs-descriptor', 'version', allow_negative = False)
_parse_lifetime_line = _parse_int_line('descriptor-lifetime', 'lifetime', allow_negative = False)
_parse_signing_cert = Ed25519Certificate._from_descriptor('descriptor-signing-key-cert', 'signing_cert')
_parse_revision_counter_line = _parse_int_line('revision-counter', 'revision_counter', allow_negative = False)
_parse_superencrypted_line = _parse_key_block('superencrypted', 'superencrypted', 'MESSAGE')
_parse_v3_signature_line = _parse_simple_line('signature', 'signature')
_parse_v3_outer_auth_type = _parse_simple_line('desc-auth-type', 'auth_type')
_parse_v3_outer_ephemeral_key = _parse_simple_line('desc-auth-ephemeral-key', 'ephemeral_key')
_parse_v3_outer_encrypted = _parse_key_block('encrypted', 'encrypted', 'MESSAGE')
_parse_v3_inner_intro_auth = _parse_simple_line('intro-auth-required', 'intro_auth', func = lambda v: v.split(' '))
_parse_v3_inner_single_service = _parse_if_present('single-onion-service', 'is_single_service')
class BaseHiddenServiceDescriptor(Descriptor):
"""
Hidden service descriptor.
.. versionadded:: 1.8.0
"""
# TODO: rename this class to HiddenServiceDescriptor in stem 2.x
class HiddenServiceDescriptorV2(BaseHiddenServiceDescriptor):
"""
Version 2 hidden service descriptor.
:var str descriptor_id: **\\*** identifier for this descriptor; this is a base32 hash of several fields
:var int version: **\\*** hidden service descriptor version
:var str permanent_key: **\\*** long term key of the hidden service
:var str secret_id_part: **\\*** hash of the time period, cookie, and replica
values so our descriptor_id can be validated
:var datetime published: **\\*** time in UTC when this descriptor was made
:var list protocol_versions: **\\*** list of **int** versions that are supported when establishing a connection
:var str introduction_points_encoded: raw introduction points blob
:var list introduction_points_auth: **\\*** tuples of the form
(auth_method, auth_data) for our introduction_points_content
(**deprecated**, always **[]**)
:var bytes introduction_points_content: decoded introduction-points content
without authentication data, if using cookie authentication this is
encrypted
:var str signature: signature of the descriptor content
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.6.0
Moved from the deprecated `pycrypto
`_ module to `cryptography
`_ for validating signatures.
.. versionchanged:: 1.6.0
Added the **skip_crypto_validation** constructor argument.
"""
TYPE_ANNOTATION_NAME = 'hidden-service-descriptor'
ATTRIBUTES = {
'descriptor_id': (None, _parse_rendezvous_service_descriptor_line),
'version': (None, _parse_v2_version_line),
'permanent_key': (None, _parse_permanent_key_line),
'secret_id_part': (None, _parse_secret_id_part_line),
'published': (None, _parse_publication_time_line),
'protocol_versions': ([], _parse_protocol_versions_line),
'introduction_points_encoded': (None, _parse_introduction_points_line),
'introduction_points_auth': ([], _parse_introduction_points_line),
'introduction_points_content': (None, _parse_introduction_points_line),
'signature': (None, _parse_v2_signature_line),
}
PARSER_FOR_LINE = {
'rendezvous-service-descriptor': _parse_rendezvous_service_descriptor_line,
'version': _parse_v2_version_line,
'permanent-key': _parse_permanent_key_line,
'secret-id-part': _parse_secret_id_part_line,
'publication-time': _parse_publication_time_line,
'protocol-versions': _parse_protocol_versions_line,
'introduction-points': _parse_introduction_points_line,
'signature': _parse_v2_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('rendezvous-service-descriptor', 'y3olqqblqw2gbh6phimfuiroechjjafa'),
('version', '2'),
('permanent-key', _random_crypto_blob('RSA PUBLIC KEY')),
('secret-id-part', 'e24kgecavwsznj7gpbktqsiwgvngsf4e'),
('publication-time', _random_date()),
('protocol-versions', '2,3'),
('introduction-points', '\n-----BEGIN MESSAGE-----\n-----END MESSAGE-----'),
), (
('signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False):
return cls(cls.content(attr, exclude, sign), validate = validate, skip_crypto_validation = not sign)
def __init__(self, raw_contents, validate = False, skip_crypto_validation = False):
super(HiddenServiceDescriptorV2, self).__init__(raw_contents, lazy_load = not validate)
entries = _descriptor_components(raw_contents, validate, non_ascii_fields = ('introduction-points'))
if validate:
for keyword in REQUIRED_V2_FIELDS:
if keyword not in entries:
raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
elif keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)
if 'rendezvous-service-descriptor' != list(entries.keys())[0]:
raise ValueError("Hidden service descriptor must start with a 'rendezvous-service-descriptor' entry")
elif 'signature' != list(entries.keys())[-1]:
raise ValueError("Hidden service descriptor must end with a 'signature' entry")
self._parse(entries, validate)
if not skip_crypto_validation and stem.prereq.is_crypto_available():
signed_digest = self._digest_for_signature(self.permanent_key, self.signature)
digest_content = self._content_range('rendezvous-service-descriptor ', '\nsignature\n')
content_digest = hashlib.sha1(digest_content).hexdigest().upper()
if signed_digest != content_digest:
raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, content_digest))
else:
self._entries = entries
@lru_cache()
def introduction_points(self, authentication_cookie = None):
"""
Provides this service's introduction points.
:returns: **list** of :class:`~stem.descriptor.hidden_service.IntroductionPoints`
:raises:
* **ValueError** if our introduction-points is malformed
* **DecryptionFailure** if unable to decrypt this field
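For example (a sketch: 'desc' is a parsed
:class:`~stem.descriptor.hidden_service.HiddenServiceDescriptorV2`, and the
cookie argument is only needed if the service requires client
authorization)...

::

  for intro_point in desc.introduction_points():
    print('%s:%s' % (intro_point.address, intro_point.port))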
"""
content = self.introduction_points_content
if not content:
return []
    elif authentication_cookie:
      if not stem.prereq.is_crypto_available():
        raise DecryptionFailure('Decrypting introduction-points requires the cryptography module')

      try:
        authentication_cookie = stem.util.str_tools._decode_b64(authentication_cookie)
      except TypeError as exc:
        raise DecryptionFailure('authentication_cookie must be a base64 encoded string (%s)' % exc)

      authentication_type = int(binascii.hexlify(content[0:1]), 16)

      if authentication_type == BASIC_AUTH:
        content = HiddenServiceDescriptorV2._decrypt_basic_auth(content, authentication_cookie)
      elif authentication_type == STEALTH_AUTH:
        content = HiddenServiceDescriptorV2._decrypt_stealth_auth(content, authentication_cookie)
      else:
        raise DecryptionFailure("Unrecognized authentication type '%s', currently we only support basic auth (%s) and stealth auth (%s)" % (authentication_type, BASIC_AUTH, STEALTH_AUTH))

      if not content.startswith(b'introduction-point '):
        raise DecryptionFailure('Unable to decrypt the introduction-points, maybe this is the wrong key?')
    elif not content.startswith(b'introduction-point '):
      raise DecryptionFailure('introduction-points content is encrypted, you need to provide its authentication_cookie')
return HiddenServiceDescriptorV2._parse_introduction_points(content)
@staticmethod
def _decrypt_basic_auth(content, authentication_cookie):
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
try:
client_blocks = int(binascii.hexlify(content[1:2]), 16)
except ValueError:
raise DecryptionFailure("When using basic auth the content should start with a number of blocks but wasn't a hex digit: %s" % binascii.hexlify(content[1:2]))
# parse the client id and encrypted session keys
client_entries_length = client_blocks * 16 * 20
client_entries = content[2:2 + client_entries_length]
client_keys = [(client_entries[i:i + 4], client_entries[i + 4:i + 20]) for i in range(0, client_entries_length, 4 + 16)]
iv = content[2 + client_entries_length:2 + client_entries_length + 16]
encrypted = content[2 + client_entries_length + 16:]
client_id = hashlib.sha1(authentication_cookie + iv).digest()[:4]
for entry_id, encrypted_session_key in client_keys:
if entry_id != client_id:
continue # not the session key for this client
# try decrypting the session key
cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(b'\x00' * len(iv)), default_backend())
decryptor = cipher.decryptor()
session_key = decryptor.update(encrypted_session_key) + decryptor.finalize()
# attempt to decrypt the intro points with the session key
cipher = Cipher(algorithms.AES(session_key), modes.CTR(iv), default_backend())
decryptor = cipher.decryptor()
decrypted = decryptor.update(encrypted) + decryptor.finalize()
# check if the decryption looks correct
if decrypted.startswith(b'introduction-point '):
return decrypted
return content # nope, unable to decrypt the content
@staticmethod
def _decrypt_stealth_auth(content, authentication_cookie):
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
# byte 1 = authentication type, 2-17 = input vector, 18 on = encrypted content
iv, encrypted = content[1:17], content[17:]
cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(iv), default_backend())
decryptor = cipher.decryptor()
return decryptor.update(encrypted) + decryptor.finalize()
@staticmethod
def _parse_introduction_points(content):
"""
Provides the parsed list of IntroductionPoints for the unencrypted content.
"""
introduction_points = []
content_io = io.BytesIO(content)
while True:
content = b''.join(_read_until_keywords('introduction-point', content_io, ignore_first = True))
if not content:
break # reached the end
attr = dict(INTRODUCTION_POINTS_ATTR)
entries = _descriptor_components(content, False)
for keyword, values in list(entries.items()):
value, block_type, block_contents = values[0]
if keyword in SINGLE_INTRODUCTION_POINT_FIELDS and len(values) > 1:
raise ValueError("'%s' can only appear once in an introduction-point block, but appeared %i times" % (keyword, len(values)))
if keyword == 'introduction-point':
attr['identifier'] = value
elif keyword == 'ip-address':
if not stem.util.connection.is_valid_ipv4_address(value):
raise ValueError("'%s' is an invalid IPv4 address" % value)
attr['address'] = value
elif keyword == 'onion-port':
if not stem.util.connection.is_valid_port(value):
raise ValueError("'%s' is an invalid port" % value)
attr['port'] = int(value)
elif keyword == 'onion-key':
attr['onion_key'] = block_contents
elif keyword == 'service-key':
attr['service_key'] = block_contents
elif keyword == 'intro-authentication':
auth_entries = []
for auth_value, _, _ in values:
if ' ' not in auth_value:
raise ValueError("We expected 'intro-authentication [auth_type] [auth_data]', but had '%s'" % auth_value)
auth_type, auth_data = auth_value.split(' ')[:2]
auth_entries.append((auth_type, auth_data))
attr['intro_authentication'] = auth_entries
introduction_points.append(IntroductionPoints(**attr))
return introduction_points
class HiddenServiceDescriptorV3(BaseHiddenServiceDescriptor):
"""
Version 3 hidden service descriptor.
:var int version: **\\*** hidden service descriptor version
:var int lifetime: **\\*** minutes after publication this descriptor is valid
:var stem.descriptor.certificate.Ed25519Certificate signing_cert: **\\*** cross-certifier for the short-term descriptor signing key
:var int revision_counter: **\\*** descriptor revision number
:var str superencrypted: **\\*** encrypted HS-DESC-ENC payload
:var str signature: **\\*** signature of this descriptor
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionadded:: 1.8.0
"""
# TODO: requested this @type on https://trac.torproject.org/projects/tor/ticket/31481
TYPE_ANNOTATION_NAME = 'hidden-service-descriptor-3'
ATTRIBUTES = {
'version': (None, _parse_v3_version_line),
'lifetime': (None, _parse_lifetime_line),
'signing_cert': (None, _parse_signing_cert),
'revision_counter': (None, _parse_revision_counter_line),
'superencrypted': (None, _parse_superencrypted_line),
'signature': (None, _parse_v3_signature_line),
}
PARSER_FOR_LINE = {
'hs-descriptor': _parse_v3_version_line,
'descriptor-lifetime': _parse_lifetime_line,
'descriptor-signing-key-cert': _parse_signing_cert,
'revision-counter': _parse_revision_counter_line,
'superencrypted': _parse_superencrypted_line,
'signature': _parse_v3_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, inner_layer = None, outer_layer = None, identity_key = None, signing_key = None, signing_cert = None, revision_counter = None, blinding_nonce = None):
"""
Hidden service v3 descriptors consist of three parts:
* InnerLayer, which most notably contains introduction points where the
service can be reached.
* OuterLayer, which encrypts the InnerLayer among other parameters.
* HiddenServiceDescriptorV3, which contains the OuterLayer and plaintext
parameters.
Construction through this method can supply any or none of these, with
omitted parameters populated with randomized defaults.
Ed25519 key blinding adds an additional ~20 ms, and as such is disabled by
default. To blind with a random nonce simply call...
::
HiddenServiceDescriptorV3.create(blinding_nonce = os.urandom(32))
:param dict attr: keyword/value mappings to be included in plaintext descriptor
:param list exclude: mandatory keywords to exclude from the descriptor, this
results in an invalid descriptor
:param bool sign: includes cryptographic signatures and digests if True
:param stem.descriptor.hidden_service.InnerLayer inner_layer: inner
encrypted layer
:param stem.descriptor.hidden_service.OuterLayer outer_layer: outer
encrypted layer
:param cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey
identity_key: service identity key
:param cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey
signing_key: service signing key
:param stem.descriptor.Ed25519CertificateV1 signing_cert: certificate
signing this descriptor
:param int revision_counter: descriptor revision number
:param bytes blinding_nonce: 32 byte blinding factor to derive the blinding key
:returns: **str** with the content of a descriptor
:raises:
* **ValueError** if parameters are malformed
* **ImportError** if cryptography is unavailable
"""
if not stem.prereq.is_crypto_available(ed25519 = True):
raise ImportError('Hidden service descriptor creation requires cryptography version 2.6')
elif not stem.prereq._is_sha3_available():
raise ImportError('Hidden service descriptor creation requires python 3.6+ or the pysha3 module (https://pypi.org/project/pysha3/)')
elif blinding_nonce and len(blinding_nonce) != 32:
raise ValueError('Blinding nonce must be 32 bytes, but was %i' % len(blinding_nonce))
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
inner_layer = inner_layer if inner_layer else InnerLayer.create(exclude = exclude)
identity_key = identity_key if identity_key else Ed25519PrivateKey.generate()
signing_key = signing_key if signing_key else Ed25519PrivateKey.generate()
revision_counter = revision_counter if revision_counter else int(time.time())
blinded_key = _blinded_pubkey(identity_key, blinding_nonce) if blinding_nonce else b'a' * 32
subcredential = HiddenServiceDescriptorV3._subcredential(identity_key, blinded_key)
custom_sig = attr.pop('signature') if (attr and 'signature' in attr) else None
if not outer_layer:
outer_layer = OuterLayer.create(
exclude = exclude,
inner_layer = inner_layer,
revision_counter = revision_counter,
subcredential = subcredential,
blinded_key = blinded_key,
)
if not signing_cert:
extensions = [Ed25519Extension(ExtensionType.HAS_SIGNING_KEY, None, blinded_key)]
signing_cert = Ed25519CertificateV1(cert_type = CertType.HS_V3_DESC_SIGNING, key = signing_key, extensions = extensions)
signing_cert.signature = _blinded_sign(signing_cert.pack(), identity_key, blinded_key, blinding_nonce) if blinding_nonce else b'b' * 64
desc_content = _descriptor_content(attr, exclude, (
('hs-descriptor', '3'),
('descriptor-lifetime', '180'),
('descriptor-signing-key-cert', '\n' + signing_cert.to_base64(pem = True)),
('revision-counter', str(revision_counter)),
('superencrypted', b'\n' + outer_layer._encrypt(revision_counter, subcredential, blinded_key)),
), ()) + b'\n'
if custom_sig:
desc_content += b'signature %s' % stem.util.str_tools._to_bytes(custom_sig)
elif 'signature' not in exclude:
sig_content = stem.descriptor.certificate.SIG_PREFIX_HS_V3 + desc_content
desc_content += b'signature %s' % base64.b64encode(signing_key.sign(sig_content)).rstrip(b'=')
return desc_content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, inner_layer = None, outer_layer = None, identity_key = None, signing_key = None, signing_cert = None, revision_counter = None, blinding_nonce = None):
return cls(cls.content(attr, exclude, sign, inner_layer, outer_layer, identity_key, signing_key, signing_cert, revision_counter, blinding_nonce), validate = validate)
def __init__(self, raw_contents, validate = False):
super(HiddenServiceDescriptorV3, self).__init__(raw_contents, lazy_load = not validate)
self._inner_layer = None
entries = _descriptor_components(raw_contents, validate)
if validate:
for keyword in REQUIRED_V3_FIELDS:
if keyword not in entries:
raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
elif keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)
if 'hs-descriptor' != list(entries.keys())[0]:
raise ValueError("Hidden service descriptor must start with a 'hs-descriptor' entry")
elif 'signature' != list(entries.keys())[-1]:
raise ValueError("Hidden service descriptor must end with a 'signature' entry")
self._parse(entries, validate)
if self.signing_cert and stem.prereq.is_crypto_available(ed25519 = True):
self.signing_cert.validate(self)
else:
self._entries = entries
def decrypt(self, onion_address):
"""
Decrypt this descriptor. Hidden service descriptors contain two encryption
layers (:class:`~stem.descriptor.hidden_service.OuterLayer` and
:class:`~stem.descriptor.hidden_service.InnerLayer`).
:param str onion_address: hidden service address this descriptor is from
:returns: :class:`~stem.descriptor.hidden_service.InnerLayer` with our
decrypted content
:raises:
* **ImportError** if required cryptography or sha3 module is unavailable
* **ValueError** if unable to decrypt or validation fails
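For example (a sketch where 'desc_text' and 'my_address' are placeholders
for descriptor content you fetched and the service's address)...

::

  desc = HiddenServiceDescriptorV3(desc_text)
  inner_layer = desc.decrypt(my_address)

  for intro_point in inner_layer.introduction_points:
    print(intro_point.link_specifiers)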
"""
if not stem.prereq.is_crypto_available(ed25519 = True):
raise ImportError('Hidden service descriptor decryption requires cryptography version 2.6')
elif not stem.prereq._is_sha3_available():
raise ImportError('Hidden service descriptor decryption requires python 3.6+ or the pysha3 module (https://pypi.org/project/pysha3/)')
if self._inner_layer is None:
blinded_key = self.signing_cert.signing_key() if self.signing_cert else None
if not blinded_key:
raise ValueError('No signing key is present')
identity_public_key = HiddenServiceDescriptorV3.identity_key_from_address(onion_address)
subcredential = HiddenServiceDescriptorV3._subcredential(identity_public_key, blinded_key)
outer_layer = OuterLayer._decrypt(self.superencrypted, self.revision_counter, subcredential, blinded_key)
self._inner_layer = InnerLayer._decrypt(outer_layer, self.revision_counter, subcredential, blinded_key)
return self._inner_layer
@staticmethod
def address_from_identity_key(key, suffix = True):
"""
Converts a hidden service identity key into its address. This accepts all
key formats (private, public, or public bytes).
:param Ed25519PublicKey,Ed25519PrivateKey,bytes key: hidden service identity key
:param bool suffix: includes the '.onion' suffix if true, excluded otherwise
:returns: **unicode** hidden service address
:raises: **ImportError** if sha3 unsupported
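For example, deriving the address of a newly generated service (a sketch
that assumes the cryptography module with ed25519 support)...

::

  from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

  identity_key = Ed25519PrivateKey.generate()
  print(HiddenServiceDescriptorV3.address_from_identity_key(identity_key))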
"""
if not stem.prereq._is_sha3_available():
raise ImportError('Hidden service address conversion requires python 3.6+ or the pysha3 module (https://pypi.org/project/pysha3/)')
key = stem.util._pubkey_bytes(key) # normalize key into bytes
version = stem.client.datatype.Size.CHAR.pack(3)
checksum = hashlib.sha3_256(CHECKSUM_CONSTANT + key + version).digest()[:2]
onion_address = base64.b32encode(key + checksum + version)
return stem.util.str_tools._to_unicode(onion_address + b'.onion' if suffix else onion_address).lower()
@staticmethod
def identity_key_from_address(onion_address):
"""
Converts a hidden service address into its public identity key.
:param str onion_address: hidden service address
:returns: **bytes** for the hidden service's public identity key
:raises:
* **ImportError** if sha3 unsupported
* **ValueError** if address malformed or checksum is invalid
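This is the inverse of
:func:`~stem.descriptor.hidden_service.HiddenServiceDescriptorV3.address_from_identity_key`,
so for a valid lowercase v3 address (without its '.onion' suffix) the round
trip should hold...

::

  pubkey = HiddenServiceDescriptorV3.identity_key_from_address(address)
  assert address == HiddenServiceDescriptorV3.address_from_identity_key(pubkey, suffix = False)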
"""
if not stem.prereq._is_sha3_available():
raise ImportError('Hidden service address conversion requires python 3.6+ or the pysha3 module (https://pypi.org/project/pysha3/)')
if onion_address.endswith('.onion'):
onion_address = onion_address[:-6]
if not stem.util.tor_tools.is_valid_hidden_service_address(onion_address, version = 3):
raise ValueError("'%s.onion' isn't a valid hidden service v3 address" % onion_address)
# onion_address = base32(PUBKEY | CHECKSUM | VERSION) + '.onion'
# CHECKSUM = H('.onion checksum' | PUBKEY | VERSION)[:2]
decoded_address = base64.b32decode(onion_address.upper())
pubkey = decoded_address[:32]
expected_checksum = decoded_address[32:34]
version = decoded_address[34:35]
checksum = hashlib.sha3_256(CHECKSUM_CONSTANT + pubkey + version).digest()[:2]
if expected_checksum != checksum:
checksum_str = stem.util.str_tools._to_unicode(binascii.hexlify(checksum))
expected_checksum_str = stem.util.str_tools._to_unicode(binascii.hexlify(expected_checksum))
raise ValueError('Bad checksum (expected %s but was %s)' % (expected_checksum_str, checksum_str))
return pubkey
@staticmethod
def _subcredential(identity_key, blinded_key):
# credential = H('credential' | public-identity-key)
# subcredential = H('subcredential' | credential | blinded-public-key)
credential = hashlib.sha3_256(b'credential%s' % stem.util._pubkey_bytes(identity_key)).digest()
return hashlib.sha3_256(b'subcredential%s%s' % (credential, blinded_key)).digest()
class OuterLayer(Descriptor):
"""
Initial encrypted layer of a hidden service v3 descriptor (`spec
<https://gitweb.torproject.org/torspec.git/tree/rend-spec-v3.txt>`_).
.. versionadded:: 1.8.0
:var str auth_type: **\\*** encryption scheme used for descriptor authorization
:var str ephemeral_key: **\\*** base64 encoded x25519 public key
:var dict clients: **\\*** mapping of authorized client ids to their
:class:`~stem.descriptor.hidden_service.AuthorizedClient`
:var str encrypted: **\\*** encrypted descriptor inner layer
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
ATTRIBUTES = {
'auth_type': (None, _parse_v3_outer_auth_type),
'ephemeral_key': (None, _parse_v3_outer_ephemeral_key),
'clients': ({}, _parse_v3_outer_clients),
'encrypted': (None, _parse_v3_outer_encrypted),
}
PARSER_FOR_LINE = {
'desc-auth-type': _parse_v3_outer_auth_type,
'desc-auth-ephemeral-key': _parse_v3_outer_ephemeral_key,
'auth-client': _parse_v3_outer_clients,
'encrypted': _parse_v3_outer_encrypted,
}
@staticmethod
def _decrypt(encrypted, revision_counter, subcredential, blinded_key):
plaintext = _decrypt_layer(encrypted, b'hsdir-superencrypted-data', revision_counter, subcredential, blinded_key)
return OuterLayer(plaintext)
def _encrypt(self, revision_counter, subcredential, blinded_key):
# Spec mandated padding: "Before encryption the plaintext is padded with
# NUL bytes to the nearest multiple of 10k bytes."
content = self.get_bytes() + b'\x00' * (-len(self.get_bytes()) % 10000)
# encrypt back into a hidden service descriptor's 'superencrypted' field
return _encrypt_layer(content, b'hsdir-superencrypted-data', revision_counter, subcredential, blinded_key)
@classmethod
def content(cls, attr = None, exclude = (), validate = True, sign = False, inner_layer = None, revision_counter = None, authorized_clients = None, subcredential = None, blinded_key = None):
if not stem.prereq.is_crypto_available(ed25519 = True):
raise ImportError('Hidden service layer creation requires cryptography version 2.6')
elif not stem.prereq._is_sha3_available():
raise ImportError('Hidden service layer creation requires python 3.6+ or the pysha3 module (https://pypi.org/project/pysha3/)')
elif authorized_clients and attr and 'auth-client' in attr:
raise ValueError('Authorized clients cannot be specified through both attr and authorized_clients')
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
inner_layer = inner_layer if inner_layer else InnerLayer.create()
revision_counter = revision_counter if revision_counter else 1
blinded_key = blinded_key if blinded_key else stem.util._pubkey_bytes(Ed25519PrivateKey.generate())
subcredential = subcredential if subcredential else HiddenServiceDescriptorV3._subcredential(Ed25519PrivateKey.generate(), blinded_key)
if not authorized_clients:
authorized_clients = []
if attr and 'auth-client' in attr:
pass # caller is providing raw auth-client lines through the attr
else:
for i in range(16):
authorized_clients.append(AuthorizedClient())
return _descriptor_content(attr, exclude, [
('desc-auth-type', 'x25519'),
('desc-auth-ephemeral-key', base64.b64encode(stem.util._pubkey_bytes(X25519PrivateKey.generate()))),
] + [
('auth-client', '%s %s %s' % (c.id, c.iv, c.cookie)) for c in authorized_clients
], (
('encrypted', b'\n' + inner_layer._encrypt(revision_counter, subcredential, blinded_key)),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, inner_layer = None, revision_counter = None, authorized_clients = None, subcredential = None, blinded_key = None):
return cls(cls.content(attr, exclude, validate, sign, inner_layer, revision_counter, authorized_clients, subcredential, blinded_key), validate = validate)
def __init__(self, content, validate = False):
content = stem.util.str_tools._to_bytes(content).rstrip(b'\x00') # strip null byte padding
super(OuterLayer, self).__init__(content, lazy_load = not validate)
entries = _descriptor_components(content, validate)
if validate:
self._parse(entries, validate)
else:
self._entries = entries
class InnerLayer(Descriptor):
"""
Second encrypted layer of a hidden service v3 descriptor (`spec
<https://gitweb.torproject.org/torspec.git/tree/rend-spec-v3.txt>`_).
.. versionadded:: 1.8.0
:var stem.descriptor.hidden_service.OuterLayer outer: enclosing encryption layer
:var list formats: **\\*** recognized CREATE2 cell formats
:var list intro_auth: **\\*** introduction-layer authentication types
:var bool is_single_service: **\\*** **True** if this is a `single onion service `_, **False** otherwise
:var list introduction_points: :class:`~stem.descriptor.hidden_service.IntroductionPointV3` where this service is reachable
**\\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
ATTRIBUTES = {
'formats': ([], _parse_v3_inner_formats),
'intro_auth': ([], _parse_v3_inner_intro_auth),
'is_single_service': (False, _parse_v3_inner_single_service),
'introduction_points': ([], _parse_v3_introduction_points),
}
PARSER_FOR_LINE = {
'create2-formats': _parse_v3_inner_formats,
'intro-auth-required': _parse_v3_inner_intro_auth,
'single-onion-service': _parse_v3_inner_single_service,
}
@staticmethod
def _decrypt(outer_layer, revision_counter, subcredential, blinded_key):
plaintext = _decrypt_layer(outer_layer.encrypted, b'hsdir-encrypted-data', revision_counter, subcredential, blinded_key)
return InnerLayer(plaintext, validate = True, outer_layer = outer_layer)
def _encrypt(self, revision_counter, subcredential, blinded_key):
# encrypt back into an outer layer's 'encrypted' field
return _encrypt_layer(self.get_bytes(), b'hsdir-encrypted-data', revision_counter, subcredential, blinded_key)
@classmethod
def content(cls, attr = None, exclude = (), sign = False, introduction_points = None):
if introduction_points:
suffix = '\n' + '\n'.join(map(IntroductionPointV3.encode, introduction_points))
else:
suffix = ''
return _descriptor_content(attr, exclude, (
('create2-formats', '2'),
)) + stem.util.str_tools._to_bytes(suffix)
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, introduction_points = None):
return cls(cls.content(attr, exclude, sign, introduction_points), validate = validate)
def __init__(self, content, validate = False, outer_layer = None):
super(InnerLayer, self).__init__(content, lazy_load = not validate)
self.outer = outer_layer
# inner layer begins with a few header fields, followed by any
# number of introduction-points
content = stem.util.str_tools._to_bytes(content)
div = content.find(b'\nintroduction-point ')
if div != -1:
self._unparsed_introduction_points = content[div + 1:]
content = content[:div]
else:
self._unparsed_introduction_points = None
entries = _descriptor_components(content, validate)
if validate:
self._parse(entries, validate)
_parse_v3_introduction_points(self, entries)
else:
self._entries = entries
def _blinded_pubkey(identity_key, blinding_nonce):
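  # Ed25519 key blinding (rend-spec-v3's [KEYBLIND] appendix): clamp a scalar
  # derived from the 32 byte nonce, then multiply the identity key's curve
  # point by it to produce the blinded public key.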
from stem.util import ed25519
mult = 2 ** (ed25519.b - 2) + sum(2 ** i * ed25519.bit(blinding_nonce, i) for i in range(3, ed25519.b - 2))
P = ed25519.decodepoint(stem.util._pubkey_bytes(identity_key))
return ed25519.encodepoint(ed25519.scalarmult(P, mult))
def _blinded_sign(msg, identity_key, blinded_key, blinding_nonce):
from cryptography.hazmat.primitives import serialization
from stem.util import ed25519
identity_key_bytes = identity_key.private_bytes(
encoding = serialization.Encoding.Raw,
format = serialization.PrivateFormat.Raw,
encryption_algorithm = serialization.NoEncryption(),
)
# expand the private identity key into an ESK (expanded secret key)
h = ed25519.H(identity_key_bytes)
a = 2 ** (ed25519.b - 2) + sum(2 ** i * ed25519.bit(h, i) for i in range(3, ed25519.b - 2))
k = b''.join([h[i:i + 1] for i in range(ed25519.b // 8, ed25519.b // 4)])
esk = ed25519.encodeint(a) + k
# blind the ESK with this nonce
mult = 2 ** (ed25519.b - 2) + sum(2 ** i * ed25519.bit(blinding_nonce, i) for i in range(3, ed25519.b - 2))
s = ed25519.decodeint(esk[:32])
s_prime = (s * mult) % ed25519.l
k = esk[32:]
k_prime = ed25519.H(b'Derive temporary signing key hash input' + k)[:32]
blinded_esk = ed25519.encodeint(s_prime) + k_prime
# finally, sign the message
a = ed25519.decodeint(blinded_esk[:32])
r = ed25519.Hint(b''.join([blinded_esk[i:i + 1] for i in range(ed25519.b // 8, ed25519.b // 4)]) + msg)
R = ed25519.scalarmult(ed25519.B, r)
S = (r + ed25519.Hint(ed25519.encodepoint(R) + blinded_key + msg) * a) % ed25519.l
return ed25519.encodepoint(R) + ed25519.encodeint(S)
# TODO: drop this alias in stem 2.x
HiddenServiceDescriptor = HiddenServiceDescriptorV2
stem-1.8.0/stem/descriptor/microdescriptor.py 0000664 0001750 0001750 00000027015 13501272761 022133 0 ustar atagar atagar 0000000 0000000 # Copyright 2013-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor microdescriptors, which contain a distilled version of a
relay's server descriptor. As of Tor version 0.2.3.3-alpha Tor no longer
downloads server descriptors by default, opting for microdescriptors instead.
Unlike most descriptor documents these aren't available on the metrics site
(since they don't contain any information that the server descriptors don't).
The limited information in microdescriptors makes them rather clunky to use
compared with server descriptors. For instance, microdescriptors lack the
relay's fingerprint, making it difficult to use them to look up the relay's
other descriptors.
To do so you need to match the microdescriptor's digest against its
corresponding router status entry. For added fun as of this writing the
controller doesn't even surface those router status entries
(:trac:`7953`).
For instance, here's an example that prints the nickname and fingerprints of
the exit relays.
::
import os
from stem.control import Controller
from stem.descriptor import parse_file
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
exit_digests = set()
data_dir = controller.get_conf('DataDirectory')
for desc in controller.get_microdescriptors():
if desc.exit_policy.is_exiting_allowed():
exit_digests.add(desc.digest)
print('Exit Relays:')
for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')):
if desc.digest in exit_digests:
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
Doing the same is trivial with server descriptors...
::
from stem.descriptor import parse_file
print('Exit Relays:')
for desc in parse_file('/home/atagar/.tor/cached-descriptors'):
if desc.exit_policy.is_exiting_allowed():
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
**Module Overview:**
::
Microdescriptor - Tor microdescriptor.
"""
import hashlib
import stem.exit_policy
import stem.prereq
from stem.descriptor import (
Descriptor,
DigestHash,
DigestEncoding,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_values,
_parse_simple_line,
_parse_protocol_line,
_parse_key_block,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
_parse_a_line,
_parse_p_line,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
REQUIRED_FIELDS = (
'onion-key',
)
SINGLE_FIELDS = (
'onion-key',
'ntor-onion-key',
'family',
'p',
'p6',
'pr',
)
def _parse_file(descriptor_file, validate = False, **kwargs):
"""
Iterates over the microdescriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for Microdescriptor instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **IOError** if the file can't be read
"""
while True:
annotations = _read_until_keywords('onion-key', descriptor_file)
# read until we reach an annotation or onion-key line
descriptor_lines = []
# read the onion-key line, done if we're at the end of the document
onion_key_line = descriptor_file.readline()
if onion_key_line:
descriptor_lines.append(onion_key_line)
else:
break
while True:
last_position = descriptor_file.tell()
line = descriptor_file.readline()
if not line:
break # EOF
elif line.startswith(b'@') or line.startswith(b'onion-key'):
descriptor_file.seek(last_position)
break
else:
descriptor_lines.append(line)
if descriptor_lines:
if descriptor_lines[0].startswith(b'@type'):
descriptor_lines = descriptor_lines[1:]
# strip newlines from annotations
annotations = list(map(bytes.strip, annotations))
descriptor_text = bytes.join(b'', descriptor_lines)
yield Microdescriptor(descriptor_text, validate, annotations, **kwargs)
else:
break # done parsing descriptors
def _parse_id_line(descriptor, entries):
identities = {}
for entry in _values('id', entries):
entry_comp = entry.split()
if len(entry_comp) >= 2:
key_type, key_value = entry_comp[0], entry_comp[1]
if key_type in identities:
raise ValueError("There can only be one 'id' line per a key type, but '%s' appeared multiple times" % key_type)
descriptor.identifier_type = key_type
descriptor.identifier = key_value
identities[key_type] = key_value
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % entry)
descriptor.identifiers = identities
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: v.split(' '))
_parse_p6_line = _parse_simple_line('p6', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v))
_parse_pr_line = _parse_protocol_line('pr', 'protocols')
class Microdescriptor(Descriptor):
"""
Microdescriptor (`descriptor specification
`_)
:var str onion_key: **\\*** key used to encrypt EXTEND cells
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
:var list or_addresses: **\\*** alternative for our address/or_port attributes, each
entry is a tuple of the form (address (**str**), port (**int**), is_ipv6
(**bool**))
:var list family: **\\*** nicknames or fingerprints of declared family
:var stem.exit_policy.MicroExitPolicy exit_policy: **\\*** relay's exit policy
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\\*** exit policy for IPv6
:var hash identifiers: mapping of key types (like rsa1024 or ed25519) to
their base64 encoded identity, this is only used for collision prevention
(:trac:`11743`)
:var dict protocols: mapping of protocols to their supported versions
:var str identifier: base64 encoded identity digest (**deprecated**, use
identifiers instead)
:var str identifier_type: identity digest key type (**deprecated**, use
identifiers instead)
**\\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.1.0
Added the identifier and identifier_type attributes.
.. versionchanged:: 1.5.0
Added the identifiers attribute, and deprecated identifier and
identifier_type since the field can now appear multiple times.
.. versionchanged:: 1.6.0
Added the protocols attribute.
.. versionchanged:: 1.8.0
Replaced our **digest** attribute with a much more flexible **digest()**
method. Unfortunately I cannot do this in a backward compatible way
because of the name conflict. The old digest had multiple problems (for
instance, being hex rather than base64 encoded), so hopefully no one was
using it. Very sorry if this causes trouble for anyone.
"""
TYPE_ANNOTATION_NAME = 'microdescriptor'
ATTRIBUTES = {
'onion_key': (None, _parse_onion_key_line),
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
'or_addresses': ([], _parse_a_line),
'family': ([], _parse_family_line),
'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line),
'exit_policy_v6': (None, _parse_p6_line),
'identifier_type': (None, _parse_id_line), # deprecated in favor of identifiers
'identifier': (None, _parse_id_line), # deprecated in favor of identifiers
'identifiers': ({}, _parse_id_line),
'protocols': ({}, _parse_pr_line),
}
PARSER_FOR_LINE = {
'onion-key': _parse_onion_key_line,
'ntor-onion-key': _parse_ntor_onion_key_line,
'a': _parse_a_line,
'family': _parse_family_line,
'p': _parse_p_line,
'p6': _parse_p6_line,
'pr': _parse_pr_line,
'id': _parse_id_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('onion-key', _random_crypto_blob('RSA PUBLIC KEY')),
))
def __init__(self, raw_contents, validate = False, annotations = None):
super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
self._annotation_lines = annotations if annotations else []
entries = _descriptor_components(raw_contents, validate)
if validate:
self._parse(entries, validate)
self._check_constraints(entries)
else:
self._entries = entries
def digest(self, hash_type = DigestHash.SHA256, encoding = DigestEncoding.BASE64):
"""
Digest of this microdescriptor. These are referenced by...
* **Microdescriptor Consensus**
* Referer: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` **digest** attribute
* Format: **SHA256/BASE64**
.. versionadded:: 1.8.0
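For example, given a parsed microdescriptor **md** (a brief usage sketch)::
>>> digest = md.digest()  # sha256, base64 encoded by default
>>> digest = md.digest(DigestHash.SHA1, DigestEncoding.HEX)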
:param stem.descriptor.DigestHash hash_type: digest hashing algorithm
:param stem.descriptor.DigestEncoding encoding: digest encoding
:returns: **hashlib.HASH** or **str** based on our encoding argument
"""
if hash_type == DigestHash.SHA1:
return stem.descriptor._encode_digest(hashlib.sha1(self.get_bytes()), encoding)
elif hash_type == DigestHash.SHA256:
return stem.descriptor._encode_digest(hashlib.sha256(self.get_bytes()), encoding)
else:
raise NotImplementedError('Microdescriptor digests are only available in sha1 and sha256, not %s' % hash_type)
@lru_cache()
def get_annotations(self):
"""
Provides content that appeared prior to the descriptor. If this comes from
the cached-microdescs then this commonly contains content like...
::
@last-listed 2013-02-24 00:18:30
:returns: **dict** with the key/value pairs in our annotations
"""
annotation_dict = {}
for line in self._annotation_lines:
if b' ' in line:
key, value = line.split(b' ', 1)
annotation_dict[key] = value
else:
annotation_dict[line] = None
return annotation_dict
def get_annotation_lines(self):
"""
Provides the lines of content that appeared prior to the descriptor. This
is the same as the
:func:`~stem.descriptor.microdescriptor.Microdescriptor.get_annotations`
results, but with the unparsed lines and ordering retained.
:returns: **list** with the lines of annotation that came before this descriptor
"""
return self._annotation_lines
def _check_constraints(self, entries):
"""
Does a basic check that the entries conform to this descriptor type's
constraints.
:param dict entries: keyword => (value, pgp key) entries
:raises: **ValueError** if an issue arises in validation
"""
for keyword in REQUIRED_FIELDS:
if keyword not in entries:
raise ValueError("Microdescriptor must have a '%s' entry" % keyword)
for keyword in SINGLE_FIELDS:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a microdescriptor" % keyword)
if 'onion-key' != list(entries.keys())[0]:
raise ValueError("Microdescriptor must start with a 'onion-key' entry")
def _name(self, is_plural = False):
return 'microdescriptors' if is_plural else 'microdescriptor'
stem-1.8.0/stem/version.py 0000664 0001750 0001750 00000034761 13600526326 016237 0 ustar atagar atagar 0000000 0000000 # Copyright 2011-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Tor versioning information and requirements for its features. These can be
easily parsed and compared, for instance...
::
>>> from stem.version import get_system_tor_version, Requirement
>>> my_version = get_system_tor_version()
>>> print(my_version)
0.2.1.30
>>> my_version >= Requirement.TORRC_CONTROL_SOCKET
True
**Module Overview:**
::
get_system_tor_version - gets the version of our system's tor installation
Version - Tor versioning information
.. data:: Requirement (enum)
Enumerations for the version requirements of features.
.. deprecated:: 1.6.0
Requirement entries belonging to tor versions which have been obsolete for
at least six months will be removed when we break backward compatibility
in the 2.x stem release.
===================================== ===========
Requirement Description
===================================== ===========
**AUTH_SAFECOOKIE** SAFECOOKIE authentication method
**DESCRIPTOR_COMPRESSION** `Expanded compression support for ZSTD and LZMA `_
**DORMANT_MODE** **DORMANT** and **ACTIVE** :data:`~stem.Signal`
**DROPGUARDS** DROPGUARDS requests
**EVENT_AUTHDIR_NEWDESCS** AUTHDIR_NEWDESC events
**EVENT_BUILDTIMEOUT_SET** BUILDTIMEOUT_SET events
**EVENT_CIRC_MINOR** CIRC_MINOR events
**EVENT_CLIENTS_SEEN** CLIENTS_SEEN events
**EVENT_CONF_CHANGED** CONF_CHANGED events
**EVENT_DESCCHANGED** DESCCHANGED events
**EVENT_GUARD** GUARD events
**EVENT_HS_DESC_CONTENT** HS_DESC_CONTENT events
**EVENT_NETWORK_LIVENESS** NETWORK_LIVENESS events
**EVENT_NEWCONSENSUS** NEWCONSENSUS events
**EVENT_NS** NS events
**EVENT_SIGNAL** SIGNAL events
**EVENT_STATUS** STATUS_GENERAL, STATUS_CLIENT, and STATUS_SERVER events
**EVENT_STREAM_BW** STREAM_BW events
**EVENT_TRANSPORT_LAUNCHED** TRANSPORT_LAUNCHED events
**EVENT_CONN_BW** CONN_BW events
**EVENT_CIRC_BW** CIRC_BW events
**EVENT_CELL_STATS** CELL_STATS events
**EVENT_TB_EMPTY** TB_EMPTY events
**EVENT_HS_DESC** HS_DESC events
**EXTENDCIRCUIT_PATH_OPTIONAL** EXTENDCIRCUIT queries can omit the path if the circuit is zero
**FEATURE_EXTENDED_EVENTS** 'EXTENDED_EVENTS' optional feature
**FEATURE_VERBOSE_NAMES** 'VERBOSE_NAMES' optional feature
**GETINFO_CONFIG_TEXT** 'GETINFO config-text' query
**GETINFO_GEOIP_AVAILABLE** 'GETINFO ip-to-country/ipv4-available' query and its ipv6 counterpart
**GETINFO_MICRODESCRIPTORS** 'GETINFO md/all' query
**GETINFO_UPTIME** 'GETINFO uptime' query
**HIDDEN_SERVICE_V3** Support for v3 hidden services
**HSFETCH** HSFETCH requests
**HSFETCH_V3** HSFETCH for version 3 hidden services
**HSPOST** HSPOST requests
**ADD_ONION** ADD_ONION and DEL_ONION requests
**ADD_ONION_BASIC_AUTH** ADD_ONION supports basic authentication
**ADD_ONION_NON_ANONYMOUS** ADD_ONION supports non-anonymous mode
**ADD_ONION_MAX_STREAMS** ADD_ONION support for MaxStreamsCloseCircuit
**LOADCONF** LOADCONF requests
**MICRODESCRIPTOR_IS_DEFAULT** Tor gets microdescriptors by default rather than server descriptors
**SAVECONF_FORCE** Added the 'FORCE' flag to SAVECONF
**TAKEOWNERSHIP** TAKEOWNERSHIP requests
**TORRC_CONTROL_SOCKET** 'ControlSocket <path>' config option
**TORRC_PORT_FORWARDING** 'PortForwarding' config option
**TORRC_DISABLE_DEBUGGER_ATTACHMENT** 'DisableDebuggerAttachment' config option
**TORRC_VIA_STDIN** Allow torrc options via 'tor -f -' (:trac:`13865`)
===================================== ===========
"""
import os
import re
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.system
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# cache for the get_system_tor_version function
VERSION_CACHE = {}
VERSION_PATTERN = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?(( \(\S*\))*)$')
def get_system_tor_version(tor_cmd = 'tor'):
"""
Queries tor for its version. This is OS dependent, only working on Linux,
OSX, and BSD.
:param str tor_cmd: command used to run tor
:returns: :class:`~stem.version.Version` provided by the tor command
:raises: **IOError** if unable to query or parse the version
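For example (version shown is illustrative)::
>>> print(get_system_tor_version())
0.4.1.5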
"""
if tor_cmd not in VERSION_CACHE:
version_cmd = '%s --version' % tor_cmd
try:
version_output = stem.util.system.call(version_cmd)
except OSError as exc:
# make the error message nicer if this is due to tor being unavailable
if 'No such file or directory' in str(exc):
if os.path.isabs(tor_cmd):
exc = "Unable to check tor's version. '%s' doesn't exist." % tor_cmd
else:
exc = "Unable to run '%s'. Maybe tor isn't in your PATH?" % version_cmd
raise IOError(exc)
for line in version_output:
# output example:
# Oct 21 07:19:27.438 [notice] Tor v0.2.1.30. This is experimental software. Do not rely on it for strong anonymity. (Running on Linux i686)
# Tor version 0.2.1.30.
if line.startswith('Tor version ') and line.endswith('.'):
try:
version_str = line[12:-1]
VERSION_CACHE[tor_cmd] = Version(version_str)
break
except ValueError as exc:
raise IOError(exc)
if tor_cmd not in VERSION_CACHE:
raise IOError("'%s' didn't provide a parseable version:\n\n%s" % (version_cmd, '\n'.join(version_output)))
return VERSION_CACHE[tor_cmd]
@lru_cache()
def _get_version(version_str):
return Version(version_str)
class Version(object):
"""
Comparable tor version. These are constructed from strings that conform to
the 'new' style in the `tor version-spec
<https://gitweb.torproject.org/torspec.git/tree/version-spec.txt>`_,
such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)".
.. versionchanged:: 1.6.0
Added all_extra parameter.
:var int major: major version
:var int minor: minor version
:var int micro: micro version
:var int patch: patch level (**None** if undefined)
:var str status: status tag such as 'alpha' or 'beta-dev' (**None** if undefined)
:var str extra: first extra information without its parentheses such as
'git-8be6058d8f31e578' (**None** if undefined)
:var list all_extra: all extra information entries, without their parentheses
:var str git_commit: git commit id (**None** if it wasn't provided)
:param str version_str: version to be parsed
:raises: **ValueError** if input isn't a valid tor version
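For example (a small sketch)::
>>> version = Version('0.2.2.23-alpha (git-7dcd105be34a4f44)')
>>> version.major, version.minor, version.micro, version.patch
(0, 2, 2, 23)
>>> version.status
'alpha'
>>> version.git_commit
'7dcd105be34a4f44'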
"""
def __init__(self, version_str):
self.version_str = version_str
version_parts = VERSION_PATTERN.match(version_str)
if version_parts:
major, minor, micro, patch, status, extra_str, _ = version_parts.groups()
# The patch and status matches are optional (may be None) and have an extra
# preceding period or dash if they exist. Stripping those off.
if patch:
patch = int(patch[1:])
if status:
status = status[1:]
self.major = int(major)
self.minor = int(minor)
self.micro = int(micro)
self.patch = patch
self.status = status
self.all_extra = [entry[1:-1] for entry in extra_str.strip().split()] if extra_str else []
self.extra = self.all_extra[0] if self.all_extra else None
self.git_commit = None
for extra in self.all_extra:
if extra and re.match('^git-[0-9a-f]{16}$', extra):
self.git_commit = extra[4:]
break
else:
raise ValueError("'%s' isn't a properly formatted tor version" % version_str)
def __str__(self):
"""
Provides the string used to construct the version.
"""
return self.version_str
def _compare(self, other, method):
"""
Compares version ordering according to the spec.
"""
if not isinstance(other, Version):
return False
for attr in ('major', 'minor', 'micro', 'patch'):
my_version = getattr(self, attr)
other_version = getattr(other, attr)
if my_version is None:
my_version = 0
if other_version is None:
other_version = 0
if my_version != other_version:
return method(my_version, other_version)
# According to the version spec...
#
# If we *do* encounter two versions that differ only by status tag, we
# compare them lexically as ASCII byte strings.
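#
# For instance, 0.0.0.1-beta > 0.0.0.1-alpha since 'beta' > 'alpha', and
# both exceed a tag-less 0.0.0.1 (whose status compares as '').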
my_status = self.status if self.status else ''
other_status = other.status if other.status else ''
return method(my_status, other_status)
def __hash__(self):
return stem.util._hash_attr(self, 'major', 'minor', 'micro', 'patch', 'status', cache = True)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
"""
Checks if this version meets the requirements for a given feature. We can
be compared to either a :class:`~stem.version.Version` or
:class:`~stem.version._VersionRequirements`.
"""
if isinstance(other, _VersionRequirements):
for rule in other.rules:
if rule(self):
return True
return False
return self._compare(other, lambda s, o: s > o)
def __ge__(self, other):
if isinstance(other, _VersionRequirements):
for rule in other.rules:
if rule(self):
return True
return False
return self._compare(other, lambda s, o: s >= o)
class _VersionRequirements(object):
"""
Series of version constraints that can be compared to. For instance, this
allows for comparisons like 'if I'm greater than version X in the 0.2.2
series, or greater than version Y in the 0.2.3 series'.
This is a logical 'or' of the series of rules.
"""
def __init__(self):
self.rules = []
def greater_than(self, version, inclusive = True):
"""
Adds a constraint that we're greater than the given version.
:param stem.version.Version version: version we're checking against
:param bool inclusive: if comparison is inclusive or not
"""
if inclusive:
self.rules.append(lambda v: version <= v)
else:
self.rules.append(lambda v: version < v)
def less_than(self, version, inclusive = True):
"""
Adds a constraint that we're less than the given version.
:param stem.version.Version version: version we're checking against
:param bool inclusive: if comparison is inclusive or not
"""
if inclusive:
self.rules.append(lambda v: version >= v)
else:
self.rules.append(lambda v: version > v)
def in_range(self, from_version, to_version, from_inclusive = True, to_inclusive = False):
"""
Adds constraint that we're within the range from one version to another.
:param stem.version.Version from_version: beginning of the comparison range
:param stem.version.Version to_version: end of the comparison range
:param bool from_inclusive: if comparison is inclusive with the starting version
:param bool to_inclusive: if comparison is inclusive with the ending version
"""
def new_rule(v):
if from_inclusive and to_inclusive:
return from_version <= v <= to_version
elif from_inclusive:
return from_version <= v < to_version
else:
return from_version < v < to_version
self.rules.append(new_rule)
safecookie_req = _VersionRequirements()
safecookie_req.in_range(Version('0.2.2.36'), Version('0.2.3.0'))
safecookie_req.greater_than(Version('0.2.3.13'))
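# That is, SAFECOOKIE is available when 0.2.2.36 <= version < 0.2.3.0 or
# when version >= 0.2.3.13.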
Requirement = stem.util.enum.Enum(
('AUTH_SAFECOOKIE', safecookie_req),
('DESCRIPTOR_COMPRESSION', Version('0.3.1.1-alpha')),
('DORMANT_MODE', Version('0.4.0.1-alpha')),
('DROPGUARDS', Version('0.2.5.1-alpha')),
('EVENT_AUTHDIR_NEWDESCS', Version('0.1.1.10-alpha')),
('EVENT_BUILDTIMEOUT_SET', Version('0.2.2.7-alpha')),
('EVENT_CIRC_MINOR', Version('0.2.3.11-alpha')),
('EVENT_CLIENTS_SEEN', Version('0.2.1.10-alpha')),
('EVENT_CONF_CHANGED', Version('0.2.3.3-alpha')),
('EVENT_DESCCHANGED', Version('0.1.2.2-alpha')),
('EVENT_GUARD', Version('0.1.2.5-alpha')),
('EVENT_HS_DESC_CONTENT', Version('0.2.7.1-alpha')),
('EVENT_NS', Version('0.1.2.3-alpha')),
('EVENT_NETWORK_LIVENESS', Version('0.2.7.2-alpha')),
('EVENT_NEWCONSENSUS', Version('0.2.1.13-alpha')),
('EVENT_SIGNAL', Version('0.2.3.1-alpha')),
('EVENT_STATUS', Version('0.1.2.3-alpha')),
('EVENT_STREAM_BW', Version('0.1.2.8-beta')),
('EVENT_TRANSPORT_LAUNCHED', Version('0.2.5.0-alpha')),
('EVENT_CONN_BW', Version('0.2.5.2-alpha')),
('EVENT_CIRC_BW', Version('0.2.5.2-alpha')),
('EVENT_CELL_STATS', Version('0.2.5.2-alpha')),
('EVENT_TB_EMPTY', Version('0.2.5.2-alpha')),
('EVENT_HS_DESC', Version('0.2.5.2-alpha')),
('EXTENDCIRCUIT_PATH_OPTIONAL', Version('0.2.2.9')),
('FEATURE_EXTENDED_EVENTS', Version('0.2.2.1-alpha')),
('FEATURE_VERBOSE_NAMES', Version('0.2.2.1-alpha')),
('GETINFO_CONFIG_TEXT', Version('0.2.2.7-alpha')),
('GETINFO_GEOIP_AVAILABLE', Version('0.3.2.1-alpha')),
('GETINFO_MICRODESCRIPTORS', Version('0.3.5.1-alpha')),
('GETINFO_UPTIME', Version('0.3.5.1-alpha')),
('HIDDEN_SERVICE_V3', Version('0.3.3.1-alpha')),
('HSFETCH', Version('0.2.7.1-alpha')),
('HSFETCH_V3', Version('0.4.1.1-alpha')),
('HSPOST', Version('0.2.7.1-alpha')),
('ADD_ONION', Version('0.2.7.1-alpha')),
('ADD_ONION_BASIC_AUTH', Version('0.2.9.1-alpha')),
('ADD_ONION_NON_ANONYMOUS', Version('0.2.9.3-alpha')),
('ADD_ONION_MAX_STREAMS', Version('0.2.7.2-alpha')),
('LOADCONF', Version('0.2.1.1')),
('MICRODESCRIPTOR_IS_DEFAULT', Version('0.2.3.3')),
('SAVECONF_FORCE', Version('0.3.1.1-alpha')),
('TAKEOWNERSHIP', Version('0.2.2.28-beta')),
('TORRC_CONTROL_SOCKET', Version('0.2.0.30')),
('TORRC_PORT_FORWARDING', Version('0.2.3.1-alpha')),
('TORRC_DISABLE_DEBUGGER_ATTACHMENT', Version('0.2.3.9')),
('TORRC_VIA_STDIN', Version('0.2.6.3-alpha')),
)
stem-1.8.0/stem/client/ 0000775 0001750 0001750 00000000000 13602232262 015436 5 ustar atagar atagar 0000000 0000000 stem-1.8.0/stem/client/datatype.py 0000664 0001750 0001750 00000060016 13564354230 017635 0 ustar atagar atagar 0000000 0000000 # Copyright 2018-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Support for `Tor's ORPort protocol
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.
**This module only consists of low level components, and is not intended for
users.** See our :class:`~stem.client.Relay` for the API you probably want.
.. versionadded:: 1.7.0
::
split - splits bytes into substrings
LinkProtocol - ORPort protocol version.
Field - Packable and unpackable datatype.
|- LinkSpecifier - Method of communicating with a circuit's relay.
| |- LinkByIPv4 - TLS connection to an IPv4 address.
| |- LinkByIPv6 - TLS connection to an IPv6 address.
| |- LinkByFingerprint - SHA1 identity fingerprint.
| +- LinkByEd25519 - Ed25519 identity fingerprint.
|
|- Size - Field of a static size.
|- Address - Relay address.
|- Certificate - Relay certificate.
|
|- pack - encodes content
|- unpack - decodes content
+- pop - decodes content with remainder
KDF - KDF-TOR derived attributes
+- from_value - parses key material
.. data:: AddrType (enum)
Form an address takes.
===================== ===========
AddrType Description
===================== ===========
**HOSTNAME** relay hostname
**IPv4** IPv4 address
**IPv6** IPv6 address
**ERROR_TRANSIENT** temporary error retrieving address
**ERROR_PERMANENT** permanent error retrieving address
**UNKNOWN** unrecognized address type
===================== ===========
.. data:: RelayCommand (enum)
Command concerning streams and circuits we've established with a relay.
Commands have two characteristics...
* **forward/backward**: **forward** commands are issued from the origin,
whereas **backward** come from the relay
* **stream/circuit**: **stream** commands concern an individual stream, whereas
**circuit** concern the entire circuit we've established with a relay
===================== ===========
RelayCommand Description
===================== ===========
**BEGIN** begin a stream (**forward**, **stream**)
**DATA** transmit data (**forward/backward**, **stream**)
**END** end a stream (**forward/backward**, **stream**)
**CONNECTED** BEGIN reply (**backward**, **stream**)
**SENDME** ready to accept more cells (**forward/backward**, **stream/circuit**)
**EXTEND** extend the circuit through another relay (**forward**, **circuit**)
**EXTENDED** EXTEND reply (**backward**, **circuit**)
**TRUNCATE** remove last circuit hop (**forward**, **circuit**)
**TRUNCATED** TRUNCATE reply (**backward**, **circuit**)
**DROP** ignorable no-op (**forward/backward**, **circuit**)
**RESOLVE** request DNS resolution (**forward**, **stream**)
**RESOLVED** RESOLVE reply (**backward**, **stream**)
**BEGIN_DIR** request descriptor (**forward**, **stream**)
**EXTEND2** ntor EXTEND request (**forward**, **circuit**)
**EXTENDED2** EXTEND2 reply (**backward**, **circuit**)
**UNKNOWN** unrecognized command
===================== ===========
.. data:: CertType (enum)
Certificate purpose. For more information see...
* `tor-spec.txt <https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_ section 4.2
* `cert-spec.txt <https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_ section A.1
* `rend-spec-v3.txt <https://gitweb.torproject.org/torspec.git/tree/rend-spec-v3.txt>`_ appendix E
.. versionchanged:: 1.8.0
Added the ED25519_SIGNING, LINK_CERT, ED25519_AUTHENTICATE,
ED25519_IDENTITY, HS_V3_DESC_SIGNING, HS_V3_INTRO_AUTH, NTOR_ONION_KEY,
and HS_V3_NTOR_ENC certificate types.
========================= ===========
CertType Description
========================= ===========
**LINK** link key certificate certified by RSA1024 identity
**IDENTITY** RSA1024 Identity certificate
**AUTHENTICATE** RSA1024 AUTHENTICATE cell link certificate
**ED25519_SIGNING** Ed25519 signing key, signed with identity key
**LINK_CERT** TLS link certificate, signed with ed25519 signing key
**ED25519_AUTHENTICATE** Ed25519 AUTHENTICATE cell key, signed with ed25519 signing key
**ED25519_IDENTITY** Ed25519 identity, signed with RSA identity
**HS_V3_DESC_SIGNING** hidden service v3 short-term descriptor signing key
**HS_V3_INTRO_AUTH** hidden service v3 introduction point authentication key
**NTOR_ONION_KEY** ntor onion key cross-certifying ed25519 identity key
**HS_V3_NTOR_ENC** hidden service v3 ntor-extra encryption key
**UNKNOWN** unrecognized certificate type
========================= ===========
.. data:: CloseReason (enum)
Reason a relay is closed.
===================== ===========
CloseReason Description
===================== ===========
**NONE** no reason given
**PROTOCOL** tor protocol violation
**INTERNAL** internal error
**REQUESTED** client sent a TRUNCATE command
**HIBERNATING** relay suspended, trying to save bandwidth
**RESOURCELIMIT** out of memory, sockets, or circuit IDs
**CONNECTFAILED** unable to reach relay
**OR_IDENTITY** connected, but its OR identity was not as expected
**OR_CONN_CLOSED** connection that was carrying this circuit died
**FINISHED** circuit has expired for being dirty or old
**TIMEOUT** circuit construction took too long
**DESTROYED** circuit was destroyed without a client TRUNCATE
**NOSUCHSERVICE** request was for an unknown hidden service
**UNKNOWN** unrecognized reason
===================== ===========
"""
import binascii
import collections
import hashlib
import struct
import stem.client.cell
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
ZERO = b'\x00'
HASH_LEN = 20
KEY_LEN = 16
class _IntegerEnum(stem.util.enum.Enum):
"""
Integer backed enumeration. Enumerations of this type always have an implicit
**UNKNOWN** value for integer values that lack a mapping.
"""
def __init__(self, *args):
self._enum_to_int = {}
self._int_to_enum = {}
parent_args = []
for entry in args:
if len(entry) == 2:
enum, int_val = entry
str_val = enum
elif len(entry) == 3:
enum, str_val, int_val = entry
else:
raise ValueError('IntegerEnums can only be constructed with two or three value tuples: %s' % repr(entry))
self._enum_to_int[str_val] = int_val
self._int_to_enum[int_val] = str_val
parent_args.append((enum, str_val))
parent_args.append(('UNKNOWN', 'UNKNOWN'))
super(_IntegerEnum, self).__init__(*parent_args)
def get(self, val):
"""
Provides the (enum, int_value) tuple for a given value.
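For instance, with the AddrType enum below (a small sketch)::
>>> AddrType.get(4)
('IPv4', 4)
>>> AddrType.get('IPv4')
('IPv4', 4)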
"""
if stem.util._is_int(val):
return self._int_to_enum.get(val, self.UNKNOWN), val
elif val in self:
return val, self._enum_to_int.get(val, val)
else:
raise ValueError("Invalid enumeration '%s', options are %s" % (val, ', '.join(self)))
AddrType = _IntegerEnum(
('HOSTNAME', 0),
('IPv4', 4),
('IPv6', 6),
('ERROR_TRANSIENT', 16),
('ERROR_PERMANENT', 17),
)
RelayCommand = _IntegerEnum(
('BEGIN', 'RELAY_BEGIN', 1),
('DATA', 'RELAY_DATA', 2),
('END', 'RELAY_END', 3),
('CONNECTED', 'RELAY_CONNECTED', 4),
('SENDME', 'RELAY_SENDME', 5),
('EXTEND', 'RELAY_EXTEND', 6),
('EXTENDED', 'RELAY_EXTENDED', 7),
('TRUNCATE', 'RELAY_TRUNCATE', 8),
('TRUNCATED', 'RELAY_TRUNCATED', 9),
('DROP', 'RELAY_DROP', 10),
('RESOLVE', 'RELAY_RESOLVE', 11),
('RESOLVED', 'RELAY_RESOLVED', 12),
('BEGIN_DIR', 'RELAY_BEGIN_DIR', 13),
('EXTEND2', 'RELAY_EXTEND2', 14),
('EXTENDED2', 'RELAY_EXTENDED2', 15),
)
CertType = _IntegerEnum(
('LINK', 1), # (tor-spec.txt section 4.2)
('IDENTITY', 2), # (tor-spec.txt section 4.2)
('AUTHENTICATE', 3), # (tor-spec.txt section 4.2)
('ED25519_SIGNING', 4), # (prop220 section 4.2)
('LINK_CERT', 5), # (prop220 section 4.2)
('ED25519_AUTHENTICATE', 6), # (prop220 section 4.2)
('ED25519_IDENTITY', 7), # (prop220 section 4.2)
('HS_V3_DESC_SIGNING', 8), # (rend-spec-v3.txt, "DESC_OUTER" description)
('HS_V3_INTRO_AUTH', 9), # (rend-spec-v3.txt, "auth-key" description)
('NTOR_ONION_KEY', 10), # (dir-spec.txt, "ntor-onion-key-crosscert" description)
('HS_V3_NTOR_ENC', 11), # (rend-spec-v3.txt, "enc-key-cert" description)
)
CloseReason = _IntegerEnum(
('NONE', 0),
('PROTOCOL', 1),
('INTERNAL', 2),
('REQUESTED', 3),
('HIBERNATING', 4),
('RESOURCELIMIT', 5),
('CONNECTFAILED', 6),
('OR_IDENTITY', 7),
('OR_CONN_CLOSED', 8),
('FINISHED', 9),
('TIMEOUT', 10),
('DESTROYED', 11),
('NOSUCHSERVICE', 12),
)
def split(content, size):
"""
Simple split of bytes into two substrings.
:param bytes content: string to split
:param int size: index to split the string on
:returns: two value tuple with the split bytes
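For example (a minimal sketch)::
>>> split(b'hello world', 5)
(b'hello', b' world')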
"""
return content[:size], content[size:]
class LinkProtocol(int):
"""
Constants that vary by our link protocol version.
:var int version: link protocol version
:var stem.client.datatype.Size circ_id_size: circuit identifier field size
:var int fixed_cell_length: size of cells with a fixed length
:var int first_circ_id: When creating circuits we pick an unused identifier
from a range that's determined by our link protocol.
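For example (a short sketch)::
>>> proto = LinkProtocol(4)
>>> proto.circ_id_size == Size.LONG
True
>>> proto.fixed_cell_length  # 4 byte circuit id + 1 byte command + 509 byte payload
514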
"""
def __new__(cls, version):
if isinstance(version, LinkProtocol):
return version # already a LinkProtocol
protocol = int.__new__(cls, version)
protocol.version = version
protocol.circ_id_size = Size.LONG if version > 3 else Size.SHORT
protocol.first_circ_id = 0x80000000 if version > 3 else 0x01
cell_header_size = protocol.circ_id_size.size + 1 # circuit id (2 or 4 bytes) + command (1 byte)
protocol.fixed_cell_length = cell_header_size + stem.client.cell.FIXED_PAYLOAD_LEN
return protocol
def __hash__(self):
# All LinkProtocol attributes can be derived from our version, so that's
# all we need in our hash. Offsetting by our type so we don't hash conflict
# with ints.
return self.version * hash(str(type(self)))
def __eq__(self, other):
if isinstance(other, int):
return self.version == other
elif isinstance(other, LinkProtocol):
return hash(self) == hash(other)
else:
return False
def __ne__(self, other):
return not self == other
def __int__(self):
return self.version
class Field(object):
"""
Packable and unpackable datatype.
"""
def pack(self):
"""
Encodes field into bytes.
:returns: **bytes** that can be communicated over Tor's ORPort
:raises: **ValueError** if incorrect type or size
"""
raise NotImplementedError('Not yet available')
@classmethod
def unpack(cls, packed):
"""
Decodes bytes into a field of this type.
:param bytes packed: content to decode
:returns: instance of this class
:raises: **ValueError** if packed data is malformed
"""
unpacked, remainder = cls.pop(packed)
if remainder:
raise ValueError('%s is the wrong size for a %s field' % (repr(packed), cls.__name__))
return unpacked
@staticmethod
def pop(packed):
"""
Decodes bytes as this field type, providing it and the remainder.
:param bytes packed: content to decode
:returns: tuple of the form (unpacked, remainder)
:raises: **ValueError** if packed data is malformed
"""
raise NotImplementedError('Not yet available')
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, Field) else False
def __ne__(self, other):
return not self == other
class Size(Field):
"""
Unsigned `struct.pack format
<https://docs.python.org/2/library/struct.html#format-characters>`_ for
network-order fields.
==================== ===========
Pack Description
==================== ===========
CHAR Unsigned char (1 byte)
SHORT Unsigned short (2 bytes)
LONG Unsigned long (4 bytes)
LONG_LONG Unsigned long long (8 bytes)
==================== ===========
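For example (a brief sketch)::
>>> Size.SHORT.pack(42)
b'\x00*'
>>> Size.SHORT.unpack(b'\x00*')
42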
"""
def __init__(self, name, size, pack_format):
self.name = name
self.size = size
self.format = pack_format
@staticmethod
def pop(packed):
raise NotImplementedError("Use our constant's unpack() and pop() instead")
def pack(self, content):
# TODO: Python 2.6's struct module behaves a little differently in a couple
# respects...
#
# * Invalid types raise a TypeError rather than a struct.error.
#
# * Negative values are happily packed despite being unsigned fields with
# a message printed to stdout (!) that says...
#
# stem/client/datatype.py:362: DeprecationWarning: struct integer overflow masking is deprecated
# packed = struct.pack(self.format, content)
# stem/client/datatype.py:362: DeprecationWarning: 'B' format requires 0 <= number <= 255
# packed = struct.pack(self.format, content)
#
# Rather than adjust this method to account for these differences doing
# duplicate upfront checks just for python 2.6. When we drop 2.6 support
# this can obviously be dropped.
if stem.prereq._is_python_26():
if not stem.util._is_int(content):
raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__)
elif content < 0:
raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name))
# TODO: When we drop python 2.x support this can be simplified via
# integer's to_bytes() method. For example...
#
# struct.pack('>Q', my_number)
#
# ... is the same as...
#
# my_number.to_bytes(8, 'big')
try:
packed = struct.pack(self.format, content)
except struct.error:
if not stem.util._is_int(content):
raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__)
elif content < 0:
raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name))
else:
raise # some other struct exception
if self.size != len(packed):
raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name))
return packed
def unpack(self, packed):
if self.size != len(packed):
raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name))
return struct.unpack(self.format, packed)[0]
def pop(self, packed):
to_unpack, remainder = split(packed, self.size)
return self.unpack(to_unpack), remainder
def __hash__(self):
return stem.util._hash_attr(self, 'name', 'size', 'format', cache = True)
class Address(Field):
"""
Relay address.
:var stem.client.AddrType type: address type
:var int type_int: integer value of the address type
:var unicode value: address value
:var bytes value_bin: encoded address value
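For example (a minimal sketch)::
>>> addr = Address('127.0.0.1')
>>> addr.type
'IPv4'
>>> addr.value_bin
b'\x7f\x00\x00\x01'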
"""
def __init__(self, value, addr_type = None):
if addr_type is None:
if stem.util.connection.is_valid_ipv4_address(value):
addr_type = AddrType.IPv4
elif stem.util.connection.is_valid_ipv6_address(value):
addr_type = AddrType.IPv6
else:
raise ValueError("'%s' isn't an IPv4 or IPv6 address" % value)
self.type, self.type_int = AddrType.get(addr_type)
if self.type == AddrType.IPv4:
if stem.util.connection.is_valid_ipv4_address(value):
self.value = value
self.value_bin = b''.join([Size.CHAR.pack(int(v)) for v in value.split('.')])
else:
if len(value) != 4:
raise ValueError('Packed IPv4 addresses should be four bytes, but was: %s' % repr(value))
self.value = _unpack_ipv4_address(value)
self.value_bin = value
elif self.type == AddrType.IPv6:
if stem.util.connection.is_valid_ipv6_address(value):
self.value = stem.util.connection.expand_ipv6_address(value).lower()
self.value_bin = b''.join([Size.SHORT.pack(int(v, 16)) for v in self.value.split(':')])
else:
if len(value) != 16:
raise ValueError('Packed IPv6 addresses should be sixteen bytes, but was: %s' % repr(value))
self.value = _unpack_ipv6_address(value)
self.value_bin = value
else:
# The spec doesn't really tell us what form errors take. For
# now just leaving the value unset so we can fill it in later when we
# know what would be most useful.
self.value = None
self.value_bin = value
def pack(self):
cell = bytearray()
cell += Size.CHAR.pack(self.type_int)
cell += Size.CHAR.pack(len(self.value_bin))
cell += self.value_bin
return bytes(cell)
@staticmethod
def pop(content):
addr_type, content = Size.CHAR.pop(content)
addr_length, content = Size.CHAR.pop(content)
if len(content) < addr_length:
raise ValueError('Address specified a payload of %i bytes, but only had %i' % (addr_length, len(content)))
addr_value, content = split(content, addr_length)
return Address(addr_value, addr_type), content
def __hash__(self):
return stem.util._hash_attr(self, 'type_int', 'value_bin', cache = True)
class Certificate(Field):
"""
Relay certificate as defined in tor-spec section 4.2.
:var stem.client.CertType type: certificate type
:var int type_int: integer value of the certificate type
:var bytes value: certificate value
"""
def __init__(self, cert_type, value):
self.type, self.type_int = CertType.get(cert_type)
self.value = value
def pack(self):
cell = bytearray()
cell += Size.CHAR.pack(self.type_int)
cell += Size.SHORT.pack(len(self.value))
cell += self.value
return bytes(cell)
@staticmethod
def pop(content):
cert_type, content = Size.CHAR.pop(content)
cert_size, content = Size.SHORT.pop(content)
if cert_size > len(content):
raise ValueError('CERTS cell should have a certificate with %i bytes, but only had %i remaining' % (cert_size, len(content)))
cert_bytes, content = split(content, cert_size)
return Certificate(cert_type, cert_bytes), content
def __hash__(self):
return stem.util._hash_attr(self, 'type_int', 'value')
class LinkSpecifier(Field):
"""
Method of communicating with a circuit's relay. Recognized link specification
types are an instantiation of a subclass. For more information see the
`EXTEND cell specification
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.
.. versionadded:: 1.8.0
:var int type: numeric identifier of our type
:var bytes value: encoded link specification destination
"""
def __init__(self, link_type, value):
self.type = link_type
self.value = value
@staticmethod
def pop(packed):
# LSTYPE (Link specifier type) [1 byte]
# LSLEN (Link specifier length) [1 byte]
# LSPEC (Link specifier) [LSLEN bytes]
link_type, packed = Size.CHAR.pop(packed)
value_size, packed = Size.CHAR.pop(packed)
if value_size > len(packed):
raise ValueError('Link specifier should have %i bytes, but only had %i remaining' % (value_size, len(packed)))
value, packed = split(packed, value_size)
if link_type == 0:
return LinkByIPv4.unpack(value), packed
elif link_type == 1:
return LinkByIPv6.unpack(value), packed
elif link_type == 2:
return LinkByFingerprint(value), packed
elif link_type == 3:
return LinkByEd25519(value), packed
else:
return LinkSpecifier(link_type, value), packed # unrecognized type
def pack(self):
cell = bytearray()
cell += Size.CHAR.pack(self.type)
cell += Size.CHAR.pack(len(self.value))
cell += self.value
return bytes(cell)
class LinkByIPv4(LinkSpecifier):
"""
TLS connection to an IPv4 address.
.. versionadded:: 1.8.0
:var str address: relay IPv4 address
:var int port: relay ORPort
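For instance (a minimal sketch)::
>>> link = LinkByIPv4.unpack(b'\x7f\x00\x00\x01\x23\x83')
>>> link.address, link.port
('127.0.0.1', 9091)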
"""
def __init__(self, address, port):
super(LinkByIPv4, self).__init__(0, _pack_ipv4_address(address) + Size.SHORT.pack(port))
self.address = address
self.port = port
@staticmethod
def unpack(value):
if len(value) != 6:
raise ValueError('IPv4 link specifiers should be six bytes, but was %i instead: %s' % (len(value), binascii.hexlify(value)))
addr, port = split(value, 4)
return LinkByIPv4(_unpack_ipv4_address(addr), Size.SHORT.unpack(port))
class LinkByIPv6(LinkSpecifier):
"""
TLS connection to an IPv6 address.
.. versionadded:: 1.8.0
:var str address: relay IPv6 address
:var int port: relay ORPort
"""
def __init__(self, address, port):
super(LinkByIPv6, self).__init__(1, _pack_ipv6_address(address) + Size.SHORT.pack(port))
self.address = address
self.port = port
@staticmethod
def unpack(value):
if len(value) != 18:
raise ValueError('IPv6 link specifiers should be eighteen bytes, but was %i instead: %s' % (len(value), binascii.hexlify(value)))
addr, port = split(value, 16)
return LinkByIPv6(_unpack_ipv6_address(addr), Size.SHORT.unpack(port))
class LinkByFingerprint(LinkSpecifier):
"""
Connection to a SHA1 identity fingerprint.
.. versionadded:: 1.8.0
:var str fingerprint: relay sha1 fingerprint
"""
def __init__(self, value):
super(LinkByFingerprint, self).__init__(2, value)
if len(value) != 20:
raise ValueError('Fingerprint link specifiers should be twenty bytes, but was %i instead: %s' % (len(value), binascii.hexlify(value)))
self.fingerprint = stem.util.str_tools._to_unicode(value)
class LinkByEd25519(LinkSpecifier):
"""
Connection to an Ed25519 identity fingerprint.
.. versionadded:: 1.8.0
:var str fingerprint: relay ed25519 fingerprint
"""
def __init__(self, value):
super(LinkByEd25519, self).__init__(3, value)
if len(value) != 32:
raise ValueError('Fingerprint link specifiers should be thirty two bytes, but was %i instead: %s' % (len(value), binascii.hexlify(value)))
self.fingerprint = stem.util.str_tools._to_unicode(value)
class KDF(collections.namedtuple('KDF', ['key_hash', 'forward_digest', 'backward_digest', 'forward_key', 'backward_key'])):
"""
Computed KDF-TOR derived values for TAP, CREATE_FAST handshakes, and hidden
service protocols as defined in tor-spec section 5.2.1.
:var bytes key_hash: hash that proves knowledge of our shared key
:var bytes forward_digest: forward digest hash seed
:var bytes backward_digest: backward digest hash seed
:var bytes forward_key: forward encryption key
:var bytes backward_key: backward encryption key
"""
@staticmethod
def from_value(key_material):
# Derived key material, as per...
#
# K = H(K0 | [00]) | H(K0 | [01]) | H(K0 | [02]) | ...
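#
# With the constants above this needs KEY_LEN * 2 + HASH_LEN * 3 bytes (92
# total), sliced below into one key hash, two digest seeds, and two
# encryption keys.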
derived_key = b''
counter = 0
while len(derived_key) < KEY_LEN * 2 + HASH_LEN * 3:
derived_key += hashlib.sha1(key_material + Size.CHAR.pack(counter)).digest()
counter += 1
key_hash, derived_key = split(derived_key, HASH_LEN)
forward_digest, derived_key = split(derived_key, HASH_LEN)
backward_digest, derived_key = split(derived_key, HASH_LEN)
forward_key, derived_key = split(derived_key, KEY_LEN)
backward_key, derived_key = split(derived_key, KEY_LEN)
return KDF(key_hash, forward_digest, backward_digest, forward_key, backward_key)
def _pack_ipv4_address(address):
return b''.join([Size.CHAR.pack(int(v)) for v in address.split('.')])
def _unpack_ipv4_address(value):
return '.'.join([str(Size.CHAR.unpack(value[i:i + 1])) for i in range(4)])
def _pack_ipv6_address(address):
return b''.join([Size.SHORT.pack(int(v, 16)) for v in address.split(':')])
def _unpack_ipv6_address(value):
return ':'.join(['%04x' % Size.SHORT.unpack(value[i * 2:(i + 1) * 2]) for i in range(8)])
setattr(Size, 'CHAR', Size('CHAR', 1, '!B'))
setattr(Size, 'SHORT', Size('SHORT', 2, '!H'))
setattr(Size, 'LONG', Size('LONG', 4, '!L'))
setattr(Size, 'LONG_LONG', Size('LONG_LONG', 8, '!Q'))
stem-1.8.0/stem/client/__init__.py 0000664 0001750 0001750 00000032222 13501272761 017556 0 ustar atagar atagar 0000000 0000000 # Copyright 2018-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Interaction with a Tor relay's ORPort. :class:`~stem.client.Relay` is
a wrapper for :class:`~stem.socket.RelaySocket`, much the same way as
:class:`~stem.control.Controller` provides higher level functions for
:class:`~stem.socket.ControlSocket`.
.. versionadded:: 1.7.0
::
Relay - Connection with a tor relay's ORPort.
| +- connect - Establishes a connection with a relay.
|
|- is_alive - reports if our connection is open or closed
|- connection_time - time when we last connected or disconnected
|- close - shuts down our connection
|
+- create_circuit - establishes a new circuit
Circuit - Circuit we've established through a relay.
|- send - sends a message through this circuit
+- close - closes this circuit
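A rough usage sketch (address, port, and request here are hypothetical)::
import stem.client
with stem.client.Relay.connect('127.0.0.1', 9001) as relay:
  circ = relay.create_circuit()
  print(circ.directory('GET /tor/server/authority HTTP/1.0\r\n\r\n'))
  circ.close()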
"""
import hashlib
import threading
import stem
import stem.client.cell
import stem.prereq
import stem.socket
import stem.util.connection
from stem.client.cell import (
CELL_TYPE_SIZE,
FIXED_PAYLOAD_LEN,
PAYLOAD_LEN_SIZE,
Cell,
)
from stem.client.datatype import (
ZERO,
Address,
KDF,
LinkProtocol,
RelayCommand,
split,
)
__all__ = [
'cell',
'datatype',
]
DEFAULT_LINK_PROTOCOLS = (3, 4, 5)
class Relay(object):
"""
Connection with a Tor relay's ORPort.
:var int link_protocol: link protocol version we established
"""
def __init__(self, orport, link_protocol):
# TODO: Python 3.x adds a getbuffer() method which
# lets us get the size...
#
# https://stackoverflow.com/questions/26827055/python-how-to-get-iobytes-allocated-memory-length
#
# When we drop python 2.x support we should replace
# self._orport_buffer with an io.BytesIO.
self.link_protocol = LinkProtocol(link_protocol)
self._orport = orport
self._orport_buffer = b'' # unread bytes
self._orport_lock = threading.RLock()
self._circuits = {}
@staticmethod
def connect(address, port, link_protocols = DEFAULT_LINK_PROTOCOLS):
"""
Establishes a connection with the given ORPort.
:param str address: ip address of the relay
:param int port: ORPort of the relay
:param tuple link_protocols: acceptable link protocol versions
:raises:
* **ValueError** if address or port are invalid
* :class:`stem.SocketError` if we're unable to establish a connection
"""
relay_addr = Address(address)
if not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
elif not link_protocols:
raise ValueError("Connection can't be established without a link protocol.")
try:
conn = stem.socket.RelaySocket(address, port)
except stem.SocketError as exc:
if 'Connection refused' in str(exc):
raise stem.SocketError("Failed to connect to %s:%i. Maybe it isn't an ORPort?" % (address, port))
# If not an ORPort (for instance, mistakenly connecting to a ControlPort
# instead) we'll likely fail during SSL negotiation. This can result
# in a variety of responses so normalizing what we can...
#
# Debian 9.5: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:661)
# Ubuntu 16.04: [SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:590)
# Ubuntu 12.04: [Errno 1] _ssl.c:504: error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
if 'unknown protocol' in str(exc) or 'wrong version number' in str(exc):
raise stem.SocketError("Failed to SSL authenticate to %s:%i. Maybe it isn't an ORPort?" % (address, port))
raise
# To negotiate our link protocol the first VERSIONS cell is expected to use
# a circuit ID field size from protocol version 1-3 for backward
# compatibility...
#
# The first VERSIONS cell, and any cells sent before the
# first VERSIONS cell, always have CIRCID_LEN == 2 for backward
# compatibility.
conn.send(stem.client.cell.VersionsCell(link_protocols).pack(2))
response = conn.recv()
# Link negotiation ends right away if we lack a common protocol
# version. (#25139)
if not response:
conn.close()
raise stem.SocketError('Unable to establish a common link protocol with %s:%i' % (address, port))
versions_reply = stem.client.cell.Cell.pop(response, 2)[0]
common_protocols = set(link_protocols).intersection(versions_reply.versions)
if not common_protocols:
conn.close()
raise stem.SocketError('Unable to find a common link protocol. We support %s but %s:%i supports %s.' % (', '.join(map(str, link_protocols)), address, port, ', '.join(map(str, versions_reply.versions))))
# Establishing connections requires sending a NETINFO, but including our
address is optional. We can revisit including it when we have a use case
# where it would help.
link_protocol = max(common_protocols)
conn.send(stem.client.cell.NetinfoCell(relay_addr, []).pack(link_protocol))
return Relay(conn, link_protocol)
def _recv(self, raw = False):
"""
Reads the next cell from our ORPort. If none is present this blocks
until one is available.
:param bool raw: provides bytes rather than parsing as a cell if **True**
:returns: next :class:`~stem.client.cell.Cell`
"""
with self._orport_lock:
# cells begin with [circ_id][cell_type][...]
circ_id_size = self.link_protocol.circ_id_size.size
while len(self._orport_buffer) < (circ_id_size + CELL_TYPE_SIZE.size):
self._orport_buffer += self._orport.recv() # read until we know the cell type
cell_type = Cell.by_value(CELL_TYPE_SIZE.pop(self._orport_buffer[circ_id_size:])[0])
if cell_type.IS_FIXED_SIZE:
cell_size = circ_id_size + CELL_TYPE_SIZE.size + FIXED_PAYLOAD_LEN
else:
# variable length, our next field is the payload size
while len(self._orport_buffer) < (circ_id_size + CELL_TYPE_SIZE.size + PAYLOAD_LEN_SIZE.size):
self._orport_buffer += self._orport.recv() # read until we know the cell size
payload_len = PAYLOAD_LEN_SIZE.pop(self._orport_buffer[circ_id_size + CELL_TYPE_SIZE.size:])[0]
cell_size = circ_id_size + CELL_TYPE_SIZE.size + PAYLOAD_LEN_SIZE.size + payload_len
while len(self._orport_buffer) < cell_size:
self._orport_buffer += self._orport.recv() # read until we have the full cell
if raw:
content, self._orport_buffer = split(self._orport_buffer, cell_size)
return content
else:
cell, self._orport_buffer = Cell.pop(self._orport_buffer, self.link_protocol)
return cell
def _msg(self, cell):
"""
Sends a cell on the ORPort and provides the response we receive in reply.
Unfortunately unlike control sockets, ORPorts don't have generalized rules
for predictable message IO. With control sockets...
* Each message we send receives a single reply.
* We may also receive asynchronous events marked with a 650 status.
ORPorts by contrast receive variable length cells with differing rules on
their arrival. As such making a best effort attempt at a send-and-receive
method in which we do the following...
* Discard any existing unread data from the socket.
* Send our request.
* Await up to a second for a reply.
It's quite possible this is a stupid approach. If so, patches welcome.
:param stem.client.cell.Cell cell: cell to be sent
:returns: **generator** with the cells received in reply
"""
self._orport.recv(timeout = 0) # discard unread data
self._orport.send(cell.pack(self.link_protocol))
response = self._orport.recv(timeout = 1)
for received_cell in stem.client.cell.Cell.pop(response, self.link_protocol):
yield received_cell
def is_alive(self):
"""
Checks if our socket is currently connected. This is a pass-through for our
socket's :func:`~stem.socket.BaseSocket.is_alive` method.
:returns: **bool** that's **True** if our socket is connected and **False** otherwise
"""
return self._orport.is_alive()
def connection_time(self):
"""
Provides the unix timestamp for when our socket was either connected or
disconnected. That is to say, the time we connected if we're currently
connected and the time we disconnected if we're not connected.
:returns: **float** for when we last connected or disconnected, zero if
we've never connected
"""
return self._orport.connection_time()
def close(self):
"""
Closes our socket connection. This is a pass-through for our socket's
:func:`~stem.socket.BaseSocket.close` method.
"""
with self._orport_lock:
return self._orport.close()
def create_circuit(self):
"""
Establishes a new circuit.
"""
with self._orport_lock:
circ_id = max(self._circuits) + 1 if self._circuits else self.link_protocol.first_circ_id
create_fast_cell = stem.client.cell.CreateFastCell(circ_id)
created_fast_cell = None
for cell in self._msg(create_fast_cell):
if isinstance(cell, stem.client.cell.CreatedFastCell):
created_fast_cell = cell
break
if not created_fast_cell:
raise ValueError('We should get a CREATED_FAST response from a CREATE_FAST request')
kdf = KDF.from_value(create_fast_cell.key_material + created_fast_cell.key_material)
if created_fast_cell.derivative_key != kdf.key_hash:
raise ValueError('Remote failed to prove that it knows our shared key')
circ = Circuit(self, circ_id, kdf)
self._circuits[circ.id] = circ
return circ
def __iter__(self):
with self._orport_lock:
for circ in self._circuits.values():
yield circ
def __enter__(self):
return self
def __exit__(self, exit_type, value, traceback):
self.close()
class Circuit(object):
"""
Circuit through which requests can be made of a `Tor relay's ORPort
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.
:var stem.client.Relay relay: relay through which this circuit has been established
:var int id: circuit id
:var hashlib.sha1 forward_digest: digest for forward integrity check
:var hashlib.sha1 backward_digest: digest for backward integrity check
:var bytes forward_key: forward encryption key
:var bytes backward_key: backward encryption key
"""
def __init__(self, relay, circ_id, kdf):
if not stem.prereq.is_crypto_available():
raise ImportError('Circuit construction requires the cryptography module')
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
ctr = modes.CTR(ZERO * (algorithms.AES.block_size // 8))
self.relay = relay
self.id = circ_id
self.forward_digest = hashlib.sha1(kdf.forward_digest)
self.backward_digest = hashlib.sha1(kdf.backward_digest)
self.forward_key = Cipher(algorithms.AES(kdf.forward_key), ctr, default_backend()).encryptor()
self.backward_key = Cipher(algorithms.AES(kdf.backward_key), ctr, default_backend()).decryptor()
def directory(self, request, stream_id = 0):
"""
Request descriptors from the relay.
:param str request: directory request to make
:param int stream_id: specific stream this concerns
:returns: **str** with the requested descriptor data
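For example (a sketch; **circ** is a circuit from
:func:`~stem.client.Relay.create_circuit` and the request is a hypothetical
directory query)::
>>> desc_data = circ.directory('GET /tor/server/authority HTTP/1.0\r\n\r\n')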
"""
with self.relay._orport_lock:
self._send(RelayCommand.BEGIN_DIR, stream_id = stream_id)
self._send(RelayCommand.DATA, request, stream_id = stream_id)
response = []
while True:
# Decrypt relay cells received in response. Our digest/key only
# updates when handled successfully.
encrypted_cell = self.relay._recv(raw = True)
decrypted_cell, backward_key, backward_digest = stem.client.cell.RelayCell.decrypt(self.relay.link_protocol, encrypted_cell, self.backward_key, self.backward_digest)
if self.id != decrypted_cell.circ_id:
raise stem.ProtocolError('Response should be for circuit id %i, not %i' % (self.id, decrypted_cell.circ_id))
self.backward_digest = backward_digest
self.backward_key = backward_key
if decrypted_cell.command == RelayCommand.END:
return b''.join([cell.data for cell in response])
else:
response.append(decrypted_cell)
def _send(self, command, data = '', stream_id = 0):
"""
Sends a message over the circuit.
:param stem.client.datatype.RelayCommand command: command to be issued
:param bytes data: message payload
:param int stream_id: specific stream this concerns
"""
with self.relay._orport_lock:
# Encrypt and send the cell. Our digest/key only updates if the cell is
# successfully sent.
cell = stem.client.cell.RelayCell(self.id, command, data, stream_id = stream_id)
payload, forward_key, forward_digest = cell.encrypt(self.relay.link_protocol, self.forward_key, self.forward_digest)
self.relay._orport.send(payload)
self.forward_digest = forward_digest
self.forward_key = forward_key
def close(self):
with self.relay._orport_lock:
self.relay._orport.send(stem.client.cell.DestroyCell(self.id).pack(self.relay.link_protocol))
del self.relay._circuits[self.id]
def __enter__(self):
return self
def __exit__(self, exit_type, value, traceback):
self.close()
stem-1.8.0/stem/client/cell.py 0000664 0001750 0001750 00000065275 13501272761 016754 0 ustar atagar atagar 0000000 0000000 # Copyright 2018-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Messages communicated over a Tor relay's ORPort.
.. versionadded:: 1.7.0
**Module Overview:**
::
Cell - Base class for ORPort messages.
|- CircuitCell - Circuit management.
| |- CreateCell - Create a circuit. (section 5.1)
| |- CreatedCell - Acknowledge create. (section 5.1)
| |- RelayCell - End-to-end data. (section 6.1)
| |- DestroyCell - Stop using a circuit. (section 5.4)
| |- CreateFastCell - Create a circuit, no PK. (section 5.1)
| |- CreatedFastCell - Circuit created, no PK. (section 5.1)
| |- RelayEarlyCell - End-to-end data; limited. (section 5.6)
| |- Create2Cell - Extended CREATE cell. (section 5.1)
| +- Created2Cell - Extended CREATED cell. (section 5.1)
|
|- PaddingCell - Padding negotiation. (section 7.2)
|- VersionsCell - Negotiate proto version. (section 4)
|- NetinfoCell - Time and address info. (section 4.5)
|- PaddingNegotiateCell - Padding negotiation. (section 7.2)
|- VPaddingCell - Variable-length padding. (section 7.2)
|- CertsCell - Relay certificates. (section 4.2)
|- AuthChallengeCell - Challenge value. (section 4.3)
|- AuthenticateCell - Client authentication. (section 4.5)
|- AuthorizeCell - Client authorization. (not yet used)
|
|- pack - encodes cell into bytes
|- unpack - decodes series of cells
+- pop - decodes cell with remainder
"""
import copy
import datetime
import inspect
import os
import sys
import stem.util
from stem import UNDEFINED
from stem.client.datatype import HASH_LEN, ZERO, LinkProtocol, Address, Certificate, CloseReason, RelayCommand, Size, split
from stem.util import datetime_to_unix, str_tools
FIXED_PAYLOAD_LEN = 509 # PAYLOAD_LEN, per tor-spec section 0.2
AUTH_CHALLENGE_SIZE = 32
CELL_TYPE_SIZE = Size.CHAR
PAYLOAD_LEN_SIZE = Size.SHORT
RELAY_DIGEST_SIZE = Size.LONG
STREAM_ID_REQUIRED = (
RelayCommand.BEGIN,
RelayCommand.DATA,
RelayCommand.END,
RelayCommand.CONNECTED,
RelayCommand.RESOLVE,
RelayCommand.RESOLVED,
RelayCommand.BEGIN_DIR,
)
STREAM_ID_DISALLOWED = (
RelayCommand.EXTEND,
RelayCommand.EXTENDED,
RelayCommand.TRUNCATE,
RelayCommand.TRUNCATED,
RelayCommand.DROP,
RelayCommand.EXTEND2,
RelayCommand.EXTENDED2,
)
class Cell(object):
"""
Metadata for ORPort cells.
Unused padding is **not** used in equality checks or hashing. If two cells
differ only in their *unused* attribute they are functionally equal.
The following cell types explicitly don't have *unused* content:
* PaddingCell (we consider all content part of payload)
* VersionsCell (all content is unpacked and treated as a version specification)
* VPaddingCell (we consider all content part of payload)
:var bytes unused: unused filler that padded the cell to the expected size
"""
NAME = 'UNKNOWN'
VALUE = -1
IS_FIXED_SIZE = False
def __init__(self, unused = b''):
super(Cell, self).__init__()
self.unused = unused
@staticmethod
def by_name(name):
"""
Provides cell attributes by its name.
:param str name: cell command to fetch
:raises: **ValueError** if cell type is invalid
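For example (a small sketch)::
>>> Cell.by_name('RELAY').VALUE
3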
"""
for _, cls in inspect.getmembers(sys.modules[__name__]):
if name == getattr(cls, 'NAME', UNDEFINED):
return cls
raise ValueError("'%s' isn't a valid cell type" % name)
@staticmethod
def by_value(value):
"""
Provides cell attributes by its value.
:param int value: cell value to fetch
:raises: **ValueError** if cell type is invalid
"""
for _, cls in inspect.getmembers(sys.modules[__name__]):
if value == getattr(cls, 'VALUE', UNDEFINED):
return cls
raise ValueError("'%s' isn't a valid cell value" % value)
def pack(self, link_protocol):
raise NotImplementedError('Packing not yet implemented for %s cells' % type(self).NAME)
@staticmethod
def unpack(content, link_protocol):
"""
Unpacks all cells from a response.
:param bytes content: payload to decode
:param int link_protocol: link protocol version
:returns: :class:`~stem.client.cell.Cell` generator
:raises:
* ValueError if content is malformed
* NotImplementedError if unable to unpack any of the cell types
"""
while content:
cell, content = Cell.pop(content, link_protocol)
yield cell
@staticmethod
def pop(content, link_protocol):
"""
Unpacks the first cell.
:param bytes content: payload to decode
:param int link_protocol: link protocol version
:returns: (:class:`~stem.client.cell.Cell`, remainder) tuple
:raises:
* ValueError if content is malformed
* NotImplementedError if unable to unpack this cell type
"""
link_protocol = LinkProtocol(link_protocol)
circ_id, content = link_protocol.circ_id_size.pop(content)
command, content = CELL_TYPE_SIZE.pop(content)
cls = Cell.by_value(command)
if cls.IS_FIXED_SIZE:
payload_len = FIXED_PAYLOAD_LEN
else:
payload_len, content = PAYLOAD_LEN_SIZE.pop(content)
if len(content) < payload_len:
raise ValueError('%s cell should have a payload of %i bytes, but only had %i' % (cls.NAME, payload_len, len(content)))
payload, content = split(content, payload_len)
return cls._unpack(payload, circ_id, link_protocol), content
@classmethod
def _pack(cls, link_protocol, payload, unused = b'', circ_id = None):
"""
Provides bytes that can be used on the wire for these cell attributes.
Format of a properly packed cell depends on if it's fixed or variable
sized...
::
Fixed: [ CircuitID ][ Command ][ Payload ][ Padding ]
Variable: [ CircuitID ][ Command ][ Size ][ Payload ]
:param int link_protocol: link protocol version
:param bytes payload: cell payload
:param bytes unused: unused filler that padded the cell
:param int circ_id: circuit id, if a CircuitCell
:returns: **bytes** with the encoded payload
:raises: **ValueError** if cell type invalid or payload makes cell too large
"""
if issubclass(cls, CircuitCell):
if circ_id is None:
raise ValueError('%s cells require a circuit identifier' % cls.NAME)
elif circ_id < 1:
raise ValueError('Circuit identifiers must be a positive integer, not %s' % circ_id)
else:
if circ_id is not None:
raise ValueError('%s cells should not specify a circuit identifier' % cls.NAME)
circ_id = 0 # cell doesn't concern a circuit, default field to zero
link_protocol = LinkProtocol(link_protocol)
cell = bytearray()
cell += link_protocol.circ_id_size.pack(circ_id)
cell += Size.CHAR.pack(cls.VALUE)
cell += b'' if cls.IS_FIXED_SIZE else Size.SHORT.pack(len(payload) + len(unused))
cell += payload
# include the unused portion (typically from unpacking)
cell += unused
# pad fixed sized cells to the required length
if cls.IS_FIXED_SIZE:
if len(cell) > link_protocol.fixed_cell_length:
raise ValueError('Cell of type %s is too large (%i bytes), must not be more than %i. Check payload size (was %i bytes)' % (cls.NAME, len(cell), link_protocol.fixed_cell_length, len(payload)))
cell += ZERO * (link_protocol.fixed_cell_length - len(cell))
return bytes(cell)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
"""
Subclass implementation for unpacking cell content.
:param bytes content: payload to decode
:param stem.client.datatype.LinkProtocol link_protocol: link protocol version
:param int circ_id: circuit id cell is for
:returns: instance of this cell type
:raises: **ValueError** if content is malformed
"""
raise NotImplementedError('Unpacking not yet implemented for %s cells' % cls.NAME)
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, Cell) else False
def __ne__(self, other):
return not self == other
class CircuitCell(Cell):
"""
Cell concerning circuits.
:var int circ_id: circuit id
"""
def __init__(self, circ_id, unused = b''):
super(CircuitCell, self).__init__(unused)
self.circ_id = circ_id
class PaddingCell(Cell):
"""
Randomized content to keep activity going on a circuit.
:var bytes payload: randomized payload
"""
NAME = 'PADDING'
VALUE = 0
IS_FIXED_SIZE = True
def __init__(self, payload = None):
if not payload:
payload = os.urandom(FIXED_PAYLOAD_LEN)
elif len(payload) != FIXED_PAYLOAD_LEN:
raise ValueError('Padding payload should be %i bytes, but was %i' % (FIXED_PAYLOAD_LEN, len(payload)))
super(PaddingCell, self).__init__()
self.payload = payload
def pack(self, link_protocol):
return PaddingCell._pack(link_protocol, self.payload)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
return PaddingCell(content)
def __hash__(self):
return stem.util._hash_attr(self, 'payload', cache = True)
class CreateCell(CircuitCell):
NAME = 'CREATE'
VALUE = 1
IS_FIXED_SIZE = True
def __init__(self):
super(CreateCell, self).__init__() # TODO: implement
class CreatedCell(CircuitCell):
NAME = 'CREATED'
VALUE = 2
IS_FIXED_SIZE = True
def __init__(self):
super(CreatedCell, self).__init__() # TODO: implement
class RelayCell(CircuitCell):
"""
Command concerning a relay circuit.
Our 'recognized' attribute provides a cheap (but incomplete) check for whether
our cell payload is encrypted. If non-zero the payload *IS* encrypted, but if
zero it is *PROBABLY* fully decrypted. This uncertainty exists because
encrypted cells have a small chance of coincidentally producing zero for this
value as well.
:var stem.client.RelayCommand command: command to be issued
:var int command_int: integer value of our command
:var bytes data: payload of the cell
:var int recognized: non-zero if payload is encrypted
:var int digest: running digest held with the relay
:var int stream_id: specific stream this concerns
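A hypothetical sketch of a BEGIN_DIR request (the circuit and stream ids
are placeholders)...
::
  from stem.client.cell import RelayCell
  cell = RelayCell(5, 'RELAY_BEGIN_DIR', '', stream_id = 1)
  raw = cell.pack(3)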
"""
NAME = 'RELAY'
VALUE = 3
IS_FIXED_SIZE = True
def __init__(self, circ_id, command, data, digest = 0, stream_id = 0, recognized = 0, unused = b''):
if 'hash' in str(type(digest)).lower():
# Unfortunately hashlib generates from a dynamic private class so
# isinstance() isn't such a great option. With python2/python3 the
# name is 'hashlib.HASH' whereas PyPy calls it just 'HASH' or 'Hash'.
digest_packed = digest.digest()[:RELAY_DIGEST_SIZE.size]
digest = RELAY_DIGEST_SIZE.unpack(digest_packed)
elif stem.util._is_str(digest):
digest_packed = digest[:RELAY_DIGEST_SIZE.size]
digest = RELAY_DIGEST_SIZE.unpack(digest_packed)
elif stem.util._is_int(digest):
pass
else:
raise ValueError('RELAY cell digest must be a hash, string, or int but was a %s' % type(digest).__name__)
super(RelayCell, self).__init__(circ_id, unused)
self.command, self.command_int = RelayCommand.get(command)
self.recognized = recognized
self.stream_id = stream_id
self.digest = digest
self.data = str_tools._to_bytes(data)
if digest == 0:
if not stream_id and self.command in STREAM_ID_REQUIRED:
raise ValueError('%s relay cells require a stream id' % self.command)
elif stream_id and self.command in STREAM_ID_DISALLOWED:
raise ValueError('%s relay cells concern the circuit itself and cannot have a stream id' % self.command)
def pack(self, link_protocol):
payload = bytearray()
payload += Size.CHAR.pack(self.command_int)
payload += Size.SHORT.pack(self.recognized)
payload += Size.SHORT.pack(self.stream_id)
payload += Size.LONG.pack(self.digest)
payload += Size.SHORT.pack(len(self.data))
payload += self.data
return RelayCell._pack(link_protocol, bytes(payload), self.unused, self.circ_id)
@staticmethod
def decrypt(link_protocol, content, key, digest):
"""
Decrypts content as a relay cell addressed to us. This provides back a
tuple of the form...
::
(cell (RelayCell), new_key (CipherContext), new_digest (HASH))
:param int link_protocol: link protocol version
:param bytes content: cell content to be decrypted
:param cryptography.hazmat.primitives.ciphers.CipherContext key:
key established with the relay we received this cell from
:param hashlib.HASH digest: running digest held with the relay
:returns: **tuple** with our decrypted cell and updated key/digest
:raises: :class:`stem.ProtocolError` if content doesn't belong to a relay
cell
"""
new_key = copy.copy(key)
new_digest = digest.copy()
if len(content) != link_protocol.fixed_cell_length:
raise stem.ProtocolError('RELAY cells should be %i bytes, but received %i' % (link_protocol.fixed_cell_length, len(content)))
circ_id, content = link_protocol.circ_id_size.pop(content)
command, encrypted_payload = Size.CHAR.pop(content)
if command != RelayCell.VALUE:
raise stem.ProtocolError('Cannot decrypt as a RELAY cell. This had command %i instead.' % command)
payload = new_key.update(encrypted_payload)
cell = RelayCell._unpack(payload, circ_id, link_protocol)
# TODO: Implement our decryption digest. It is used to support relaying
# within multi-hop circuits. On first glance this should go something
# like...
#
# # Our updated digest is calculated based on this cell with a blanked
# # digest field.
#
# digest_cell = RelayCell(self.circ_id, self.command, self.data, 0, self.stream_id, self.recognized, self.unused)
# new_digest.update(digest_cell.pack(link_protocol))
#
# is_encrypted == cell.recognized != 0 or self.digest == new_digest
#
# ... or something like that. Until we attempt to support relaying this is
# both moot and difficult to exercise in order to ensure we get it right.
return cell, new_key, new_digest
def encrypt(self, link_protocol, key, digest):
"""
Encrypts our cell content to be sent with the given key. This provides back
a tuple of the form...
::
(payload (bytes), new_key (CipherContext), new_digest (HASH))
:param int link_protocol: link protocol version
:param cryptography.hazmat.primitives.ciphers.CipherContext key:
key established with the relay we're sending this cell to
:param hashlib.HASH digest: running digest held with the relay
:returns: **tuple** with our encrypted payload and updated key/digest
"""
new_key = copy.copy(key)
new_digest = digest.copy()
# Digests are computed from our payload, not including our header's circuit
# id (2 or 4 bytes) and command (1 byte).
header_size = link_protocol.circ_id_size.size + 1
payload_without_digest = self.pack(link_protocol)[header_size:]
new_digest.update(payload_without_digest)
# Pack a copy of ourselves with our newly calculated digest, and encrypt
# the payload. Header remains plaintext.
cell = RelayCell(self.circ_id, self.command, self.data, new_digest, self.stream_id, self.recognized, self.unused)
header, payload = split(cell.pack(link_protocol), header_size)
return header + new_key.update(payload), new_key, new_digest
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
command, content = Size.CHAR.pop(content)
recognized, content = Size.SHORT.pop(content) # 'recognized' field
stream_id, content = Size.SHORT.pop(content)
digest, content = Size.LONG.pop(content)
data_len, content = Size.SHORT.pop(content)
data, unused = split(content, data_len)
if len(data) != data_len:
raise ValueError('%s cell said it had %i bytes of data, but only had %i' % (cls.NAME, data_len, len(data)))
return RelayCell(circ_id, command, data, digest, stream_id, recognized, unused)
def __hash__(self):
return stem.util._hash_attr(self, 'command_int', 'stream_id', 'digest', 'data', cache = True)
class DestroyCell(CircuitCell):
"""
Closes the given circuit.
:var stem.client.CloseReason reason: reason the circuit is being closed
:var int reason_int: integer value of our closure reason
"""
NAME = 'DESTROY'
VALUE = 4
IS_FIXED_SIZE = True
def __init__(self, circ_id, reason = CloseReason.NONE, unused = b''):
super(DestroyCell, self).__init__(circ_id, unused)
self.reason, self.reason_int = CloseReason.get(reason)
def pack(self, link_protocol):
return DestroyCell._pack(link_protocol, Size.CHAR.pack(self.reason_int), self.unused, self.circ_id)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
reason, unused = Size.CHAR.pop(content)
return DestroyCell(circ_id, reason, unused)
def __hash__(self):
return stem.util._hash_attr(self, 'circ_id', 'reason_int', cache = True)
class CreateFastCell(CircuitCell):
"""
Create a circuit with our first hop. This is lighter weight than further hops
because we've already established the relay's identity and secret key.
:var bytes key_material: randomized key material
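For example, a hypothetical first hop handshake (the circuit id is a
placeholder)...
::
  cell = CreateFastCell(1)
  len(cell.key_material)  # 20, freshly randomized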
"""
NAME = 'CREATE_FAST'
VALUE = 5
IS_FIXED_SIZE = True
def __init__(self, circ_id, key_material = None, unused = b''):
if not key_material:
key_material = os.urandom(HASH_LEN)
elif len(key_material) != HASH_LEN:
raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))
super(CreateFastCell, self).__init__(circ_id, unused)
self.key_material = key_material
def pack(self, link_protocol):
return CreateFastCell._pack(link_protocol, self.key_material, self.unused, self.circ_id)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
key_material, unused = split(content, HASH_LEN)
if len(key_material) != HASH_LEN:
raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))
return CreateFastCell(circ_id, key_material, unused)
def __hash__(self):
return stem.util._hash_attr(self, 'circ_id', 'key_material', cache = True)
class CreatedFastCell(CircuitCell):
"""
CREATE_FAST reply.
:var bytes key_material: randomized key material
:var bytes derivative_key: hash proving the relay knows our shared key
"""
NAME = 'CREATED_FAST'
VALUE = 6
IS_FIXED_SIZE = True
def __init__(self, circ_id, derivative_key, key_material = None, unused = b''):
if not key_material:
key_material = os.urandom(HASH_LEN)
elif len(key_material) != HASH_LEN:
raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))
if len(derivative_key) != HASH_LEN:
raise ValueError('Derivative key should be %i bytes, but was %i' % (HASH_LEN, len(derivative_key)))
super(CreatedFastCell, self).__init__(circ_id, unused)
self.key_material = key_material
self.derivative_key = derivative_key
def pack(self, link_protocol):
return CreatedFastCell._pack(link_protocol, self.key_material + self.derivative_key, self.unused, self.circ_id)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
if len(content) < HASH_LEN * 2:
raise ValueError('Key material and derivative key should be %i bytes, but was %i' % (HASH_LEN * 2, len(content)))
key_material, content = split(content, HASH_LEN)
derivative_key, content = split(content, HASH_LEN)
return CreatedFastCell(circ_id, derivative_key, key_material, content)
def __hash__(self):
return stem.util._hash_attr(self, 'circ_id', 'derivative_key', 'key_material', cache = True)
class VersionsCell(Cell):
"""
Link version negotiation cell.
:var list versions: link versions
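For example...
::
  >>> VersionsCell([3, 4, 5]).pack(2)
  b'\x00\x00\x07\x00\x06\x00\x03\x00\x04\x00\x05'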
"""
NAME = 'VERSIONS'
VALUE = 7
IS_FIXED_SIZE = False
def __init__(self, versions):
super(VersionsCell, self).__init__()
self.versions = versions
def pack(self, link_protocol):
payload = b''.join([Size.SHORT.pack(v) for v in self.versions])
return VersionsCell._pack(link_protocol, payload)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
link_protocols = []
while content:
version, content = Size.SHORT.pop(content)
link_protocols.append(version)
return VersionsCell(link_protocols)
def __hash__(self):
return stem.util._hash_attr(self, 'versions', cache = True)
class NetinfoCell(Cell):
"""
Information relays exchange about each other.
:var datetime timestamp: current time
:var stem.client.datatype.Address receiver_address: receiver's OR address
:var list sender_addresses: sender's OR addresses
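A minimal sketch (the loopback address is just a placeholder)...
::
  from stem.client.datatype import Address
  cell = NetinfoCell(Address('127.0.0.1'), [])
  raw = cell.pack(3)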
"""
NAME = 'NETINFO'
VALUE = 8
IS_FIXED_SIZE = True
def __init__(self, receiver_address, sender_addresses, timestamp = None, unused = b''):
super(NetinfoCell, self).__init__(unused)
self.timestamp = timestamp if timestamp else datetime.datetime.now()
self.receiver_address = receiver_address
self.sender_addresses = sender_addresses
def pack(self, link_protocol):
payload = bytearray()
payload += Size.LONG.pack(int(datetime_to_unix(self.timestamp)))
payload += self.receiver_address.pack()
payload += Size.CHAR.pack(len(self.sender_addresses))
for addr in self.sender_addresses:
payload += addr.pack()
return NetinfoCell._pack(link_protocol, bytes(payload), self.unused)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
timestamp, content = Size.LONG.pop(content)
receiver_address, content = Address.pop(content)
sender_addresses = []
sender_addr_count, content = Size.CHAR.pop(content)
for i in range(sender_addr_count):
addr, content = Address.pop(content)
sender_addresses.append(addr)
return NetinfoCell(receiver_address, sender_addresses, datetime.datetime.utcfromtimestamp(timestamp), unused = content)
def __hash__(self):
return stem.util._hash_attr(self, 'timestamp', 'receiver_address', 'sender_addresses', cache = True)
class RelayEarlyCell(CircuitCell):
NAME = 'RELAY_EARLY'
VALUE = 9
IS_FIXED_SIZE = True
def __init__(self):
super(RelayEarlyCell, self).__init__() # TODO: implement
class Create2Cell(CircuitCell):
NAME = 'CREATE2'
VALUE = 10
IS_FIXED_SIZE = True
def __init__(self):
super(Create2Cell, self).__init__() # TODO: implement
class Created2Cell(Cell):
NAME = 'CREATED2'
VALUE = 11
IS_FIXED_SIZE = True
def __init__(self):
super(Created2Cell, self).__init__() # TODO: implement
class PaddingNegotiateCell(Cell):
NAME = 'PADDING_NEGOTIATE'
VALUE = 12
IS_FIXED_SIZE = True
def __init__(self):
super(PaddingNegotiateCell, self).__init__() # TODO: implement
class VPaddingCell(Cell):
"""
Variable-length randomized content to keep activity going on a circuit.
:var bytes payload: randomized payload
"""
NAME = 'VPADDING'
VALUE = 128
IS_FIXED_SIZE = False
def __init__(self, size = None, payload = None):
if size is None and payload is None:
raise ValueError('VPaddingCell constructor must specify payload or size')
elif size is not None and size < 0:
raise ValueError('VPaddingCell size (%s) cannot be negative' % size)
elif size is not None and payload is not None and size != len(payload):
raise ValueError('VPaddingCell constructor specified both a size of %i bytes and payload of %i bytes' % (size, len(payload)))
super(VPaddingCell, self).__init__()
self.payload = payload if payload is not None else os.urandom(size)
def pack(self, link_protocol):
return VPaddingCell._pack(link_protocol, self.payload)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
return VPaddingCell(payload = content)
def __hash__(self):
return stem.util._hash_attr(self, 'payload', cache = True)
class CertsCell(Cell):
"""
Certificates held by the relay we're communicating with.
:var list certificates: :class:`~stem.client.Certificate` instances provided by the relay
"""
NAME = 'CERTS'
VALUE = 129
IS_FIXED_SIZE = False
def __init__(self, certs, unused = b''):
super(CertsCell, self).__init__(unused)
self.certificates = certs
def pack(self, link_protocol):
return CertsCell._pack(link_protocol, Size.CHAR.pack(len(self.certificates)) + b''.join([cert.pack() for cert in self.certificates]), self.unused)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
cert_count, content = Size.CHAR.pop(content)
certs = []
for i in range(cert_count):
if not content:
raise ValueError('CERTS cell indicates it should have %i certificates, but only contained %i' % (cert_count, len(certs)))
cert, content = Certificate.pop(content)
certs.append(cert)
return CertsCell(certs, unused = content)
def __hash__(self):
return stem.util._hash_attr(self, 'certificates', cache = True)
class AuthChallengeCell(Cell):
"""
First step of the authentication handshake.
:var bytes challenge: random bytes for us to sign to authenticate
:var list methods: authentication methods supported by the relay we're
communicating with
"""
NAME = 'AUTH_CHALLENGE'
VALUE = 130
IS_FIXED_SIZE = False
def __init__(self, methods, challenge = None, unused = b''):
if not challenge:
challenge = os.urandom(AUTH_CHALLENGE_SIZE)
elif len(challenge) != AUTH_CHALLENGE_SIZE:
raise ValueError('AUTH_CHALLENGE must be %i bytes, but was %i' % (AUTH_CHALLENGE_SIZE, len(challenge)))
super(AuthChallengeCell, self).__init__(unused)
self.challenge = challenge
self.methods = methods
def pack(self, link_protocol):
payload = bytearray()
payload += self.challenge
payload += Size.SHORT.pack(len(self.methods))
for method in self.methods:
payload += Size.SHORT.pack(method)
return AuthChallengeCell._pack(link_protocol, bytes(payload), self.unused)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
min_size = AUTH_CHALLENGE_SIZE + Size.SHORT.size
if len(content) < min_size:
raise ValueError('AUTH_CHALLENGE payload should be at least %i bytes, but was %i' % (min_size, len(content)))
challenge, content = split(content, AUTH_CHALLENGE_SIZE)
method_count, content = Size.SHORT.pop(content)
if len(content) < method_count * Size.SHORT.size:
raise ValueError('AUTH_CHALLENGE should have %i methods, but only had %i bytes for it' % (method_count, len(content)))
methods = []
for i in range(method_count):
method, content = Size.SHORT.pop(content)
methods.append(method)
return AuthChallengeCell(methods, challenge, unused = content)
def __hash__(self):
return stem.util._hash_attr(self, 'challenge', 'methods', cache = True)
class AuthenticateCell(Cell):
NAME = 'AUTHENTICATE'
VALUE = 131
IS_FIXED_SIZE = False
def __init__(self):
super(AuthenticateCell, self).__init__() # TODO: implement
class AuthorizeCell(Cell):
NAME = 'AUTHORIZE'
VALUE = 132
IS_FIXED_SIZE = False
def __init__(self):
super(AuthorizeCell, self).__init__() # TODO: implement
stem-1.8.0/stem/interpreter/ 0000775 0001750 0001750 00000000000 13602232262 016523 5 ustar atagar atagar 0000000 0000000 stem-1.8.0/stem/interpreter/help.py 0000664 0001750 0001750 00000007303 13501272761 020036 0 ustar atagar atagar 0000000 0000000 # Copyright 2014-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Provides our /help responses.
"""
import stem.prereq
from stem.interpreter import (
STANDARD_OUTPUT,
BOLD_OUTPUT,
ERROR_OUTPUT,
msg,
uses_settings,
)
from stem.util.term import format
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
def response(controller, arg):
"""
Provides our /help response.
:param stem.control.Controller controller: tor control connection
:param str arg: controller or interpreter command to provide help output for
:returns: **str** with our help response
"""
# Normalizing inputs first so we can better cache responses.
return _response(controller, _normalize(arg))
def _normalize(arg):
arg = arg.upper()
# If there are multiple arguments then just take the first. This is
# particularly likely if they're trying to query a full command (for
# instance "/help GETINFO version")
arg = arg.split(' ')[0]
# strip slash if someone enters an interpreter command (ex. "/help /help")
if arg.startswith('/'):
arg = arg[1:]
return arg
@lru_cache()
@uses_settings
def _response(controller, arg, config):
if not arg:
return _general_help()
usage_info = config.get('help.usage', {})
if arg not in usage_info:
return format("No help information available for '%s'..." % arg, *ERROR_OUTPUT)
output = format(usage_info[arg] + '\n', *BOLD_OUTPUT)
description = config.get('help.description.%s' % arg.lower(), '')
for line in description.splitlines():
output += format(' ' + line, *STANDARD_OUTPUT) + '\n'
output += '\n'
if arg == 'GETINFO':
results = controller.get_info('info/names', None)
if results:
for line in results.splitlines():
if ' -- ' in line:
opt, summary = line.split(' -- ', 1)
output += format('%-33s' % opt, *BOLD_OUTPUT)
output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n'
elif arg == 'GETCONF':
results = controller.get_info('config/names', None)
if results:
options = [opt.split(' ', 1)[0] for opt in results.splitlines()]
for i in range(0, len(options), 2):
line = ''
for entry in options[i:i + 2]:
line += '%-42s' % entry
output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n'
elif arg == 'SIGNAL':
signal_options = config.get('help.signal.options', {})
for signal, summary in signal_options.items():
output += format('%-15s' % signal, *BOLD_OUTPUT)
output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n'
elif arg == 'SETEVENTS':
results = controller.get_info('events/names', None)
if results:
entries = results.split()
# displays four columns of 20 characters
for i in range(0, len(entries), 4):
line = ''
for entry in entries[i:i + 4]:
line += '%-20s' % entry
output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n'
elif arg == 'USEFEATURE':
results = controller.get_info('features/names', None)
if results:
output += format(results, *STANDARD_OUTPUT) + '\n'
elif arg in ('LOADCONF', 'POSTDESCRIPTOR'):
# gives a warning that this option isn't yet implemented
output += format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT) + '\n'
return output.rstrip()
def _general_help():
lines = []
for line in msg('help.general').splitlines():
div = line.find(' - ')
if div != -1:
cmd, description = line[:div], line[div:]
lines.append(format(cmd, *BOLD_OUTPUT) + format(description, *STANDARD_OUTPUT))
else:
lines.append(format(line, *BOLD_OUTPUT))
return '\n'.join(lines)
stem-1.8.0/stem/interpreter/arguments.py 0000664 0001750 0001750 00000005365 13501272761 021121 0 ustar atagar atagar 0000000 0000000 # Copyright 2015-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Commandline argument parsing for our interpreter prompt.
"""
import collections
import getopt
import os
import stem.interpreter
import stem.util.connection
DEFAULT_ARGS = {
'control_address': '127.0.0.1',
'control_port': 'default',
'user_provided_port': False,
'control_socket': '/var/run/tor/control',
'user_provided_socket': False,
'tor_path': 'tor',
'run_cmd': None,
'run_path': None,
'disable_color': False,
'print_help': False,
}
OPT = 'i:s:h'
OPT_EXPANDED = ['interface=', 'socket=', 'tor=', 'run=', 'no-color', 'help']
def parse(argv):
"""
Parses our arguments, providing a named tuple with their values.
:param list argv: input arguments to be parsed
:returns: a **named tuple** with our parsed arguments
:raises: **ValueError** if we got an invalid argument
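For example...
::
  >>> parse(['--interface', '9051']).control_port
  9051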
"""
args = dict(DEFAULT_ARGS)
try:
recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED)
if unrecognized_args:
error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
except Exception as exc:
raise ValueError('%s (for usage provide --help)' % exc)
for opt, arg in recognized_args:
if opt in ('-i', '--interface'):
if ':' in arg:
address, port = arg.rsplit(':', 1)
else:
address, port = None, arg
if address is not None:
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("'%s' isn't a valid IPv4 address" % address)
args['control_address'] = address
if not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port number" % port)
args['control_port'] = int(port)
args['user_provided_port'] = True
elif opt in ('-s', '--socket'):
args['control_socket'] = arg
args['user_provided_socket'] = True
elif opt == '--tor':
args['tor_path'] = arg
elif opt == '--run':
if os.path.exists(arg):
args['run_path'] = arg
else:
args['run_cmd'] = arg
elif opt == '--no-color':
args['disable_color'] = True
elif opt in ('-h', '--help'):
args['print_help'] = True
# translates our args dict into a named tuple
Args = collections.namedtuple('Args', args.keys())
return Args(**args)
def get_help():
"""
Provides our --help usage information.
:returns: **str** with our usage information
"""
return stem.interpreter.msg(
'msg.help',
address = DEFAULT_ARGS['control_address'],
port = DEFAULT_ARGS['control_port'],
socket = DEFAULT_ARGS['control_socket'],
)
stem-1.8.0/stem/interpreter/commands.py 0000664 0001750 0001750 00000027545 13501272761 020721 0 ustar atagar atagar 0000000 0000000 # Copyright 2014-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Handles making requests and formatting the responses.
"""
import code
import contextlib
import socket
import sys
import stem
import stem.control
import stem.descriptor.remote
import stem.interpreter.help
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg
from stem.util.term import format
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
MAX_EVENTS = 100
def _get_fingerprint(arg, controller):
"""
Resolves user input into a relay fingerprint. This accepts...
* Fingerprints
* Nicknames
* IPv4 addresses, either with or without an ORPort
* Empty input, which is resolved to ourselves if we're a relay
:param str arg: input to be resolved to a relay fingerprint
:param stem.control.Controller controller: tor control connection
:returns: **str** for the relay fingerprint
:raises: **ValueError** if we're unable to resolve the input to a relay
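For instance (assuming our controller has a current consensus)...
::
  >>> _get_fingerprint('moria1', controller)
  '9695DFC35FFEB861329B9F1AB04C46397020CE31'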
"""
if not arg:
try:
return controller.get_info('fingerprint')
except:
raise ValueError("We aren't a relay, no information to provide")
elif stem.util.tor_tools.is_valid_fingerprint(arg):
return arg
elif stem.util.tor_tools.is_valid_nickname(arg):
try:
return controller.get_network_status(arg).fingerprint
except:
raise ValueError("Unable to find a relay with the nickname of '%s'" % arg)
elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg):
if ':' in arg:
address, port = arg.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("'%s' isn't a valid IPv4 address" % address)
elif port and not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
port = int(port)
else:
address, port = arg, None
matches = {}
for desc in controller.get_network_statuses():
if desc.address == address:
if not port or desc.or_port == port:
matches[desc.or_port] = desc.fingerprint
if len(matches) == 0:
raise ValueError('No relays found at %s' % arg)
elif len(matches) == 1:
return list(matches.values())[0]
else:
response = "There's multiple relays at %s, include a port to specify which.\n\n" % arg
for i, or_port in enumerate(matches):
response += ' %i. %s:%s, fingerprint: %s\n' % (i + 1, address, or_port, matches[or_port])
raise ValueError(response)
else:
raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg)
@contextlib.contextmanager
def redirect(stdout, stderr):
original = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stdout, stderr
try:
yield
finally:
sys.stdout, sys.stderr = original
class ControlInterpreter(code.InteractiveConsole):
"""
Handles issuing requests and providing nicely formed responses, with support
for special irc style subcommands.
"""
def __init__(self, controller):
self._received_events = []
code.InteractiveConsole.__init__(self, {
'stem': stem,
'stem.control': stem.control,
'controller': controller,
'events': self.get_events,
})
self._controller = controller
self._run_python_commands = True
# Indicates if we're processing a multiline command, such as a
# conditional block or loop.
self.is_multiline_context = False
# Intercept events our controller hears about at a pretty low level since
# the user will likely be requesting them by direct 'SETEVENTS' calls.
handle_event_real = self._controller._handle_event
def handle_event_wrapper(event_message):
handle_event_real(event_message)
self._received_events.insert(0, event_message)
if len(self._received_events) > MAX_EVENTS:
self._received_events.pop()
self._controller._handle_event = handle_event_wrapper
def get_events(self, *event_types):
events = list(self._received_events)
event_types = list(map(str.upper, event_types)) # make filtering case insensitive
if event_types:
events = [e for e in events if e.type in event_types]
return events
def do_help(self, arg):
"""
Performs the '/help' operation, giving usage information for the given
argument or a general summary if there wasn't one.
"""
return stem.interpreter.help.response(self._controller, arg)
def do_events(self, arg):
"""
Performs the '/events' operation, dumping the events that we've received
belonging to the given types. If no types are specified then this provides
all buffered events.
If the user runs '/events clear' then this clears the list of events we've
received.
"""
event_types = arg.upper().split()
if 'CLEAR' in event_types:
del self._received_events[:]
return format('cleared event backlog', *STANDARD_OUTPUT)
return '\n'.join([format(str(e), *STANDARD_OUTPUT) for e in self.get_events(*event_types)])
def do_info(self, arg):
"""
Performs the '/info' operation, looking up a relay by fingerprint, IP
address, or nickname and printing its descriptor and consensus entries in a
pretty fashion.
"""
try:
fingerprint = _get_fingerprint(arg, self._controller)
except ValueError as exc:
return format(str(exc), *ERROR_OUTPUT)
ns_desc = self._controller.get_network_status(fingerprint, None)
server_desc = self._controller.get_server_descriptor(fingerprint, None)
extrainfo_desc = None
micro_desc = self._controller.get_microdescriptor(fingerprint, None)
# We'll mostly rely on the router status entry. Either the server
# descriptor or microdescriptor will be missing, so we'll treat them as
# being optional.
if not ns_desc:
return format('Unable to find consensus information for %s' % fingerprint, *ERROR_OUTPUT)
# More likely than not we'll have the microdescriptor but not server and
# extrainfo descriptors. If so, fetch them.
downloader = stem.descriptor.remote.DescriptorDownloader(timeout = 5)
server_desc_query = downloader.get_server_descriptors(fingerprint)
extrainfo_desc_query = downloader.get_extrainfo_descriptors(fingerprint)
for desc in server_desc_query:
server_desc = desc
for desc in extrainfo_desc_query:
extrainfo_desc = desc
address_extrainfo = []
try:
address_extrainfo.append(socket.gethostbyaddr(ns_desc.address)[0])
except:
pass
try:
address_extrainfo.append(self._controller.get_info('ip-to-country/%s' % ns_desc.address))
except:
pass
address_extrainfo_label = ' (%s)' % ', '.join(address_extrainfo) if address_extrainfo else ''
if server_desc:
exit_policy_label = str(server_desc.exit_policy)
elif micro_desc:
exit_policy_label = str(micro_desc.exit_policy)
else:
exit_policy_label = 'Unknown'
lines = [
'%s (%s)' % (ns_desc.nickname, fingerprint),
format('address: ', *BOLD_OUTPUT) + '%s:%s%s' % (ns_desc.address, ns_desc.or_port, address_extrainfo_label),
]
if server_desc:
lines.append(format('tor version: ', *BOLD_OUTPUT) + str(server_desc.tor_version))
lines.append(format('flags: ', *BOLD_OUTPUT) + ', '.join(ns_desc.flags))
lines.append(format('exit policy: ', *BOLD_OUTPUT) + exit_policy_label)
if server_desc and server_desc.contact:
contact = stem.util.str_tools._to_unicode(server_desc.contact)
# clears up some highly common obscuring
for alias in (' at ', ' AT '):
contact = contact.replace(alias, '@')
for alias in (' dot ', ' DOT '):
contact = contact.replace(alias, '.')
lines.append(format('contact: ', *BOLD_OUTPUT) + contact)
descriptor_section = [
('Server Descriptor:', server_desc),
('Extrainfo Descriptor:', extrainfo_desc),
('Microdescriptor:', micro_desc),
('Router Status Entry:', ns_desc),
]
div = format('-' * 80, *STANDARD_OUTPUT)
for label, desc in descriptor_section:
if desc:
lines += ['', div, format(label, *BOLD_OUTPUT), div, '']
lines += [format(l, *STANDARD_OUTPUT) for l in str(desc).splitlines()]
return '\n'.join(lines)
def do_python(self, arg):
"""
Performs the '/python' operation, toggling if we accept python commands or
not.
"""
if not arg:
status = 'enabled' if self._run_python_commands else 'disabled'
return format('Python support is currently %s.' % status, *STANDARD_OUTPUT)
elif arg.lower() == 'enable':
self._run_python_commands = True
elif arg.lower() == 'disable':
self._run_python_commands = False
else:
return format("'%s' is not recognized. Please run either '/python enable' or '/python disable'." % arg, *ERROR_OUTPUT)
if self._run_python_commands:
response = "Python support enabled, we'll now run non-interpreter commands as python."
else:
response = "Python support disabled, we'll now pass along all commands to tor."
return format(response, *STANDARD_OUTPUT)
@uses_settings
def run_command(self, command, config, print_response = False):
"""
Runs the given command. Requests starting with a '/' are special commands
to the interpreter, and anything else is sent to the control port.
:param str command: command to be processed
:param bool print_response: prints the response to stdout if true
:returns: **str** with the formatted response
:raises: **stem.SocketClosed** if the control connection has been severed
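A hypothetical sketch...
::
  interpreter = ControlInterpreter(controller)
  reply = interpreter.run_command('GETINFO version')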
"""
# Commands fall into three categories:
#
# * Interpreter commands. These start with a '/'.
#
# * Controller commands stem knows how to handle. We use our Controller's
# methods for these to take advantage of caching and present nicer
# output.
#
# * Other tor commands. We pass these directly on to the control port.
cmd, arg = command.strip(), ''
if ' ' in cmd:
cmd, arg = cmd.split(' ', 1)
output = ''
if cmd.startswith('/'):
cmd = cmd.lower()
if cmd == '/quit':
raise stem.SocketClosed()
elif cmd == '/events':
output = self.do_events(arg)
elif cmd == '/info':
output = self.do_info(arg)
elif cmd == '/python':
output = self.do_python(arg)
elif cmd == '/help':
output = self.do_help(arg)
else:
output = format("'%s' isn't a recognized command" % command, *ERROR_OUTPUT)
else:
cmd = cmd.upper() # makes commands uppercase to match the spec
if cmd.replace('+', '') in ('LOADCONF', 'POSTDESCRIPTOR'):
# provides a notice that multi-line controller input isn't yet implemented
output = format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT)
elif cmd == 'QUIT':
self._controller.msg(command)
raise stem.SocketClosed()
else:
is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events'
if self._run_python_commands and not is_tor_command:
console_output = StringIO()
with redirect(console_output, console_output):
self.is_multiline_context = code.InteractiveConsole.push(self, command)
output = console_output.getvalue().strip()
else:
try:
output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT)
except stem.ControllerError as exc:
if isinstance(exc, stem.SocketClosed):
raise
else:
output = format(str(exc), *ERROR_OUTPUT)
if output:
output += '\n' # give ourselves an extra line before the next prompt
if print_response:
print(output)
return output
stem-1.8.0/stem/interpreter/__init__.py 0000664 0001750 0001750 00000013671 13501272761 020652 0 ustar atagar atagar 0000000 0000000 # Copyright 2015-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Interactive interpreter for interacting with Tor directly. This adds usability
features such as tab completion, history, and IRC-style functions (like /help).
"""
import os
import sys
import stem
import stem.connection
import stem.prereq
import stem.process
import stem.util.conf
import stem.util.system
import stem.util.term
from stem.util.term import Attr, Color, format
__all__ = [
'arguments',
'autocomplete',
'commands',
'help',
]
PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE)
STANDARD_OUTPUT = (Color.BLUE, Attr.LINES)
BOLD_OUTPUT = (Color.BLUE, Attr.BOLD, Attr.LINES)
HEADER_OUTPUT = (Color.GREEN, Attr.LINES)
HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD, Attr.LINES)
ERROR_OUTPUT = (Attr.BOLD, Color.RED, Attr.LINES)
settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')
uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path)
@uses_settings
def msg(message, config, **attr):
return config.get(message).format(**attr)
def main():
import readline
import stem.interpreter.arguments
import stem.interpreter.autocomplete
import stem.interpreter.commands
try:
args = stem.interpreter.arguments.parse(sys.argv[1:])
except ValueError as exc:
print(exc)
sys.exit(1)
if args.print_help:
print(stem.interpreter.arguments.get_help())
sys.exit()
if args.disable_color or not sys.stdout.isatty():
global PROMPT
stem.util.term.DISABLE_COLOR_SUPPORT = True
PROMPT = '>>> '
# If the user isn't connecting to something in particular then offer to start
# tor if it isn't running.
if not (args.user_provided_port or args.user_provided_socket):
is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real')
if not is_tor_running:
if args.tor_path == 'tor' and not stem.util.system.is_available('tor'):
print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT))
sys.exit(1)
else:
if not args.run_cmd and not args.run_path:
print(format(msg('msg.starting_tor'), *HEADER_OUTPUT))
control_port = '9051' if args.control_port == 'default' else str(args.control_port)
try:
stem.process.launch_tor_with_config(
config = {
'SocksPort': '0',
'ControlPort': control_port,
'CookieAuthentication': '1',
'ExitPolicy': 'reject *:*',
},
tor_cmd = args.tor_path,
completion_percent = 5,
take_ownership = True,
)
except OSError as exc:
print(format(msg('msg.unable_to_start_tor', error = exc), *ERROR_OUTPUT))
sys.exit(1)
control_port = (args.control_address, args.control_port)
control_socket = args.control_socket
# If the user explicitly specified an endpoint then just try to connect to
# that.
if args.user_provided_socket and not args.user_provided_port:
control_port = None
elif args.user_provided_port and not args.user_provided_socket:
control_socket = None
controller = stem.connection.connect(
control_port = control_port,
control_socket = control_socket,
password_prompt = True,
)
if controller is None:
sys.exit(1)
with controller:
interpreter = stem.interpreter.commands.ControlInterpreter(controller)
showed_close_confirmation = False
if args.run_cmd:
if args.run_cmd.upper().startswith('SETEVENTS '):
# TODO: we can use a lambda here when dropping python 2.x support, but
# until then print's status as a keyword prevents it from being used in
# lambdas
def handle_event(event_message):
print(format(str(event_message), *STANDARD_OUTPUT))
controller._handle_event = handle_event
if sys.stdout.isatty():
events = args.run_cmd.upper().split(' ', 1)[1]
print(format('Listening to %s events. Press any key to quit.\n' % events, *HEADER_BOLD_OUTPUT))
controller.msg(args.run_cmd)
try:
input() if stem.prereq.is_python_3() else raw_input()
except (KeyboardInterrupt, stem.SocketClosed):
pass
else:
interpreter.run_command(args.run_cmd, print_response = True)
elif args.run_path:
try:
for line in open(args.run_path).readlines():
interpreter.run_command(line.strip(), print_response = True)
except IOError as exc:
print(format(msg('msg.unable_to_read_file', path = args.run_path, error = exc), *ERROR_OUTPUT))
sys.exit(1)
else:
autocompleter = stem.interpreter.autocomplete.Autocompleter(controller)
readline.parse_and_bind('tab: complete')
readline.set_completer(autocompleter.complete)
readline.set_completer_delims('\n')
for line in msg('msg.startup_banner').splitlines():
line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT
print(format(line, *line_format))
print('')
while True:
try:
prompt = '... ' if interpreter.is_multiline_context else PROMPT
user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt)
interpreter.run_command(user_input, print_response = True)
except stem.SocketClosed:
if showed_close_confirmation:
print(format('Unable to run tor commands. The control connection has been closed.', *ERROR_OUTPUT))
else:
prompt = format("Tor's control port has closed. Do you want to continue this interpreter? (y/n) ", *HEADER_BOLD_OUTPUT)
user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt)
print('') # blank line
if user_input.lower() in ('y', 'yes'):
showed_close_confirmation = True
else:
break
except (KeyboardInterrupt, EOFError, stem.SocketClosed):
print('') # move cursor to the following line
break
stem-1.8.0/stem/interpreter/settings.cfg 0000664 0001750 0001750 00000030141 13501272761 021051 0 ustar atagar atagar 0000000 0000000 ################################################################################
#
# Configuration data used by Stem's interpreter prompt.
#
################################################################################
####################
# GENERAL MESSAGES #
####################
msg.multiline_unimplemented_notice Multi-line control options like this are not yet implemented.
msg.help
|Interactive interpreter for Tor. This provides you with direct access
|to Tor's control interface via either python or direct requests.
|
| -i, --interface [ADDRESS:]PORT change control interface from {address}:{port}
| -s, --socket SOCKET_PATH attach using unix domain socket if present,
| SOCKET_PATH defaults to: {socket}
| --tor PATH tor binary if tor isn't already running
| --run executes the given command or file of commands
| --no-color disables colorized output
| -h, --help presents this help
|
msg.startup_banner
|Welcome to Stem's interpreter prompt. This provides you with direct access to
|Tor's control interface.
|
|This acts like a standard python interpreter with a Tor connection available
|via your 'controller' variable...
|
| >>> controller.get_info('version')
| '0.2.5.1-alpha-dev (git-245ecfff36c0cecc)'
|
|You can also issue requests directly to Tor...
|
| >>> GETINFO version
| 250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)
| 250 OK
|
|For more information run '/help'.
|
msg.tor_unavailable Tor isn't running and the command currently isn't in your PATH.
msg.unable_to_start_tor Unable to start tor: {error}
msg.unable_to_read_file Unable to read {path}: {error}
msg.starting_tor
|Tor isn't running. Starting a temporary Tor instance for our interpreter to
|interact with. This will have a minimal non-relaying configuration, and be
|shut down when you're done.
|
|--------------------------------------------------------------------------------
|
###################
# OUTPUT OF /HELP #
###################
# Response for the '/help' command without any arguments.
help.general
|Interpreter commands include:
| /help - provides information for interpreter and tor commands
| /events - prints events that we've received
| /info - general information for a relay
| /python - enable or disable support for running python commands
| /quit - shuts down the interpreter
|
|Tor commands include:
| GETINFO - queries information from tor
| GETCONF, SETCONF, RESETCONF - show or edit a configuration option
| SIGNAL - issues control signal to the process (for resetting, stopping, etc)
| SETEVENTS - configures the events tor will notify us of
|
| USEFEATURE - enables custom behavior for the controller
| SAVECONF - writes tor's current configuration to our torrc
| LOADCONF - loads the given input like it was part of our torrc
| MAPADDRESS - replaces requests for one address with another
| POSTDESCRIPTOR - adds a relay descriptor to our cache
| EXTENDCIRCUIT - create or extend a tor circuit
| SETCIRCUITPURPOSE - configures the purpose associated with a circuit
| CLOSECIRCUIT - closes the given circuit
| ATTACHSTREAM - associates an application's stream with a tor circuit
| REDIRECTSTREAM - sets a stream's destination
| CLOSESTREAM - closes the given stream
| ADD_ONION - create a new hidden service
| DEL_ONION - delete a hidden service that was created with ADD_ONION
| HSFETCH - retrieve a hidden service descriptor, providing it in a HS_DESC_CONTENT event
| HSPOST - uploads a hidden service descriptor
| RESOLVE - issues an asynchronous dns or rdns request over tor
| TAKEOWNERSHIP - instructs tor to quit when this control connection is closed
| PROTOCOLINFO - queries version and controller authentication information
| QUIT - disconnect the control connection
|
|For more information use '/help [OPTION]'.
# Usage of tor and interpreter commands.
help.usage HELP => /help [OPTION]
help.usage EVENTS => /events [types]
help.usage INFO => /info [relay fingerprint, nickname, or IP address]
help.usage PYTHON => /python [enable,disable]
help.usage QUIT => /quit
help.usage GETINFO => GETINFO OPTION
help.usage GETCONF => GETCONF OPTION
help.usage SETCONF => SETCONF PARAM[=VALUE]
help.usage RESETCONF => RESETCONF PARAM[=VALUE]
help.usage SIGNAL => SIGNAL SIG
help.usage SETEVENTS => SETEVENTS [EXTENDED] [EVENTS]
help.usage USEFEATURE => USEFEATURE OPTION
help.usage SAVECONF => SAVECONF
help.usage LOADCONF => LOADCONF...
help.usage MAPADDRESS => MAPADDRESS SOURCE_ADDR=DESTINATION_ADDR
help.usage POSTDESCRIPTOR => POSTDESCRIPTOR [purpose=general/controller/bridge] [cache=yes/no]...
help.usage EXTENDCIRCUIT => EXTENDCIRCUIT CircuitID [PATH] [purpose=general/controller]
help.usage SETCIRCUITPURPOSE => SETCIRCUITPURPOSE CircuitID purpose=general/controller
help.usage CLOSECIRCUIT => CLOSECIRCUIT CircuitID [IfUnused]
help.usage ATTACHSTREAM => ATTACHSTREAM StreamID CircuitID [HOP=HopNum]
help.usage REDIRECTSTREAM => REDIRECTSTREAM StreamID Address [Port]
help.usage CLOSESTREAM => CLOSESTREAM StreamID Reason [Flag]
help.usage ADD_ONION => KeyType:KeyBlob [Flags=Flag] (Port=Port [,Target])...
help.usage DEL_ONION => ServiceID
help.usage HSFETCH => HSFETCH (HSAddress/v2-DescId) [SERVER=Server]...
help.usage HSPOST => [SERVER=Server] DESCRIPTOR
help.usage RESOLVE => RESOLVE [mode=reverse] address
help.usage TAKEOWNERSHIP => TAKEOWNERSHIP
help.usage PROTOCOLINFO => PROTOCOLINFO [ProtocolVersion]
# Longer description of what tor and interpreter commands do.
help.description.help
|Provides usage information for the given interpreter, tor command, or tor
|configuration option.
|
|Example:
| /help info # provides a description of the '/info' option
| /help GETINFO # usage information for tor's GETINFO controller option
help.description.events
|Provides events that we've received belonging to the given event types. If
|no types are specified then this provides all the messages that we've
|received.
|
|You can also run '/events clear' to clear the backlog of events we've
|received.
help.description.info
|Provides information for a relay that's currently in the consensus. If no
|relay is specified then this provides information on ourselves.
help.description.python
|Enables or disables support for running python commands. This determines how
|we treat commands this interpreter doesn't recognize...
|
|* If enabled then unrecognized commands are executed as python.
|* If disabled then unrecognized commands are passed along to tor.
help.description.quit
|Terminates the interpreter.
help.description.getinfo
|Queries the tor process for information. Options are...
|
help.description.getconf
|Provides the current value for a given configuration value. Options include...
|
help.description.setconf
|Sets the given configuration parameters. Values can be quoted or non-quoted
|strings, and reverts the option to 0 or NULL if not provided.
|
|Examples:
| * Sets a contact address and resets our family to NULL
| SETCONF MyFamily ContactInfo=foo@bar.com
|
| * Sets an exit policy that only includes port 80/443
| SETCONF ExitPolicy=\"accept *:80, accept *:443, reject *:*\"\
help.description.resetconf
|Reverts the given configuration options to their default values. If a value
|is provided then this behaves in the same way as SETCONF.
|
|Examples:
| * Returns both of our accounting parameters to their defaults
| RESETCONF AccountingMax AccountingStart
|
| * Uses the default exit policy and sets our nickname to be 'Goomba'
| RESETCONF ExitPolicy Nickname=Goomba
help.description.signal
|Issues a signal that tells the tor process to reload its torrc, dump its
|stats, halt, etc.
help.description.setevents
|Sets the events that we will receive. This turns off any events that aren't
|listed, so sending 'SETEVENTS' without any values will turn off all event reporting.
|
|For Tor versions between 0.1.1.9 and 0.2.2.1 adding 'EXTENDED' causes some
|events to give us additional information. After version 0.2.2.1 this is
|always on.
|
|Events include...
|
help.description.usefeature
|Customizes the behavior of the control port. Options include...
|
help.description.saveconf
|Writes Tor's current configuration to its torrc.
help.description.loadconf
|Reads the given text like it belonged to our torrc.
|
|Example:
| +LOADCONF
| # sets our exit policy to just accept ports 80 and 443
| ExitPolicy accept *:80
| ExitPolicy accept *:443
| ExitPolicy reject *:*
| .
help.description.mapaddress
|Replaces future requests for one address with another.
|
|Example:
| MAPADDRESS 0.0.0.0=torproject.org 1.2.3.4=tor.freehaven.net
help.description.postdescriptor
|Simulates getting a new relay descriptor.
help.description.extendcircuit
|Extends the given circuit or creates a new one if the CircuitID is zero. The
|PATH is a comma separated list of fingerprints. If it isn't set then this
|uses Tor's normal path selection.
help.description.setcircuitpurpose
|Sets the purpose attribute for a circuit.
help.description.closecircuit
|Closes the given circuit. If "IfUnused" is included then this only closes
|the circuit if it isn't currently being used.
help.description.attachstream
|Attaches a stream to the given built circuit (tor picks one on its own if
|CircuitID is zero). If HopNum is given then this hop is used to exit the
|circuit, otherwise the last relay is used.
help.description.redirectstream
|Sets the destination for a given stream. This can only be done after a
|stream is created but before it's attached to a circuit.
help.description.closestream
|Closes the given stream, the reason being an integer matching a reason as
|per section 6.3 of the tor-spec.
help.description.add_onion
|Creates a new hidden service. Unlike 'SETCONF HiddenServiceDir...' this
|doesn't persist the service to disk.
help.description.del_onion
|Delete a hidden service that was created with ADD_ONION.
help.description.hsfetch
|Retrieves the descriptor for a hidden service. This is an asynchronous
|request, with the descriptor provided by a HS_DESC_CONTENT event.
help.description.hspost
|Uploads a descriptor to a hidden service directory.
help.description.resolve
|Performs IPv4 DNS resolution over tor, doing a reverse lookup instead if
|"mode=reverse" is included. This request is processed in the background and
|results in an ADDRMAP event with the response.
help.description.takeownership
|Instructs Tor to gracefully shut down when this control connection is closed.
help.description.protocolinfo
|Provides bootstrapping information that a controller might need when first
|starting, like Tor's version and controller authentication. This can be done
|before authenticating to the control port.
help.signal.options RELOAD / HUP => reload our torrc
help.signal.options SHUTDOWN / INT => gracefully shut down, waiting 30 seconds if we're a relay
help.signal.options DUMP / USR1 => logs information about open connections and circuits
help.signal.options DEBUG / USR2 => makes us log at the DEBUG runlevel
help.signal.options HALT / TERM => immediately shut down
help.signal.options CLEARDNSCACHE => clears any cached DNS results
help.signal.options NEWNYM => clears the DNS cache and uses new circuits for future connections
##################
# TAB COMPLETION #
##################
# Commands we'll autocomplete when the user hits tab. This is just the start of
# our autocompletion list - more are determined dynamically by checking what
# tor supports.
autocomplete /help
autocomplete /events
autocomplete /info
autocomplete /quit
autocomplete SAVECONF
autocomplete MAPADDRESS
autocomplete EXTENDCIRCUIT
autocomplete SETCIRCUITPURPOSE
autocomplete SETROUTERPURPOSE
autocomplete ATTACHSTREAM
#autocomplete +POSTDESCRIPTOR # TODO: needs multi-line support
autocomplete REDIRECTSTREAM
autocomplete CLOSESTREAM
autocomplete CLOSECIRCUIT
autocomplete QUIT
autocomplete RESOLVE
autocomplete PROTOCOLINFO
#autocomplete +LOADCONF # TODO: needs multi-line support
autocomplete TAKEOWNERSHIP
autocomplete AUTHCHALLENGE
autocomplete DROPGUARDS
autocomplete ADD_ONION NEW:BEST
autocomplete ADD_ONION NEW:RSA1024
autocomplete ADD_ONION NEW:ED25519-V3
autocomplete ADD_ONION RSA1024:
autocomplete ADD_ONION ED25519-V3:
autocomplete DEL_ONION
autocomplete HSFETCH
autocomplete HSPOST
stem-1.8.0/stem/interpreter/autocomplete.py 0000664 0001750 0001750 00000005743 13501272761 021615 0 ustar atagar atagar 0000000 0000000 # Copyright 2014-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Tab completion for our interpreter prompt.
"""
import stem.prereq
from stem.interpreter import uses_settings
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
@uses_settings
def _get_commands(controller, config):
"""
Provides commands recognized by tor.
"""
commands = config.get('autocomplete', [])
if controller is None:
return commands
# GETINFO commands. Lines are of the form '[option] -- [description]'. This
# strips '*' from options that accept values.
results = controller.get_info('info/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0].rstrip('*')
commands.append('GETINFO %s' % option)
else:
commands.append('GETINFO ')
# GETCONF, SETCONF, and RESETCONF commands. Lines are of the form
# '[option] [type]'.
results = controller.get_info('config/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0]
commands.append('GETCONF %s' % option)
commands.append('SETCONF %s' % option)
commands.append('RESETCONF %s' % option)
else:
commands += ['GETCONF ', 'SETCONF ', 'RESETCONF ']
# SETEVENT, USEFEATURE, and SIGNAL commands. For each of these the GETINFO
# results are simply a space separated lists of the values they can have.
options = (
('SETEVENTS ', 'events/names'),
('USEFEATURE ', 'features/names'),
('SIGNAL ', 'signal/names'),
)
for prefix, getinfo_cmd in options:
results = controller.get_info(getinfo_cmd, None)
if results:
commands += [prefix + value for value in results.split()]
else:
commands.append(prefix)
# Adds /help commands.
usage_info = config.get('help.usage', {})
for cmd in usage_info.keys():
commands.append('/help ' + cmd)
return commands
class Autocompleter(object):
def __init__(self, controller):
self._commands = _get_commands(controller)
@lru_cache()
def matches(self, text):
"""
Provides autocompletion matches for the given text.
:param str text: text to check for autocompletion matches with
:returns: **list** with possible matches
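For example (matches vary with what tor supports)...
::
  >>> autocompleter.matches('/q')
  ['/quit']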
"""
lowercase_text = text.lower()
return [cmd for cmd in self._commands if cmd.lower().startswith(lowercase_text)]
def complete(self, text, state):
"""
Provides case insensitive autocompletion options, acting as a functor for
readline's set_completer function.
:param str text: text to check for autocompletion matches with
:param int state: index of result to be provided, readline fetches matches
until this function provides None
:returns: **str** with the autocompletion match, **None** if either none
exists or state is higher than our number of matches
"""
try:
return self.matches(text)[state]
except IndexError:
return None
stem-1.8.0/stem/cached_fallbacks.cfg 0000664 0001750 0001750 00000150027 13526063314 020064 0 ustar atagar atagar 0000000 0000000 tor_commit 1dd95278970f9f32d83a31fe73e0258a30523539
stem_commit ec67e06398d6bbbcefdc14b56d2e91bd49f47539
header.timestamp 20190625114911
header.source whitelist
header.version 2.0.0
header.timestamp0 20190625114911
header.timestamp1 20190628085927
header.type fallback
001524DD403D729F08F7E5D77813EF12756CFA8D.address 185.13.39.197
001524DD403D729F08F7E5D77813EF12756CFA8D.or_port 443
001524DD403D729F08F7E5D77813EF12756CFA8D.dir_port 80
001524DD403D729F08F7E5D77813EF12756CFA8D.nickname Neldoreth
001524DD403D729F08F7E5D77813EF12756CFA8D.has_extrainfo false
025B66CEBC070FCB0519D206CF0CF4965C20C96E.address 185.100.85.61
025B66CEBC070FCB0519D206CF0CF4965C20C96E.or_port 443
025B66CEBC070FCB0519D206CF0CF4965C20C96E.dir_port 80
025B66CEBC070FCB0519D206CF0CF4965C20C96E.nickname nibbana
025B66CEBC070FCB0519D206CF0CF4965C20C96E.has_extrainfo false
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.address 185.225.17.3
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.or_port 443
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.dir_port 80
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.nickname Nebuchadnezzar
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.has_extrainfo false
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.orport6_address 2a0a:c800:1:5::3
0338F9F55111FE8E3570E7DE117EF3AF999CC1D7.orport6_port 443
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.address 163.172.149.155
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.or_port 443
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.dir_port 80
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.nickname niij02
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.has_extrainfo false
0C039F35C2E40DCB71CD8A07E97C7FD7787D42D6.address 5.200.21.144
0C039F35C2E40DCB71CD8A07E97C7FD7787D42D6.or_port 443
0C039F35C2E40DCB71CD8A07E97C7FD7787D42D6.dir_port 80
0C039F35C2E40DCB71CD8A07E97C7FD7787D42D6.nickname libel
0C039F35C2E40DCB71CD8A07E97C7FD7787D42D6.has_extrainfo false
113143469021882C3A4B82F084F8125B08EE471E.address 37.252.185.182
113143469021882C3A4B82F084F8125B08EE471E.or_port 8080
113143469021882C3A4B82F084F8125B08EE471E.dir_port 9030
113143469021882C3A4B82F084F8125B08EE471E.nickname parasol
113143469021882C3A4B82F084F8125B08EE471E.has_extrainfo false
113143469021882C3A4B82F084F8125B08EE471E.orport6_address 2a00:63c1:a:182::2
113143469021882C3A4B82F084F8125B08EE471E.orport6_port 8080
11DF0017A43AF1F08825CD5D973297F81AB00FF3.address 37.120.174.249
11DF0017A43AF1F08825CD5D973297F81AB00FF3.or_port 443
11DF0017A43AF1F08825CD5D973297F81AB00FF3.dir_port 80
11DF0017A43AF1F08825CD5D973297F81AB00FF3.nickname gGDHjdcC6zAlM8k08lX
11DF0017A43AF1F08825CD5D973297F81AB00FF3.has_extrainfo false
11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_address 2a03:4000:6:724c:df98:15f9:b34d:443
11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_port 443
1211AC1BBB8A1AF7CBA86BCE8689AA3146B86423.address 95.85.8.226
1211AC1BBB8A1AF7CBA86BCE8689AA3146B86423.or_port 443
1211AC1BBB8A1AF7CBA86BCE8689AA3146B86423.dir_port 80
1211AC1BBB8A1AF7CBA86BCE8689AA3146B86423.nickname ccrelaycc
1211AC1BBB8A1AF7CBA86BCE8689AA3146B86423.has_extrainfo false
12AD30E5D25AA67F519780E2111E611A455FDC89.address 193.11.114.43
12AD30E5D25AA67F519780E2111E611A455FDC89.or_port 9001
12AD30E5D25AA67F519780E2111E611A455FDC89.dir_port 9030
12AD30E5D25AA67F519780E2111E611A455FDC89.nickname mdfnet1
12AD30E5D25AA67F519780E2111E611A455FDC89.has_extrainfo false
12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_address 2001:6b0:30:1000::99
12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_port 9050
12FD624EE73CEF37137C90D38B2406A66F68FAA2.address 37.157.195.87
12FD624EE73CEF37137C90D38B2406A66F68FAA2.or_port 443
12FD624EE73CEF37137C90D38B2406A66F68FAA2.dir_port 8030
12FD624EE73CEF37137C90D38B2406A66F68FAA2.nickname thanatosCZ
12FD624EE73CEF37137C90D38B2406A66F68FAA2.has_extrainfo false
183005F78229D94EE51CE7795A42280070A48D0D.address 217.182.51.248
183005F78229D94EE51CE7795A42280070A48D0D.or_port 443
183005F78229D94EE51CE7795A42280070A48D0D.dir_port 80
183005F78229D94EE51CE7795A42280070A48D0D.nickname Cosworth02
183005F78229D94EE51CE7795A42280070A48D0D.has_extrainfo false
185663B7C12777F052B2C2D23D7A239D8DA88A0F.address 171.25.193.25
185663B7C12777F052B2C2D23D7A239D8DA88A0F.or_port 443
185663B7C12777F052B2C2D23D7A239D8DA88A0F.dir_port 80
185663B7C12777F052B2C2D23D7A239D8DA88A0F.nickname DFRI5
185663B7C12777F052B2C2D23D7A239D8DA88A0F.has_extrainfo false
185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_address 2001:67c:289c::25
185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_port 443
1938EBACBB1A7BFA888D9623C90061130E63BB3F.address 149.56.141.138
1938EBACBB1A7BFA888D9623C90061130E63BB3F.or_port 9001
1938EBACBB1A7BFA888D9623C90061130E63BB3F.dir_port 9030
1938EBACBB1A7BFA888D9623C90061130E63BB3F.nickname Aerodynamik04
1938EBACBB1A7BFA888D9623C90061130E63BB3F.has_extrainfo false
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.address 81.7.14.253
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.or_port 443
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.dir_port 9001
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.nickname Ichotolot60
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.has_extrainfo true
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.address 50.7.74.171
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.or_port 9001
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.dir_port 9030
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.nickname theia1
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.has_extrainfo false
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.orport6_address 2001:49f0:d002:2::51
1CD17CB202063C51C7DAD3BACEF87ECE81C2350F.orport6_port 443
1F6ABD086F40B890A33C93CC4606EE68B31C9556.address 199.184.246.250
1F6ABD086F40B890A33C93CC4606EE68B31C9556.or_port 443
1F6ABD086F40B890A33C93CC4606EE68B31C9556.dir_port 80
1F6ABD086F40B890A33C93CC4606EE68B31C9556.nickname dao
1F6ABD086F40B890A33C93CC4606EE68B31C9556.has_extrainfo false
1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_address 2620:124:1009:1::171
1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_port 443
20462CBA5DA4C2D963567D17D0B7249718114A68.address 212.47.229.2
20462CBA5DA4C2D963567D17D0B7249718114A68.or_port 9001
20462CBA5DA4C2D963567D17D0B7249718114A68.dir_port 9030
20462CBA5DA4C2D963567D17D0B7249718114A68.nickname scaletor
20462CBA5DA4C2D963567D17D0B7249718114A68.has_extrainfo false
20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_address 2001:bc8:4400:2100::f03
20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_port 9001
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.address 77.247.181.164
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.or_port 443
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.dir_port 80
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.nickname HaveHeart
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.has_extrainfo false
230A8B2A8BA861210D9B4BA97745AEC217A94207.address 163.172.176.167
230A8B2A8BA861210D9B4BA97745AEC217A94207.or_port 443
230A8B2A8BA861210D9B4BA97745AEC217A94207.dir_port 80
230A8B2A8BA861210D9B4BA97745AEC217A94207.nickname niij01
230A8B2A8BA861210D9B4BA97745AEC217A94207.has_extrainfo false
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.address 97.74.237.196
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.or_port 9001
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.dir_port 9030
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.nickname Minotaur
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.has_extrainfo false
322C6E3A973BC10FC36DE3037AD27BC89F14723B.address 212.83.154.33
322C6E3A973BC10FC36DE3037AD27BC89F14723B.or_port 8443
322C6E3A973BC10FC36DE3037AD27BC89F14723B.dir_port 8080
322C6E3A973BC10FC36DE3037AD27BC89F14723B.nickname bauruine204
322C6E3A973BC10FC36DE3037AD27BC89F14723B.has_extrainfo false
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.address 109.105.109.162
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.or_port 60784
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.dir_port 52860
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.nickname ndnr1
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.has_extrainfo false
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.orport6_address 2001:948:7:2::163
32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F.orport6_port 5001
330CD3DB6AD266DC70CDB512B036957D03D9BC59.address 185.100.84.212
330CD3DB6AD266DC70CDB512B036957D03D9BC59.or_port 443
330CD3DB6AD266DC70CDB512B036957D03D9BC59.dir_port 80
330CD3DB6AD266DC70CDB512B036957D03D9BC59.nickname TeamTardis
330CD3DB6AD266DC70CDB512B036957D03D9BC59.has_extrainfo false
330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_address 2a06:1700:0:7::1
330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_port 443
361D33C96D0F161275EE67E2C91EE10B276E778B.address 37.157.255.35
361D33C96D0F161275EE67E2C91EE10B276E778B.or_port 9090
361D33C96D0F161275EE67E2C91EE10B276E778B.dir_port 9030
361D33C96D0F161275EE67E2C91EE10B276E778B.nickname cxx4freedom
361D33C96D0F161275EE67E2C91EE10B276E778B.has_extrainfo false
375DCBB2DBD94E5263BC0C015F0C9E756669617E.address 64.79.152.132
375DCBB2DBD94E5263BC0C015F0C9E756669617E.or_port 443
375DCBB2DBD94E5263BC0C015F0C9E756669617E.dir_port 80
375DCBB2DBD94E5263BC0C015F0C9E756669617E.nickname ebola
375DCBB2DBD94E5263BC0C015F0C9E756669617E.has_extrainfo false
39F91959416763AFD34DBEEC05474411B964B2DC.address 213.183.60.21
39F91959416763AFD34DBEEC05474411B964B2DC.or_port 443
39F91959416763AFD34DBEEC05474411B964B2DC.dir_port 9030
39F91959416763AFD34DBEEC05474411B964B2DC.nickname angeltest11
39F91959416763AFD34DBEEC05474411B964B2DC.has_extrainfo false
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.address 50.7.74.174
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.or_port 9001
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.dir_port 9030
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.nickname theia7
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.has_extrainfo false
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.orport6_address 2001:49f0:d002:2::57
3AFDAAD91A15B4C6A7686A53AA8627CA871FF491.orport6_port 443
3CA0D15567024D2E0B557DC0CF3E962B37999A79.address 199.249.230.83
3CA0D15567024D2E0B557DC0CF3E962B37999A79.or_port 443
3CA0D15567024D2E0B557DC0CF3E962B37999A79.dir_port 80
3CA0D15567024D2E0B557DC0CF3E962B37999A79.nickname QuintexAirVPN30
3CA0D15567024D2E0B557DC0CF3E962B37999A79.has_extrainfo false
3CA0D15567024D2E0B557DC0CF3E962B37999A79.orport6_address 2620:7:6001::ffff:c759:e653
3CA0D15567024D2E0B557DC0CF3E962B37999A79.orport6_port 80
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.address 51.38.65.160
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.or_port 9001
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.dir_port 9030
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.nickname rofltor10
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.has_extrainfo false
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.orport6_address 2001:41d0:801:2000::f6e
3CB4193EF4E239FCEDC4DC43468E0B0D6B67ACC3.orport6_port 9001
3E53D3979DB07EFD736661C934A1DED14127B684.address 217.79.179.177
3E53D3979DB07EFD736661C934A1DED14127B684.or_port 9001
3E53D3979DB07EFD736661C934A1DED14127B684.dir_port 9030
3E53D3979DB07EFD736661C934A1DED14127B684.nickname Unnamed
3E53D3979DB07EFD736661C934A1DED14127B684.has_extrainfo false
3E53D3979DB07EFD736661C934A1DED14127B684.orport6_address 2001:4ba0:fff9:131:6c4f::90d3
3E53D3979DB07EFD736661C934A1DED14127B684.orport6_port 9001
3F092986E9B87D3FDA09B71FA3A602378285C77A.address 66.111.2.16
3F092986E9B87D3FDA09B71FA3A602378285C77A.or_port 9001
3F092986E9B87D3FDA09B71FA3A602378285C77A.dir_port 9030
3F092986E9B87D3FDA09B71FA3A602378285C77A.nickname NYCBUG1
3F092986E9B87D3FDA09B71FA3A602378285C77A.has_extrainfo false
3F092986E9B87D3FDA09B71FA3A602378285C77A.orport6_address 2610:1c0:0:5::16
3F092986E9B87D3FDA09B71FA3A602378285C77A.orport6_port 9001
4061C553CA88021B8302F0814365070AAE617270.address 185.100.85.101
4061C553CA88021B8302F0814365070AAE617270.or_port 9001
4061C553CA88021B8302F0814365070AAE617270.dir_port 9030
4061C553CA88021B8302F0814365070AAE617270.nickname TorExitRomania
4061C553CA88021B8302F0814365070AAE617270.has_extrainfo false
4623A9EC53BFD83155929E56D6F7B55B5E718C24.address 163.172.157.213
4623A9EC53BFD83155929E56D6F7B55B5E718C24.or_port 443
4623A9EC53BFD83155929E56D6F7B55B5E718C24.dir_port 8080
4623A9EC53BFD83155929E56D6F7B55B5E718C24.nickname Cotopaxi
4623A9EC53BFD83155929E56D6F7B55B5E718C24.has_extrainfo false
465D17C6FC297E3857B5C6F152006A1E212944EA.address 195.123.245.141
465D17C6FC297E3857B5C6F152006A1E212944EA.or_port 443
465D17C6FC297E3857B5C6F152006A1E212944EA.dir_port 9030
465D17C6FC297E3857B5C6F152006A1E212944EA.nickname angeltest14
465D17C6FC297E3857B5C6F152006A1E212944EA.has_extrainfo false
46791D156C9B6C255C2665D4D8393EC7DBAA7798.address 31.31.78.49
46791D156C9B6C255C2665D4D8393EC7DBAA7798.or_port 443
46791D156C9B6C255C2665D4D8393EC7DBAA7798.dir_port 80
46791D156C9B6C255C2665D4D8393EC7DBAA7798.nickname KrigHaBandolo
46791D156C9B6C255C2665D4D8393EC7DBAA7798.has_extrainfo false
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.address 193.70.43.76
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.or_port 9001
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.dir_port 9030
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.nickname Aerodynamik03
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.has_extrainfo false
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.address 37.187.102.186
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.or_port 9001
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.dir_port 9030
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.nickname txtfileTorNode65536
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.has_extrainfo false
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_address 2001:41d0:a:26ba::1
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_port 9001
4EB55679FA91363B97372554F8DC7C63F4E5B101.address 81.7.13.84
4EB55679FA91363B97372554F8DC7C63F4E5B101.or_port 443
4EB55679FA91363B97372554F8DC7C63F4E5B101.dir_port 80
4EB55679FA91363B97372554F8DC7C63F4E5B101.nickname torpidsDEisppro
4EB55679FA91363B97372554F8DC7C63F4E5B101.has_extrainfo false
4EB55679FA91363B97372554F8DC7C63F4E5B101.orport6_address 2a02:180:1:1::5b8f:538c
4EB55679FA91363B97372554F8DC7C63F4E5B101.orport6_port 443
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.address 108.53.208.157
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.or_port 443
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.dir_port 80
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.nickname Binnacle
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.has_extrainfo true
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.address 5.9.158.75
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.or_port 9001
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.dir_port 9030
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.nickname zwiebeltoralf2
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.has_extrainfo true
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.orport6_address 2a01:4f8:190:514a::2
509EAB4C5D10C9A9A24B4EA0CE402C047A2D64E6.orport6_port 9001
51E1CF613FD6F9F11FE24743C91D6F9981807D82.address 81.7.16.182
51E1CF613FD6F9F11FE24743C91D6F9981807D82.or_port 443
51E1CF613FD6F9F11FE24743C91D6F9981807D82.dir_port 80
51E1CF613FD6F9F11FE24743C91D6F9981807D82.nickname torpidsDEisppro3
51E1CF613FD6F9F11FE24743C91D6F9981807D82.has_extrainfo false
51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_address 2a02:180:1:1::517:10b6
51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_port 993
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.address 192.160.102.166
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.or_port 9001
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.dir_port 80
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.nickname chaucer
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.has_extrainfo false
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.orport6_address 2620:132:300c:c01d::6
547DA56F6B88B6C596B3E3086803CDA4F0EF8F21.orport6_port 9002
557ACEC850F54EEE65839F83CACE2B0825BE811E.address 192.160.102.170
557ACEC850F54EEE65839F83CACE2B0825BE811E.or_port 9001
557ACEC850F54EEE65839F83CACE2B0825BE811E.dir_port 80
557ACEC850F54EEE65839F83CACE2B0825BE811E.nickname ogopogo
557ACEC850F54EEE65839F83CACE2B0825BE811E.has_extrainfo false
557ACEC850F54EEE65839F83CACE2B0825BE811E.orport6_address 2620:132:300c:c01d::a
557ACEC850F54EEE65839F83CACE2B0825BE811E.orport6_port 9002
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.address 50.7.74.170
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.or_port 443
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.dir_port 80
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.nickname theia8
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.has_extrainfo false
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.orport6_address 2001:49f0:d002:2::58
5BF17163CBE73D8CD9FDBE030C944EA05707DA93.orport6_port 443
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.address 172.98.193.43
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.or_port 443
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.dir_port 80
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.nickname Backplane
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.has_extrainfo false
616081EC829593AF4232550DE6FFAA1D75B37A90.address 95.128.43.164
616081EC829593AF4232550DE6FFAA1D75B37A90.or_port 443
616081EC829593AF4232550DE6FFAA1D75B37A90.dir_port 80
616081EC829593AF4232550DE6FFAA1D75B37A90.nickname AquaRayTerminus
616081EC829593AF4232550DE6FFAA1D75B37A90.has_extrainfo false
616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_address 2a02:ec0:209:10::4
616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_port 443
68F175CCABE727AA2D2309BCD8789499CEE36ED7.address 163.172.139.104
68F175CCABE727AA2D2309BCD8789499CEE36ED7.or_port 443
68F175CCABE727AA2D2309BCD8789499CEE36ED7.dir_port 8080
68F175CCABE727AA2D2309BCD8789499CEE36ED7.nickname Pichincha
68F175CCABE727AA2D2309BCD8789499CEE36ED7.has_extrainfo false
6A7551EEE18F78A9813096E82BF84F740D32B911.address 94.130.186.5
6A7551EEE18F78A9813096E82BF84F740D32B911.or_port 443
6A7551EEE18F78A9813096E82BF84F740D32B911.dir_port 80
6A7551EEE18F78A9813096E82BF84F740D32B911.nickname TorMachine
6A7551EEE18F78A9813096E82BF84F740D32B911.has_extrainfo false
6A7551EEE18F78A9813096E82BF84F740D32B911.orport6_address 2a01:4f8:1c0c:45f7::1
6A7551EEE18F78A9813096E82BF84F740D32B911.orport6_port 443
6EF897645B79B6CB35E853B32506375014DE3621.address 80.127.137.19
6EF897645B79B6CB35E853B32506375014DE3621.or_port 443
6EF897645B79B6CB35E853B32506375014DE3621.dir_port 80
6EF897645B79B6CB35E853B32506375014DE3621.nickname d6relay
6EF897645B79B6CB35E853B32506375014DE3621.has_extrainfo false
6EF897645B79B6CB35E853B32506375014DE3621.orport6_address 2001:981:47c1:1::6
6EF897645B79B6CB35E853B32506375014DE3621.orport6_port 443
7088D485934E8A403B81531F8C90BDC75FA43C98.address 37.139.8.104
7088D485934E8A403B81531F8C90BDC75FA43C98.or_port 9001
7088D485934E8A403B81531F8C90BDC75FA43C98.dir_port 9030
7088D485934E8A403B81531F8C90BDC75FA43C98.nickname Basil
7088D485934E8A403B81531F8C90BDC75FA43C98.has_extrainfo false
7088D485934E8A403B81531F8C90BDC75FA43C98.orport6_address 2a03:b0c0:0:1010::24c:1001
7088D485934E8A403B81531F8C90BDC75FA43C98.orport6_port 9001
70C55A114C0EF3DC5784A4FAEE64388434A3398F.address 188.138.88.42
70C55A114C0EF3DC5784A4FAEE64388434A3398F.or_port 443
70C55A114C0EF3DC5784A4FAEE64388434A3398F.dir_port 80
70C55A114C0EF3DC5784A4FAEE64388434A3398F.nickname torpidsFRplusserver
70C55A114C0EF3DC5784A4FAEE64388434A3398F.has_extrainfo false
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.address 85.235.250.88
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.or_port 443
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.dir_port 80
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.nickname TykRelay01
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.has_extrainfo false
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.orport6_address 2a01:3a0:1:1900:85:235:250:88
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.orport6_port 443
742C45F2D9004AADE0077E528A4418A6A81BC2BA.address 178.17.170.23
742C45F2D9004AADE0077E528A4418A6A81BC2BA.or_port 9001
742C45F2D9004AADE0077E528A4418A6A81BC2BA.dir_port 9030
742C45F2D9004AADE0077E528A4418A6A81BC2BA.nickname TorExitMoldova2
742C45F2D9004AADE0077E528A4418A6A81BC2BA.has_extrainfo false
742C45F2D9004AADE0077E528A4418A6A81BC2BA.orport6_address 2a00:1dc0:caff:7d::8254
742C45F2D9004AADE0077E528A4418A6A81BC2BA.orport6_port 9001
745369332749021C6FAF100D327BC3BF1DF4707B.address 50.7.74.173
745369332749021C6FAF100D327BC3BF1DF4707B.or_port 9001
745369332749021C6FAF100D327BC3BF1DF4707B.dir_port 9030
745369332749021C6FAF100D327BC3BF1DF4707B.nickname theia5
745369332749021C6FAF100D327BC3BF1DF4707B.has_extrainfo false
745369332749021C6FAF100D327BC3BF1DF4707B.orport6_address 2001:49f0:d002:2::55
745369332749021C6FAF100D327BC3BF1DF4707B.orport6_port 443
77131D7E2EC1CA9B8D737502256DA9103599CE51.address 77.247.181.166
77131D7E2EC1CA9B8D737502256DA9103599CE51.or_port 443
77131D7E2EC1CA9B8D737502256DA9103599CE51.dir_port 80
77131D7E2EC1CA9B8D737502256DA9103599CE51.nickname CriticalMass
77131D7E2EC1CA9B8D737502256DA9103599CE51.has_extrainfo false
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.address 5.196.23.64
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.or_port 9001
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.dir_port 9030
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.nickname Aerodynamik01
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.has_extrainfo false
79509683AB4C8DDAF90A120C69A4179C6CD5A387.address 185.244.193.141
79509683AB4C8DDAF90A120C69A4179C6CD5A387.or_port 9001
79509683AB4C8DDAF90A120C69A4179C6CD5A387.dir_port 9030
79509683AB4C8DDAF90A120C69A4179C6CD5A387.nickname DerDickeReloaded
79509683AB4C8DDAF90A120C69A4179C6CD5A387.has_extrainfo false
79509683AB4C8DDAF90A120C69A4179C6CD5A387.orport6_address 2a03:4000:27:192:24:12:1984:4
79509683AB4C8DDAF90A120C69A4179C6CD5A387.orport6_port 9001
7BB70F8585DFC27E75D692970C0EEB0F22983A63.address 51.254.136.195
7BB70F8585DFC27E75D692970C0EEB0F22983A63.or_port 443
7BB70F8585DFC27E75D692970C0EEB0F22983A63.dir_port 80
7BB70F8585DFC27E75D692970C0EEB0F22983A63.nickname torproxy02
7BB70F8585DFC27E75D692970C0EEB0F22983A63.has_extrainfo false
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.address 77.247.181.162
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.or_port 443
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.dir_port 80
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.nickname sofia
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.has_extrainfo false
7E281CD2C315C4F7A84BC7C8721C3BC974DDBFA3.address 185.220.101.48
7E281CD2C315C4F7A84BC7C8721C3BC974DDBFA3.or_port 20048
7E281CD2C315C4F7A84BC7C8721C3BC974DDBFA3.dir_port 10048
7E281CD2C315C4F7A84BC7C8721C3BC974DDBFA3.nickname niftyporcupine
7E281CD2C315C4F7A84BC7C8721C3BC974DDBFA3.has_extrainfo false
80AAF8D5956A43C197104CEF2550CD42D165C6FB.address 193.11.114.45
80AAF8D5956A43C197104CEF2550CD42D165C6FB.or_port 9002
80AAF8D5956A43C197104CEF2550CD42D165C6FB.dir_port 9031
80AAF8D5956A43C197104CEF2550CD42D165C6FB.nickname mdfnet2
80AAF8D5956A43C197104CEF2550CD42D165C6FB.has_extrainfo false
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.address 51.254.96.208
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.or_port 9001
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.dir_port 9030
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.nickname rofltor01
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.has_extrainfo false
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.orport6_address 2001:41d0:401:3100::30dc
8101421BEFCCF4C271D5483C5AABCAAD245BBB9D.orport6_port 9001
81B75D534F91BFB7C57AB67DA10BCEF622582AE8.address 192.42.116.16
81B75D534F91BFB7C57AB67DA10BCEF622582AE8.or_port 443
81B75D534F91BFB7C57AB67DA10BCEF622582AE8.dir_port 80
81B75D534F91BFB7C57AB67DA10BCEF622582AE8.nickname hviv104
81B75D534F91BFB7C57AB67DA10BCEF622582AE8.has_extrainfo false
823AA81E277F366505545522CEDC2F529CE4DC3F.address 192.160.102.164
823AA81E277F366505545522CEDC2F529CE4DC3F.or_port 9001
823AA81E277F366505545522CEDC2F529CE4DC3F.dir_port 80
823AA81E277F366505545522CEDC2F529CE4DC3F.nickname snowfall
823AA81E277F366505545522CEDC2F529CE4DC3F.has_extrainfo false
823AA81E277F366505545522CEDC2F529CE4DC3F.orport6_address 2620:132:300c:c01d::4
823AA81E277F366505545522CEDC2F529CE4DC3F.orport6_port 9002
844AE9CAD04325E955E2BE1521563B79FE7094B7.address 192.87.28.82
844AE9CAD04325E955E2BE1521563B79FE7094B7.or_port 9001
844AE9CAD04325E955E2BE1521563B79FE7094B7.dir_port 9030
844AE9CAD04325E955E2BE1521563B79FE7094B7.nickname Smeerboel
844AE9CAD04325E955E2BE1521563B79FE7094B7.has_extrainfo false
844AE9CAD04325E955E2BE1521563B79FE7094B7.orport6_address 2001:678:230:3028:192:87:28:82
844AE9CAD04325E955E2BE1521563B79FE7094B7.orport6_port 9001
8456DFA94161CDD99E480C2A2992C366C6564410.address 62.210.254.132
8456DFA94161CDD99E480C2A2992C366C6564410.or_port 443
8456DFA94161CDD99E480C2A2992C366C6564410.dir_port 80
8456DFA94161CDD99E480C2A2992C366C6564410.nickname turingmachine
8456DFA94161CDD99E480C2A2992C366C6564410.has_extrainfo false
855BC2DABE24C861CD887DB9B2E950424B49FC34.address 85.230.178.139
855BC2DABE24C861CD887DB9B2E950424B49FC34.or_port 443
855BC2DABE24C861CD887DB9B2E950424B49FC34.dir_port 9030
855BC2DABE24C861CD887DB9B2E950424B49FC34.nickname Logforme
855BC2DABE24C861CD887DB9B2E950424B49FC34.has_extrainfo false
85A885433E50B1874F11CEC9BE98451E24660976.address 178.254.7.88
85A885433E50B1874F11CEC9BE98451E24660976.or_port 8443
85A885433E50B1874F11CEC9BE98451E24660976.dir_port 8080
85A885433E50B1874F11CEC9BE98451E24660976.nickname wr3ck3d0ni0n01
85A885433E50B1874F11CEC9BE98451E24660976.has_extrainfo false
86C281AD135058238D7A337D546C902BE8505DDE.address 185.96.88.29
86C281AD135058238D7A337D546C902BE8505DDE.or_port 443
86C281AD135058238D7A337D546C902BE8505DDE.dir_port 80
86C281AD135058238D7A337D546C902BE8505DDE.nickname TykRelay05
86C281AD135058238D7A337D546C902BE8505DDE.has_extrainfo false
86C281AD135058238D7A337D546C902BE8505DDE.orport6_address 2a00:4020::185:96:88:29
86C281AD135058238D7A337D546C902BE8505DDE.orport6_port 443
8C00FA7369A7A308F6A137600F0FA07990D9D451.address 163.172.194.53
8C00FA7369A7A308F6A137600F0FA07990D9D451.or_port 9001
8C00FA7369A7A308F6A137600F0FA07990D9D451.dir_port 9030
8C00FA7369A7A308F6A137600F0FA07990D9D451.nickname GrmmlLitavis
8C00FA7369A7A308F6A137600F0FA07990D9D451.has_extrainfo false
8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_address 2001:bc8:225f:142:6c69:7461:7669:73
8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_port 9001
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.address 5.189.169.190
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.or_port 8080
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.dir_port 8030
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.nickname thanatosDE
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.has_extrainfo false
8FA37B93397015B2BC5A525C908485260BE9F422.address 81.7.11.96
8FA37B93397015B2BC5A525C908485260BE9F422.or_port 9001
8FA37B93397015B2BC5A525C908485260BE9F422.dir_port 9030
8FA37B93397015B2BC5A525C908485260BE9F422.nickname Doedel22
8FA37B93397015B2BC5A525C908485260BE9F422.has_extrainfo false
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.address 54.37.139.118
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.or_port 9001
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.dir_port 9030
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.nickname rofltor09
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.has_extrainfo false
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.orport6_address 2001:41d0:601:1100::1b8
90A5D1355C4B5840E950EB61E673863A6AE3ACA1.orport6_port 9001
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.address 37.187.20.59
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.or_port 443
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.dir_port 80
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.nickname torpidsFRovh
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.has_extrainfo false
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_address 2001:41d0:a:143b::1
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_port 993
91E4015E1F82DAF0121D62267E54A1F661AB6DC7.address 173.255.245.116
91E4015E1F82DAF0121D62267E54A1F661AB6DC7.or_port 9001
91E4015E1F82DAF0121D62267E54A1F661AB6DC7.dir_port 9030
91E4015E1F82DAF0121D62267E54A1F661AB6DC7.nickname IWorshipHisShadow
91E4015E1F82DAF0121D62267E54A1F661AB6DC7.has_extrainfo false
924B24AFA7F075D059E8EEB284CC400B33D3D036.address 96.253.78.108
924B24AFA7F075D059E8EEB284CC400B33D3D036.or_port 443
924B24AFA7F075D059E8EEB284CC400B33D3D036.dir_port 80
924B24AFA7F075D059E8EEB284CC400B33D3D036.nickname NSDFreedom
924B24AFA7F075D059E8EEB284CC400B33D3D036.has_extrainfo false
9288B75B5FF8861EFF32A6BE8825CC38A4F9F8C2.address 92.38.163.21
9288B75B5FF8861EFF32A6BE8825CC38A4F9F8C2.or_port 443
9288B75B5FF8861EFF32A6BE8825CC38A4F9F8C2.dir_port 9030
9288B75B5FF8861EFF32A6BE8825CC38A4F9F8C2.nickname angeltest9
9288B75B5FF8861EFF32A6BE8825CC38A4F9F8C2.has_extrainfo false
935F589545B8A271A722E330445BB99F67DBB058.address 163.172.53.84
935F589545B8A271A722E330445BB99F67DBB058.or_port 443
935F589545B8A271A722E330445BB99F67DBB058.dir_port 80
935F589545B8A271A722E330445BB99F67DBB058.nickname Multivac0
935F589545B8A271A722E330445BB99F67DBB058.has_extrainfo false
935F589545B8A271A722E330445BB99F67DBB058.orport6_address 2001:bc8:24f8::
935F589545B8A271A722E330445BB99F67DBB058.orport6_port 443
94C4B7B8C50C86A92B6A20107539EE2678CF9A28.address 204.8.156.142
94C4B7B8C50C86A92B6A20107539EE2678CF9A28.or_port 443
94C4B7B8C50C86A92B6A20107539EE2678CF9A28.dir_port 80
94C4B7B8C50C86A92B6A20107539EE2678CF9A28.nickname BostonUCompSci
94C4B7B8C50C86A92B6A20107539EE2678CF9A28.has_extrainfo false
9772EFB535397C942C3AB8804FB35CFFAD012438.address 37.153.1.10
9772EFB535397C942C3AB8804FB35CFFAD012438.or_port 9001
9772EFB535397C942C3AB8804FB35CFFAD012438.dir_port 9030
9772EFB535397C942C3AB8804FB35CFFAD012438.nickname smallsweatnode
9772EFB535397C942C3AB8804FB35CFFAD012438.has_extrainfo false
99E246DB480B313A3012BC3363093CC26CD209C7.address 173.212.254.192
99E246DB480B313A3012BC3363093CC26CD209C7.or_port 31337
99E246DB480B313A3012BC3363093CC26CD209C7.dir_port 31336
99E246DB480B313A3012BC3363093CC26CD209C7.nickname ViDiSrv
99E246DB480B313A3012BC3363093CC26CD209C7.has_extrainfo false
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.address 185.100.86.128
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.or_port 9001
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.dir_port 9030
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.nickname TorExitFinland
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.has_extrainfo false
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.orport6_address 2a06:1700:1::11
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.orport6_port 9001
9B816A5B3EB20B8E4E9B9D1FBA299BD3F40F0320.address 185.220.101.49
9B816A5B3EB20B8E4E9B9D1FBA299BD3F40F0320.or_port 20049
9B816A5B3EB20B8E4E9B9D1FBA299BD3F40F0320.dir_port 10049
9B816A5B3EB20B8E4E9B9D1FBA299BD3F40F0320.nickname niftypygmyjerboa
9B816A5B3EB20B8E4E9B9D1FBA299BD3F40F0320.has_extrainfo false
9C900A7F6F5DD034CFFD192DAEC9CCAA813DB022.address 86.105.212.130
9C900A7F6F5DD034CFFD192DAEC9CCAA813DB022.or_port 443
9C900A7F6F5DD034CFFD192DAEC9CCAA813DB022.dir_port 9030
9C900A7F6F5DD034CFFD192DAEC9CCAA813DB022.nickname firstor2
9C900A7F6F5DD034CFFD192DAEC9CCAA813DB022.has_extrainfo false
9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D.address 31.185.104.19
9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D.or_port 443
9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D.dir_port 80
9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D.nickname Digitalcourage3ip1
9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D.has_extrainfo false
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.address 46.28.110.244
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.or_port 443
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.dir_port 80
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.nickname Nivrim
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.has_extrainfo false
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.address 46.165.230.5
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.or_port 443
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.dir_port 80
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.nickname Dhalgren
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.has_extrainfo true
A2E6BB5C391CD46B38C55B4329C35304540771F1.address 81.7.3.67
A2E6BB5C391CD46B38C55B4329C35304540771F1.or_port 443
A2E6BB5C391CD46B38C55B4329C35304540771F1.dir_port 993
A2E6BB5C391CD46B38C55B4329C35304540771F1.nickname BeastieJoy62
A2E6BB5C391CD46B38C55B4329C35304540771F1.has_extrainfo true
A53C46F5B157DD83366D45A8E99A244934A14C46.address 128.31.0.13
A53C46F5B157DD83366D45A8E99A244934A14C46.or_port 443
A53C46F5B157DD83366D45A8E99A244934A14C46.dir_port 80
A53C46F5B157DD83366D45A8E99A244934A14C46.nickname csailmitexit
A53C46F5B157DD83366D45A8E99A244934A14C46.has_extrainfo false
A86EC24F5B8B964F67AC7C27CE92842025983274.address 185.246.152.22
A86EC24F5B8B964F67AC7C27CE92842025983274.or_port 443
A86EC24F5B8B964F67AC7C27CE92842025983274.dir_port 9030
A86EC24F5B8B964F67AC7C27CE92842025983274.nickname angeltest19
A86EC24F5B8B964F67AC7C27CE92842025983274.has_extrainfo false
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.address 163.172.149.122
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.or_port 443
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.dir_port 80
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.nickname niij03
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.has_extrainfo false
AC2BEDD0BAC72838EA7E6F113F856C4E8018ACDB.address 176.10.107.180
AC2BEDD0BAC72838EA7E6F113F856C4E8018ACDB.or_port 9001
AC2BEDD0BAC72838EA7E6F113F856C4E8018ACDB.dir_port 9030
AC2BEDD0BAC72838EA7E6F113F856C4E8018ACDB.nickname schokomilch
AC2BEDD0BAC72838EA7E6F113F856C4E8018ACDB.has_extrainfo false
ACDD9E85A05B127BA010466C13C8C47212E8A38F.address 185.129.62.62
ACDD9E85A05B127BA010466C13C8C47212E8A38F.or_port 9001
ACDD9E85A05B127BA010466C13C8C47212E8A38F.dir_port 9030
ACDD9E85A05B127BA010466C13C8C47212E8A38F.nickname kramse
ACDD9E85A05B127BA010466C13C8C47212E8A38F.has_extrainfo false
ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_address 2a06:d380:0:3700::62
ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_port 9001
ADB2C26629643DBB9F8FE0096E7D16F9414B4F8D.address 31.185.104.20
ADB2C26629643DBB9F8FE0096E7D16F9414B4F8D.or_port 443
ADB2C26629643DBB9F8FE0096E7D16F9414B4F8D.dir_port 80
ADB2C26629643DBB9F8FE0096E7D16F9414B4F8D.nickname Digitalcourage3ip2
ADB2C26629643DBB9F8FE0096E7D16F9414B4F8D.has_extrainfo false
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.address 45.79.108.130
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.or_port 9001
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.dir_port 9030
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.nickname linss
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.has_extrainfo false
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.orport6_address 2600:3c01:e000:131::8000:0
AEDAC7081AE14B8D241ECF0FF17A2858AB4383D0.orport6_port 9001
B0553175AADB0501E5A61FC61CEA3970BE130FF2.address 5.9.147.226
B0553175AADB0501E5A61FC61CEA3970BE130FF2.or_port 9001
B0553175AADB0501E5A61FC61CEA3970BE130FF2.dir_port 9030
B0553175AADB0501E5A61FC61CEA3970BE130FF2.nickname zwiubel
B0553175AADB0501E5A61FC61CEA3970BE130FF2.has_extrainfo false
B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_address 2a01:4f8:190:30e1::2
B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_port 9001
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.address 178.17.174.14
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.or_port 9001
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.dir_port 9030
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.nickname TorExitMoldova
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.has_extrainfo false
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.orport6_address 2a00:1dc0:caff:8b::5b9a
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.orport6_port 9001
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.address 212.129.62.232
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.or_port 443
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.dir_port 80
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.nickname wardsback
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.has_extrainfo false
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.address 199.249.230.64
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.or_port 443
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.dir_port 80
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.nickname Quintex41
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.has_extrainfo false
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.orport6_address 2620:7:6001::ffff:c759:e640
B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B.orport6_port 80
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.address 136.243.214.137
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.or_port 443
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.dir_port 80
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.nickname TorKIT
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.has_extrainfo false
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.address 212.47.233.86
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.or_port 9001
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.dir_port 9030
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.nickname netimanmu
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.has_extrainfo false
B5212DB685A2A0FCFBAE425738E478D12361710D.address 93.115.97.242
B5212DB685A2A0FCFBAE425738E478D12361710D.or_port 9001
B5212DB685A2A0FCFBAE425738E478D12361710D.dir_port 9030
B5212DB685A2A0FCFBAE425738E478D12361710D.nickname firstor
B5212DB685A2A0FCFBAE425738E478D12361710D.has_extrainfo false
B57A87009FA838471FB2227DDE68165AB2A2FCC4.address 51.38.134.104
B57A87009FA838471FB2227DDE68165AB2A2FCC4.or_port 443
B57A87009FA838471FB2227DDE68165AB2A2FCC4.dir_port 9030
B57A87009FA838471FB2227DDE68165AB2A2FCC4.nickname angeltest5
B57A87009FA838471FB2227DDE68165AB2A2FCC4.has_extrainfo false
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.address 193.11.114.46
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.or_port 9003
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.dir_port 9032
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.nickname mdfnet3
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.has_extrainfo false
B84F248233FEA90CAD439F292556A3139F6E1B82.address 85.248.227.164
B84F248233FEA90CAD439F292556A3139F6E1B82.or_port 9002
B84F248233FEA90CAD439F292556A3139F6E1B82.dir_port 444
B84F248233FEA90CAD439F292556A3139F6E1B82.nickname tollana
B84F248233FEA90CAD439F292556A3139F6E1B82.has_extrainfo false
B84F248233FEA90CAD439F292556A3139F6E1B82.orport6_address 2a00:1298:8011:212::164
B84F248233FEA90CAD439F292556A3139F6E1B82.orport6_port 9004
B86137AE9681701901C6720E55C16805B46BD8E3.address 81.7.11.186
B86137AE9681701901C6720E55C16805B46BD8E3.or_port 443
B86137AE9681701901C6720E55C16805B46BD8E3.dir_port 1080
B86137AE9681701901C6720E55C16805B46BD8E3.nickname BeastieJoy60
B86137AE9681701901C6720E55C16805B46BD8E3.has_extrainfo true
BB60F5BA113A0B8B44B7B37DE3567FE561E92F78.address 51.15.179.153
BB60F5BA113A0B8B44B7B37DE3567FE561E92F78.or_port 995
BB60F5BA113A0B8B44B7B37DE3567FE561E92F78.dir_port 110
BB60F5BA113A0B8B44B7B37DE3567FE561E92F78.nickname Casper04
BB60F5BA113A0B8B44B7B37DE3567FE561E92F78.has_extrainfo false
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.address 198.96.155.3
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.or_port 5001
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.dir_port 8080
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.nickname gurgle
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.has_extrainfo false
BCEF908195805E03E92CCFE669C48738E556B9C5.address 128.199.55.207
BCEF908195805E03E92CCFE669C48738E556B9C5.or_port 9001
BCEF908195805E03E92CCFE669C48738E556B9C5.dir_port 9030
BCEF908195805E03E92CCFE669C48738E556B9C5.nickname EldritchReaper
BCEF908195805E03E92CCFE669C48738E556B9C5.has_extrainfo false
BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_address 2a03:b0c0:2:d0::158:3001
BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_port 9001
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.address 213.141.138.174
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.or_port 9001
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.dir_port 9030
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.nickname Schakalium
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.has_extrainfo false
BF0FB582E37F738CD33C3651125F2772705BB8E8.address 148.251.190.229
BF0FB582E37F738CD33C3651125F2772705BB8E8.or_port 9010
BF0FB582E37F738CD33C3651125F2772705BB8E8.dir_port 9030
BF0FB582E37F738CD33C3651125F2772705BB8E8.nickname quadhead
BF0FB582E37F738CD33C3651125F2772705BB8E8.has_extrainfo false
BF0FB582E37F738CD33C3651125F2772705BB8E8.orport6_address 2a01:4f8:211:c68::2
BF0FB582E37F738CD33C3651125F2772705BB8E8.orport6_port 9010
BF735F669481EE1CCC348F0731551C933D1E2278.address 212.47.233.250
BF735F669481EE1CCC348F0731551C933D1E2278.or_port 9001
BF735F669481EE1CCC348F0731551C933D1E2278.dir_port 9030
BF735F669481EE1CCC348F0731551C933D1E2278.nickname FreewaySca
BF735F669481EE1CCC348F0731551C933D1E2278.has_extrainfo false
BF735F669481EE1CCC348F0731551C933D1E2278.orport6_address 2001:bc8:4400:2b00::1c:629
BF735F669481EE1CCC348F0731551C933D1E2278.orport6_port 9001
C0192FF43E777250084175F4E59AC1BA2290CE38.address 192.160.102.169
C0192FF43E777250084175F4E59AC1BA2290CE38.or_port 9001
C0192FF43E777250084175F4E59AC1BA2290CE38.dir_port 80
C0192FF43E777250084175F4E59AC1BA2290CE38.nickname manipogo
C0192FF43E777250084175F4E59AC1BA2290CE38.has_extrainfo false
C0192FF43E777250084175F4E59AC1BA2290CE38.orport6_address 2620:132:300c:c01d::9
C0192FF43E777250084175F4E59AC1BA2290CE38.orport6_port 9002
C0C4F339046EB824999F711D178472FDF53BE7F5.address 132.248.241.5
C0C4F339046EB824999F711D178472FDF53BE7F5.or_port 9101
C0C4F339046EB824999F711D178472FDF53BE7F5.dir_port 9130
C0C4F339046EB824999F711D178472FDF53BE7F5.nickname toritounam2
C0C4F339046EB824999F711D178472FDF53BE7F5.has_extrainfo false
C2AAB088555850FC434E68943F551072042B85F1.address 31.185.104.21
C2AAB088555850FC434E68943F551072042B85F1.or_port 443
C2AAB088555850FC434E68943F551072042B85F1.dir_port 80
C2AAB088555850FC434E68943F551072042B85F1.nickname Digitalcourage3ip3
C2AAB088555850FC434E68943F551072042B85F1.has_extrainfo false
C36A434DB54C66E1A97A5653858CE36024352C4D.address 50.7.74.170
C36A434DB54C66E1A97A5653858CE36024352C4D.or_port 9001
C36A434DB54C66E1A97A5653858CE36024352C4D.dir_port 9030
C36A434DB54C66E1A97A5653858CE36024352C4D.nickname theia9
C36A434DB54C66E1A97A5653858CE36024352C4D.has_extrainfo false
C36A434DB54C66E1A97A5653858CE36024352C4D.orport6_address 2001:49f0:d002:2::59
C36A434DB54C66E1A97A5653858CE36024352C4D.orport6_port 443
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.address 85.248.227.163
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.or_port 9001
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.dir_port 443
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.nickname ori
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.has_extrainfo false
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.orport6_address 2a00:1298:8011:212::163
C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9.orport6_port 9003
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.address 192.160.102.165
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.or_port 9001
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.dir_port 80
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.nickname cowcat
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.has_extrainfo false
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.orport6_address 2620:132:300c:c01d::5
C90CA3B7FE01A146B8268D56977DC4A2C024B9EA.orport6_port 9002
CBD0D1BD110EC52963082D839AC6A89D0AE243E7.address 176.31.103.150
CBD0D1BD110EC52963082D839AC6A89D0AE243E7.or_port 9001
CBD0D1BD110EC52963082D839AC6A89D0AE243E7.dir_port 9030
CBD0D1BD110EC52963082D839AC6A89D0AE243E7.nickname UV74S7mjxRcYVrGsAMw
CBD0D1BD110EC52963082D839AC6A89D0AE243E7.has_extrainfo false
D15AFF44BE641368B958A32FB6B071AC2136B8B1.address 51.254.147.57
D15AFF44BE641368B958A32FB6B071AC2136B8B1.or_port 443
D15AFF44BE641368B958A32FB6B071AC2136B8B1.dir_port 80
D15AFF44BE641368B958A32FB6B071AC2136B8B1.nickname Cosworth01
D15AFF44BE641368B958A32FB6B071AC2136B8B1.has_extrainfo false
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.address 50.7.74.172
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.or_port 443
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.dir_port 80
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.nickname theia2
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.has_extrainfo false
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.orport6_address 2001:49f0:d002:2::52
D1AFBF3117B308B6D1A7AA762B1315FD86A6B8AF.orport6_port 443
D379A1CB8285748FFF64AE94296CA89878F25B22.address 62.141.38.69
D379A1CB8285748FFF64AE94296CA89878F25B22.or_port 443
D379A1CB8285748FFF64AE94296CA89878F25B22.dir_port 9030
D379A1CB8285748FFF64AE94296CA89878F25B22.nickname angeltest3
D379A1CB8285748FFF64AE94296CA89878F25B22.has_extrainfo false
D379A1CB8285748FFF64AE94296CA89878F25B22.orport6_address 2001:4ba0:cafe:ac5::1
D379A1CB8285748FFF64AE94296CA89878F25B22.orport6_port 443
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.address 5.45.111.149
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.or_port 443
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.dir_port 80
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.nickname gGDHjdcC6zAlM8k08lY
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.has_extrainfo false
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.orport6_address 2a03:4000:6:2388:df98:15f9:b34d:443
D405FCCF06ADEDF898DF2F29C9348DCB623031BA.orport6_port 443
D50101A2ABD09DC245F7E96C0818D003CDD62351.address 50.7.74.174
D50101A2ABD09DC245F7E96C0818D003CDD62351.or_port 443
D50101A2ABD09DC245F7E96C0818D003CDD62351.dir_port 80
D50101A2ABD09DC245F7E96C0818D003CDD62351.nickname theia6
D50101A2ABD09DC245F7E96C0818D003CDD62351.has_extrainfo false
D50101A2ABD09DC245F7E96C0818D003CDD62351.orport6_address 2001:49f0:d002:2::56
D50101A2ABD09DC245F7E96C0818D003CDD62351.orport6_port 443
D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A.address 37.187.115.157
D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A.or_port 9001
D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A.dir_port 9030
D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A.nickname Janky328891
D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A.has_extrainfo false
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.address 85.10.201.47
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.or_port 9001
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.dir_port 9030
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.nickname sif
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.has_extrainfo false
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_address 2a01:4f8:a0:43eb::beef
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_port 9001
DAA39FC00B196B353C2A271459C305C429AF09E4.address 193.35.52.53
DAA39FC00B196B353C2A271459C305C429AF09E4.or_port 9001
DAA39FC00B196B353C2A271459C305C429AF09E4.dir_port 9030
DAA39FC00B196B353C2A271459C305C429AF09E4.nickname Arne
DAA39FC00B196B353C2A271459C305C429AF09E4.has_extrainfo false
DB2682153AC0CCAECD2BD1E9EBE99C6815807A1E.address 54.36.237.163
DB2682153AC0CCAECD2BD1E9EBE99C6815807A1E.or_port 443
DB2682153AC0CCAECD2BD1E9EBE99C6815807A1E.dir_port 80
DB2682153AC0CCAECD2BD1E9EBE99C6815807A1E.nickname GermanCraft2
DB2682153AC0CCAECD2BD1E9EBE99C6815807A1E.has_extrainfo false
DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686.address 176.158.236.102
DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686.or_port 9001
DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686.dir_port 9030
DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686.nickname Underworld
DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686.has_extrainfo false
DD823AFB415380A802DCAEB9461AE637604107FB.address 178.33.183.251
DD823AFB415380A802DCAEB9461AE637604107FB.or_port 443
DD823AFB415380A802DCAEB9461AE637604107FB.dir_port 80
DD823AFB415380A802DCAEB9461AE637604107FB.nickname grenouille
DD823AFB415380A802DCAEB9461AE637604107FB.has_extrainfo false
DD823AFB415380A802DCAEB9461AE637604107FB.orport6_address 2001:41d0:2:a683::251
DD823AFB415380A802DCAEB9461AE637604107FB.orport6_port 443
DD8BD7307017407FCC36F8D04A688F74A0774C02.address 171.25.193.20
DD8BD7307017407FCC36F8D04A688F74A0774C02.or_port 443
DD8BD7307017407FCC36F8D04A688F74A0774C02.dir_port 80
DD8BD7307017407FCC36F8D04A688F74A0774C02.nickname DFRI0
DD8BD7307017407FCC36F8D04A688F74A0774C02.has_extrainfo false
DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_address 2001:67c:289c::20
DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_port 443
DED6892FF89DBD737BA689698A171B2392EB3E82.address 92.222.38.67
DED6892FF89DBD737BA689698A171B2392EB3E82.or_port 443
DED6892FF89DBD737BA689698A171B2392EB3E82.dir_port 80
DED6892FF89DBD737BA689698A171B2392EB3E82.nickname ThorExit
DED6892FF89DBD737BA689698A171B2392EB3E82.has_extrainfo false
DED6892FF89DBD737BA689698A171B2392EB3E82.orport6_address 2001:41d0:52:100::112a
DED6892FF89DBD737BA689698A171B2392EB3E82.orport6_port 443
E41B16F7DDF52EBB1DB4268AB2FE340B37AD8904.address 166.70.207.2
E41B16F7DDF52EBB1DB4268AB2FE340B37AD8904.or_port 9101
E41B16F7DDF52EBB1DB4268AB2FE340B37AD8904.dir_port 9130
E41B16F7DDF52EBB1DB4268AB2FE340B37AD8904.nickname xmission1
E41B16F7DDF52EBB1DB4268AB2FE340B37AD8904.has_extrainfo false
E51620B90DCB310138ED89EDEDD0A5C361AAE24E.address 185.100.86.182
E51620B90DCB310138ED89EDEDD0A5C361AAE24E.or_port 8080
E51620B90DCB310138ED89EDEDD0A5C361AAE24E.dir_port 9030
E51620B90DCB310138ED89EDEDD0A5C361AAE24E.nickname NormalCitizen
E51620B90DCB310138ED89EDEDD0A5C361AAE24E.has_extrainfo false
E81EF60A73B3809F8964F73766B01BAA0A171E20.address 212.47.244.38
E81EF60A73B3809F8964F73766B01BAA0A171E20.or_port 443
E81EF60A73B3809F8964F73766B01BAA0A171E20.dir_port 8080
E81EF60A73B3809F8964F73766B01BAA0A171E20.nickname Chimborazo
E81EF60A73B3809F8964F73766B01BAA0A171E20.has_extrainfo false
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.address 185.4.132.148
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.or_port 443
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.dir_port 80
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.nickname libreonion1
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.has_extrainfo false
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.orport6_address 2a02:c500:2:f0::5492
E8D114B3C78D8E6E7FEB1004650DD632C2143C9E.orport6_port 443
EBE718E1A49EE229071702964F8DB1F318075FF8.address 131.188.40.188
EBE718E1A49EE229071702964F8DB1F318075FF8.or_port 80
EBE718E1A49EE229071702964F8DB1F318075FF8.dir_port 1443
EBE718E1A49EE229071702964F8DB1F318075FF8.nickname fluxe4
EBE718E1A49EE229071702964F8DB1F318075FF8.has_extrainfo true
EBE718E1A49EE229071702964F8DB1F318075FF8.orport6_address 2001:638:a000:4140::ffff:188
EBE718E1A49EE229071702964F8DB1F318075FF8.orport6_port 80
ED2338CAC2711B3E331392E1ED2831219B794024.address 192.87.28.28
ED2338CAC2711B3E331392E1ED2831219B794024.or_port 9001
ED2338CAC2711B3E331392E1ED2831219B794024.dir_port 9030
ED2338CAC2711B3E331392E1ED2831219B794024.nickname SEC6xFreeBSD64
ED2338CAC2711B3E331392E1ED2831219B794024.has_extrainfo false
ED2338CAC2711B3E331392E1ED2831219B794024.orport6_address 2001:678:230:3028:192:87:28:28
ED2338CAC2711B3E331392E1ED2831219B794024.orport6_port 9001
EE4AF632058F0734C1426B1AD689F47445CA2056.address 37.252.187.111
EE4AF632058F0734C1426B1AD689F47445CA2056.or_port 443
EE4AF632058F0734C1426B1AD689F47445CA2056.dir_port 9030
EE4AF632058F0734C1426B1AD689F47445CA2056.nickname angeltest7
EE4AF632058F0734C1426B1AD689F47445CA2056.has_extrainfo false
EE4AF632058F0734C1426B1AD689F47445CA2056.orport6_address 2a00:63c1:c:111::2
EE4AF632058F0734C1426B1AD689F47445CA2056.orport6_port 443
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.address 217.182.75.181
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.or_port 9001
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.dir_port 9030
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.nickname Aerodynamik02
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.has_extrainfo false
F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC.address 193.70.112.165
F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC.or_port 443
F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC.dir_port 80
F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC.nickname ParkBenchInd001
F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC.has_extrainfo false
F4263275CF54A6836EE7BD527B1328836A6F06E1.address 37.187.102.108
F4263275CF54A6836EE7BD527B1328836A6F06E1.or_port 443
F4263275CF54A6836EE7BD527B1328836A6F06E1.dir_port 80
F4263275CF54A6836EE7BD527B1328836A6F06E1.nickname EvilMoe
F4263275CF54A6836EE7BD527B1328836A6F06E1.has_extrainfo false
F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_address 2001:41d0:a:266c::1
F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_port 443
F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265.address 5.199.142.236
F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265.or_port 9001
F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265.dir_port 9030
F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265.nickname tornodenumber9004
F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265.has_extrainfo false
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.address 192.160.102.168
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.or_port 9001
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.dir_port 80
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.nickname prawksi
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.has_extrainfo false
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.orport6_address 2620:132:300c:c01d::8
F6A358DD367B3282D6EF5824C9D45E1A19C7E815.orport6_port 9002
F8D27B163B9247B232A2EEE68DD8B698695C28DE.address 78.47.18.110
F8D27B163B9247B232A2EEE68DD8B698695C28DE.or_port 80
F8D27B163B9247B232A2EEE68DD8B698695C28DE.dir_port 443
F8D27B163B9247B232A2EEE68DD8B698695C28DE.nickname fluxe3
F8D27B163B9247B232A2EEE68DD8B698695C28DE.has_extrainfo true
F8D27B163B9247B232A2EEE68DD8B698695C28DE.orport6_address 2a01:4f8:120:4023::110
F8D27B163B9247B232A2EEE68DD8B698695C28DE.orport6_port 80
F93D8F37E35C390BCAD9F9069E13085B745EC216.address 185.96.180.29
F93D8F37E35C390BCAD9F9069E13085B745EC216.or_port 443
F93D8F37E35C390BCAD9F9069E13085B745EC216.dir_port 80
F93D8F37E35C390BCAD9F9069E13085B745EC216.nickname TykRelay06
F93D8F37E35C390BCAD9F9069E13085B745EC216.has_extrainfo false
F93D8F37E35C390BCAD9F9069E13085B745EC216.orport6_address 2a00:4820::185:96:180:29
F93D8F37E35C390BCAD9F9069E13085B745EC216.orport6_port 443
FE296180018833AF03A8EACD5894A614623D3F76.address 149.56.45.200
FE296180018833AF03A8EACD5894A614623D3F76.or_port 9001
FE296180018833AF03A8EACD5894A614623D3F76.dir_port 9030
FE296180018833AF03A8EACD5894A614623D3F76.nickname PyotrTorpotkinOne
FE296180018833AF03A8EACD5894A614623D3F76.has_extrainfo false
FE296180018833AF03A8EACD5894A614623D3F76.orport6_address 2607:5300:201:3000::17d3
FE296180018833AF03A8EACD5894A614623D3F76.orport6_port 9002
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.address 193.11.164.243
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.or_port 9001
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.dir_port 9030
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.nickname Lule
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.has_extrainfo false
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.orport6_address 2001:6b0:7:125::243
FFA72BD683BC2FCF988356E6BEC1E490F313FB07.orport6_port 9001
stem-1.8.0/stem/connection.py 0000664 0001750 0001750 00000135747 13501272761 016710 0 ustar atagar atagar 0000000 0000000
# Copyright 2011-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Functions for connecting and authenticating to the tor process.
The :func:`~stem.connection.connect` function give an easy, one line
method for getting an authenticated control connection. This is handy for CLI
applications and the python interactive interpreter, but does several things
that makes it undesirable for applications (uses stdin/stdout, suppresses
exceptions, etc).
::
import sys
from stem.connection import connect
if __name__ == '__main__':
controller = connect()
if not controller:
sys.exit(1) # unable to get a connection
print 'Tor is running version %s' % controller.get_version()
controller.close()
::
% python example.py
Tor is running version 0.2.4.10-alpha-dev (git-8be6058d8f31e578)
... or if Tor isn't running...
::
% python example.py
[Errno 111] Connection refused
The :func:`~stem.connection.authenticate` function, however, gives easy but
fine-grained control over the authentication process. For instance...
::
import sys
import getpass
import stem.connection
import stem.socket
try:
control_socket = stem.socket.ControlPort(port = 9051)
except stem.SocketError as exc:
print 'Unable to connect to port 9051 (%s)' % exc
sys.exit(1)
try:
stem.connection.authenticate(control_socket)
except stem.connection.IncorrectSocketType:
print 'Please check in your torrc that 9051 is the ControlPort.'
print 'Maybe you configured it to be the ORPort or SocksPort instead?'
sys.exit(1)
except stem.connection.MissingPassword:
controller_password = getpass.getpass('Controller password: ')
try:
stem.connection.authenticate_password(control_socket, controller_password)
except stem.connection.PasswordAuthFailed:
print 'Unable to authenticate, password is incorrect'
sys.exit(1)
except stem.connection.AuthenticationFailure as exc:
print 'Unable to authenticate: %s' % exc
sys.exit(1)
**Module Overview:**

::

  connect - Simple method for getting authenticated control connection

  authenticate - Main method for authenticating to a control socket
  authenticate_none - Authenticates to an open control socket
  authenticate_password - Authenticates to a socket supporting password auth
  authenticate_cookie - Authenticates to a socket supporting cookie auth
  authenticate_safecookie - Authenticates to a socket supporting safecookie auth

  get_protocolinfo - Issues a PROTOCOLINFO query

  AuthenticationFailure - Base exception raised for authentication failures
    |- UnrecognizedAuthMethods - Authentication methods are unsupported
    |- IncorrectSocketType - Socket does not speak the tor control protocol
    |
    |- OpenAuthFailed - Failure when authenticating by an open socket
    |  +- OpenAuthRejected - Tor rejected this method of authentication
    |
    |- PasswordAuthFailed - Failure when authenticating by a password
    |  |- PasswordAuthRejected - Tor rejected this method of authentication
    |  |- IncorrectPassword - Password was rejected
    |  +- MissingPassword - Socket supports password auth but wasn't attempted
    |
    |- CookieAuthFailed - Failure when authenticating by a cookie
    |  |- CookieAuthRejected - Tor rejected this method of authentication
    |  |- IncorrectCookieValue - Authentication cookie was rejected
    |  |- IncorrectCookieSize - Size of the cookie file is incorrect
    |  |- UnreadableCookieFile - Unable to read the contents of the auth cookie
    |  +- AuthChallengeFailed - Failure completing the authchallenge request
    |     |- AuthChallengeUnsupported - Tor doesn't recognize the AUTHCHALLENGE command
    |     |- AuthSecurityFailure - Server provided the wrong nonce credentials
    |     |- InvalidClientNonce - The client nonce is invalid
    |     +- UnrecognizedAuthChallengeMethod - AUTHCHALLENGE does not support the given methods.
    |
    +- MissingAuthInfo - Unexpected PROTOCOLINFO response, missing auth info
       |- NoAuthMethods - Missing any methods for authenticating
       +- NoAuthCookie - Supports cookie auth but doesn't have its path
.. data:: AuthMethod (enum)
Enumeration of PROTOCOLINFO responses for supported authentication methods.
============== ===========
AuthMethod Description
============== ===========
**NONE** No authentication required.
**PASSWORD** Password required, see tor's HashedControlPassword option.
**COOKIE** Contents of the cookie file required, see tor's CookieAuthentication option.
**SAFECOOKIE** Need to reply to a hmac challenge using the contents of the cookie file.
**UNKNOWN** Tor provided one or more authentication methods that we don't recognize, probably something new.
============== ===========
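For example, checking which of these methods tor will accept (a minimal
sketch, assuming tor has a ControlPort at the default 9051)...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)
  protocolinfo_response = stem.connection.get_protocolinfo(control_socket)

  if stem.connection.AuthMethod.PASSWORD in protocolinfo_response.auth_methods:
    print('tor will accept a password')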
"""
import binascii
import getpass
import hashlib
import hmac
import os
import stem.control
import stem.response
import stem.socket
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.system
import stem.version
from stem.util import log
AuthMethod = stem.util.enum.Enum('NONE', 'PASSWORD', 'COOKIE', 'SAFECOOKIE', 'UNKNOWN')
CLIENT_HASH_CONSTANT = b'Tor safe cookie authentication controller-to-server hash'
SERVER_HASH_CONSTANT = b'Tor safe cookie authentication server-to-controller hash'
CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE = os.urandom(32)
MISSING_PASSWORD_BUG_MSG = """
BUG: You provided a password but despite this stem reported that it was
missing. This shouldn't happen - please let us know about it!
http://bugs.torproject.org
"""
UNRECOGNIZED_AUTH_TYPE_MSG = """
Tor is using a type of authentication we do not recognize...
{auth_methods}
Please check that stem is up to date and if there is an existing issue on
'http://bugs.torproject.org'. If there isn't one then let us know!
"""
UNREADABLE_COOKIE_FILE_MSG = """
We were unable to read tor's authentication cookie...
Path: {path}
Issue: {issue}
"""
WRONG_PORT_TYPE_MSG = """
Please check in your torrc that {port} is the ControlPort. Maybe you
configured it to be the ORPort or SocksPort instead?
"""
WRONG_SOCKET_TYPE_MSG = """
Unable to connect to tor. Are you sure the interface you specified belongs to
tor?
"""
CONNECT_MESSAGES = {
'general_auth_failure': 'Unable to authenticate: {error}',
'incorrect_password': 'Incorrect password',
'no_control_port': "Unable to connect to tor. Maybe it's running without a ControlPort?",
'password_prompt': 'Tor controller password:',
'needs_password': 'Tor requires a password to authenticate',
'socket_doesnt_exist': "The socket file you specified ({path}) doesn't exist",
'tor_isnt_running': "Unable to connect to tor. Are you sure it's running?",
'unable_to_use_port': 'Unable to connect to {address}:{port}: {error}',
'unable_to_use_socket': "Unable to connect to '{path}': {error}",
'missing_password_bug': MISSING_PASSWORD_BUG_MSG.strip(),
'unrecognized_auth_type': UNRECOGNIZED_AUTH_TYPE_MSG.strip(),
'unreadable_cookie_file': UNREADABLE_COOKIE_FILE_MSG.strip(),
'wrong_port_type': WRONG_PORT_TYPE_MSG.strip(),
'wrong_socket_type': WRONG_SOCKET_TYPE_MSG.strip(),
}
COMMON_TOR_COMMANDS = (
'tor',
'tor.real', # command run by the Tor Browser Bundle
'/usr/local/bin/tor', # FreeBSD expands the whole path, this is the default location
)
def connect(control_port = ('127.0.0.1', 'default'), control_socket = '/var/run/tor/control', password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. This is very
handy for debugging or CLI setup, handling setup and prompting for a password
if one is needed but wasn't provided. If any issues arise this prints a
description of the problem and returns **None**.
If both a **control_port** and **control_socket** are provided then the
**control_socket** is tried first, and this provides a generic error message
if they're both unavailable.
In much the same vein as git porcelain commands, users should not rely on
details of how this works. Messages and details of this function's behavior
could change in the future.
If the **port** is **'default'** then this checks on both 9051 (default for
relays) and 9151 (default for the Tor Browser). This default may change in
the future.
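For instance...

::

  import sys

  import stem.connection

  if __name__ == '__main__':
    controller = stem.connection.connect()

    if not controller:
      sys.exit(1)  # unable to get a connection

    print('Tor is running version %s' % controller.get_version())
    controller.close()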
.. versionadded:: 1.2.0
.. versionchanged:: 1.5.0
Use both port 9051 and 9151 by default.
:param tuple control_port: address and port tuple, for instance **('127.0.0.1', 9051)**
:param str control_socket: path where the control socket is located
:param str password: passphrase to authenticate to the socket
:param bool password_prompt: prompt for the controller password if it wasn't
supplied
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
:raises: **ValueError** if given an invalid control_port, or both
**control_port** and **control_socket** are **None**
"""
if control_port is None and control_socket is None:
raise ValueError('Neither a control port nor control socket were provided. Nothing to connect to.')
elif control_port:
if len(control_port) != 2:
raise ValueError('The control_port argument for connect() should be an (address, port) tuple.')
elif not stem.util.connection.is_valid_ipv4_address(control_port[0]):
raise ValueError("'%s' isn't a vaid IPv4 address" % control_port[0])
elif control_port[1] != 'default' and not stem.util.connection.is_valid_port(control_port[1]):
raise ValueError("'%s' isn't a valid port" % control_port[1])
control_connection, error_msg = None, ''
if control_socket:
if os.path.exists(control_socket):
try:
control_connection = stem.socket.ControlSocketFile(control_socket)
except stem.SocketError as exc:
error_msg = CONNECT_MESSAGES['unable_to_use_socket'].format(path = control_socket, error = exc)
else:
error_msg = CONNECT_MESSAGES['socket_doesnt_exist'].format(path = control_socket)
if control_port and not control_connection:
address, port = control_port
try:
if port == 'default':
control_connection = _connection_for_default_port(address)
else:
control_connection = stem.socket.ControlPort(address, int(port))
except stem.SocketError as exc:
error_msg = CONNECT_MESSAGES['unable_to_use_port'].format(address = address, port = port, error = exc)
# If unable to connect to either a control socket or port then finally fail
# out. If we only attempted to connect to one of them then provide the error
# output from that. Otherwise we provide a more generic error message.
if not control_connection:
if control_socket and control_port:
is_tor_running = stem.util.system.is_running(COMMON_TOR_COMMANDS)
error_msg = CONNECT_MESSAGES['no_control_port'] if is_tor_running else CONNECT_MESSAGES['tor_isnt_running']
print(error_msg)
return None
return _connect_auth(control_connection, password, password_prompt, chroot_path, controller)
def connect_port(address = '127.0.0.1', port = 9051, password = None, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. This is very
handy for debugging or CLI setup, handling setup and prompting for a password
if one is needed but wasn't provided. If any issues arise this prints a
description of the problem and returns **None**.
.. deprecated:: 1.2.0
Use :func:`~stem.connection.connect` instead.
:param str address: ip address of the controller
:param int port: port number of the controller
:param str password: passphrase to authenticate to the socket
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
"""
try:
control_port = stem.socket.ControlPort(address, port)
except stem.SocketError as exc:
print(exc)
return None
return _connect_auth(control_port, password, True, chroot_path, controller)
def connect_socket_file(path = '/var/run/tor/control', password = None, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. For more
information see the :func:`~stem.connection.connect_port` function.
In much the same vein as git porcelain commands, users should not rely on
details of how this works. Messages or details of this function's behavior
might change in the future.
.. deprecated:: 1.2.0
Use :func:`~stem.connection.connect` instead.
:param str path: path where the control socket is located
:param str password: passphrase to authenticate to the socket
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
"""
try:
control_socket = stem.socket.ControlSocketFile(path)
except stem.SocketError as exc:
print(exc)
return None
return _connect_auth(control_socket, password, True, chroot_path, controller)
def _connect_auth(control_socket, password, password_prompt, chroot_path, controller):
"""
Helper for the connect_* functions that authenticates the socket and
constructs the controller.
:param stem.socket.ControlSocket control_socket: socket being authenticated to
:param str password: passphrase to authenticate to the socket
:param bool password_prompt: prompt for the controller password if it wasn't
supplied
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
"""
try:
authenticate(control_socket, password, chroot_path)
if controller is None:
return control_socket
else:
return controller(control_socket, is_authenticated = True)
except IncorrectSocketType:
if isinstance(control_socket, stem.socket.ControlPort):
print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.port))
else:
print(CONNECT_MESSAGES['wrong_socket_type'])
control_socket.close()
return None
except UnrecognizedAuthMethods as exc:
print(CONNECT_MESSAGES['unrecognized_auth_type'].format(auth_methods = ', '.join(exc.unknown_auth_methods)))
control_socket.close()
return None
except IncorrectPassword:
print(CONNECT_MESSAGES['incorrect_password'])
control_socket.close()
return None
except MissingPassword:
if password is not None:
control_socket.close()
raise ValueError(CONNECT_MESSAGES['missing_password_bug'])
if password_prompt:
try:
password = getpass.getpass(CONNECT_MESSAGES['password_prompt'] + ' ')
except KeyboardInterrupt:
control_socket.close()
return None
return _connect_auth(control_socket, password, password_prompt, chroot_path, controller)
else:
print(CONNECT_MESSAGES['needs_password'])
control_socket.close()
return None
except UnreadableCookieFile as exc:
print(CONNECT_MESSAGES['unreadable_cookie_file'].format(path = exc.cookie_path, issue = str(exc)))
control_socket.close()
return None
except AuthenticationFailure as exc:
print(CONNECT_MESSAGES['general_auth_failure'].format(error = exc))
control_socket.close()
return None
def authenticate(controller, password = None, chroot_path = None, protocolinfo_response = None):
"""
Authenticates to a control socket using the information provided by a
PROTOCOLINFO response. In practice this will often be all we need to
authenticate, raising an exception if all attempts to authenticate fail.
All exceptions are subclasses of AuthenticationFailure so, in practice,
callers should catch the types of authentication failure that they care
about, then have a :class:`~stem.connection.AuthenticationFailure` catch-all
at the end.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
:param controller: tor controller or socket to be authenticated
:param str password: passphrase to present to the socket if it uses password
authentication (skips password auth if **None**)
:param str chroot_path: path prefix if in a chroot environment
:param stem.response.protocolinfo.ProtocolInfoResponse protocolinfo_response:
tor protocolinfo response, this is retrieved on our own if **None**
:raises: If all attempts to authenticate fail then this will raise a
:class:`~stem.connection.AuthenticationFailure` subclass. Since this may
try multiple authentication methods it may encounter multiple exceptions.
If so then the exception this raises is prioritized as follows...
* :class:`stem.connection.IncorrectSocketType`
The controller does not speak the tor control protocol. Most often this
happens because the user confused the SocksPort or ORPort with the
ControlPort.
* :class:`stem.connection.UnrecognizedAuthMethods`
All of the authentication methods tor will accept are new and
unrecognized. Please upgrade stem and, if that doesn't work, file a
ticket on 'trac.torproject.org' and I'd be happy to add support.
* :class:`stem.connection.MissingPassword`
We were unable to authenticate but didn't attempt password authentication
because none was provided. You should prompt the user for a password and
try again via 'authenticate_password'.
* :class:`stem.connection.IncorrectPassword`
We were provided with a password but it was incorrect.
* :class:`stem.connection.IncorrectCookieSize`
Tor allows for authentication by reading a cookie file, but that file
is the wrong size to be an authentication cookie.
* :class:`stem.connection.UnreadableCookieFile`
Tor allows for authentication by reading a cookie file, but we can't
read that file (probably due to permissions).
* **\\***:class:`stem.connection.IncorrectCookieValue`
Tor allows for authentication by reading a cookie file, but rejected
the contents of that file.
* **\\***:class:`stem.connection.AuthChallengeUnsupported`
Tor doesn't recognize the AUTHCHALLENGE command. This is probably a Tor
version prior to SAFECOOKIE being implemented, but this exception shouldn't
arise because we won't attempt SAFECOOKIE auth unless Tor claims to
support it.
* **\\***:class:`stem.connection.UnrecognizedAuthChallengeMethod`
Tor couldn't recognize the AUTHCHALLENGE method Stem sent to it. This
shouldn't happen at all.
* **\\***:class:`stem.connection.InvalidClientNonce`
Tor says that the client nonce provided by Stem during the AUTHCHALLENGE
process is invalid.
* **\\***:class:`stem.connection.AuthSecurityFailure`
Nonce value provided by the server was invalid.
* **\\***:class:`stem.connection.OpenAuthRejected`
Tor says that it allows for authentication without any credentials, but
then rejected our authentication attempt.
* **\\***:class:`stem.connection.MissingAuthInfo`
Tor provided us with a PROTOCOLINFO reply that is technically valid, but
missing the information we need to authenticate.
* **\\***:class:`stem.connection.AuthenticationFailure`
There are numerous other ways that authentication could have failed
including socket failures, malformed controller responses, etc. These
mostly constitute transient failures or bugs.
**\\*** In practice it is highly unusual for this to occur, being more of a
theoretical possibility rather than something you should expect. It's fine
to treat these as errors. If you have a use case where this commonly
happens, please file a ticket on 'trac.torproject.org'.
In the future new :class:`~stem.connection.AuthenticationFailure`
subclasses may be added to allow for better error handling.
"""
if not protocolinfo_response:
try:
protocolinfo_response = get_protocolinfo(controller)
except stem.ProtocolError:
raise IncorrectSocketType('unable to use the control socket')
except stem.SocketError as exc:
raise AuthenticationFailure('socket connection failed (%s)' % exc)
auth_methods = list(protocolinfo_response.auth_methods)
auth_exceptions = []
if len(auth_methods) == 0:
raise NoAuthMethods('our PROTOCOLINFO response did not have any methods for authenticating')
# remove authentication methods that are either unknown or for which we don't
# have an input
if AuthMethod.UNKNOWN in auth_methods:
auth_methods.remove(AuthMethod.UNKNOWN)
unknown_methods = protocolinfo_response.unknown_auth_methods
plural_label = 's' if len(unknown_methods) > 1 else ''
methods_label = ', '.join(unknown_methods)
# we... er, can't do anything with only unrecognized auth types
if not auth_methods:
exc_msg = 'unrecognized authentication method%s (%s)' % (plural_label, methods_label)
auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods))
else:
log.debug('Authenticating to a socket with unrecognized auth method%s, ignoring them: %s' % (plural_label, methods_label))
if protocolinfo_response.cookie_path is None:
for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
if cookie_auth_method in auth_methods:
auth_methods.remove(cookie_auth_method)
exc_msg = 'our PROTOCOLINFO response did not have the location of our authentication cookie'
auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE))
if AuthMethod.PASSWORD in auth_methods and password is None:
auth_methods.remove(AuthMethod.PASSWORD)
auth_exceptions.append(MissingPassword('no passphrase provided'))
# iterating over AuthMethods so we can try them in this order
for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE):
if auth_type not in auth_methods:
continue
try:
if auth_type == AuthMethod.NONE:
authenticate_none(controller, False)
elif auth_type == AuthMethod.PASSWORD:
authenticate_password(controller, password, False)
elif auth_type in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
cookie_path = protocolinfo_response.cookie_path
if chroot_path:
cookie_path = os.path.join(chroot_path, cookie_path.lstrip(os.path.sep))
if auth_type == AuthMethod.SAFECOOKIE:
authenticate_safecookie(controller, cookie_path, False)
else:
authenticate_cookie(controller, cookie_path, False)
if isinstance(controller, stem.control.BaseController):
controller._post_authentication()
return # success!
except OpenAuthRejected as exc:
auth_exceptions.append(exc)
except IncorrectPassword as exc:
auth_exceptions.append(exc)
except PasswordAuthRejected as exc:
# Since the PROTOCOLINFO says password auth is available we can assume
# that if PasswordAuthRejected is raised it's being raised in error.
log.debug('The authenticate_password method raised a PasswordAuthRejected when password auth should be available. Stem may need to be corrected to recognize this response: %s' % exc)
auth_exceptions.append(IncorrectPassword(str(exc)))
except AuthSecurityFailure as exc:
log.info('Tor failed to provide the nonce expected for safecookie authentication. (%s)' % exc)
auth_exceptions.append(exc)
except (InvalidClientNonce, UnrecognizedAuthChallengeMethod, AuthChallengeFailed) as exc:
auth_exceptions.append(exc)
except (IncorrectCookieSize, UnreadableCookieFile, IncorrectCookieValue) as exc:
auth_exceptions.append(exc)
except CookieAuthRejected as exc:
auth_func = 'authenticate_safecookie' if exc.is_safecookie else 'authenticate_cookie'
log.debug('The %s method raised a CookieAuthRejected when cookie auth should be available. Stem may need to be corrected to recognize this response: %s' % (auth_func, exc))
auth_exceptions.append(IncorrectCookieValue(str(exc), exc.cookie_path, exc.is_safecookie))
except stem.ControllerError as exc:
auth_exceptions.append(AuthenticationFailure(str(exc)))
# All authentication attempts failed. Raise the exception that takes priority
# according to our pydocs.
for exc_type in AUTHENTICATE_EXCEPTIONS:
for auth_exc in auth_exceptions:
if isinstance(auth_exc, exc_type):
raise auth_exc
# We really, really shouldn't get here. It means that auth_exceptions is
# either empty or contains something that isn't an AuthenticationFailure.
raise AssertionError('BUG: Authentication failed without providing a recognized exception: %s' % str(auth_exceptions))
def authenticate_none(controller, suppress_ctl_errors = True):
"""
Authenticates to an open control socket. All control connections need to
authenticate before they can be used, even if tor hasn't been configured to
use any authentication.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
:param controller: tor controller or socket to be authenticated
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises: :class:`stem.connection.OpenAuthRejected` if the empty authentication credentials aren't accepted
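For example (a minimal sketch, assuming tor has no authentication configured
and a ControlPort of 9051)...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)

  try:
    stem.connection.authenticate_none(control_socket)
  except stem.connection.OpenAuthRejected:
    print('Unable to authenticate, tor requires credentials')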
"""
try:
auth_response = _msg(controller, 'AUTHENTICATE')
# if we got anything but an OK response then error
if str(auth_response) != 'OK':
try:
controller.connect()
except:
pass
raise OpenAuthRejected(str(auth_response), auth_response)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise OpenAuthRejected('Socket failed (%s)' % exc)
def authenticate_password(controller, password, suppress_ctl_errors = True):
"""
Authenticates to a control socket that uses a password (via the
HashedControlPassword torrc option). Quotes in the password are escaped.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
If you use this function directly, rather than
:func:`~stem.connection.authenticate`, we may mistakenly raise a
PasswordAuthRejected rather than IncorrectPassword. This is because we rely
on tor's error messaging which is liable to change in future versions
(:trac:`4817`).
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
:param controller: tor controller or socket to be authenticated
:param str password: passphrase to present to the socket
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises:
* :class:`stem.connection.PasswordAuthRejected` if the socket doesn't
accept password authentication
* :class:`stem.connection.IncorrectPassword` if the authentication
credentials aren't accepted
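For example, prompting for the passphrase and presenting it (a minimal
sketch, assuming a ControlPort of 9051)...

::

  import getpass

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)
  password = getpass.getpass('controller password: ')

  try:
    stem.connection.authenticate_password(control_socket, password)
  except stem.connection.PasswordAuthFailed:
    print('Unable to authenticate, password is incorrect')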
"""
# Escapes quotes. Tor can include those in the password hash, in which case
# it expects escaped quotes from the controller. For more information see...
# https://trac.torproject.org/projects/tor/ticket/4600
password = password.replace('"', '\\"')
try:
auth_response = _msg(controller, 'AUTHENTICATE "%s"' % password)
# if we got anything but an OK response then error
if str(auth_response) != 'OK':
try:
controller.connect()
except:
pass
# all we have to go on is the error message from tor...
# Password did not match HashedControlPassword value from configuration...
# Password did not match HashedControlPassword *or*...
if 'Password did not match HashedControlPassword' in str(auth_response):
raise IncorrectPassword(str(auth_response), auth_response)
else:
raise PasswordAuthRejected(str(auth_response), auth_response)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise PasswordAuthRejected('Socket failed (%s)' % exc)
def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True):
"""
Authenticates to a control socket that uses the contents of an authentication
cookie (generated via the CookieAuthentication torrc option). This does basic
validation that this is a cookie before presenting the contents to the
socket.
The :class:`~stem.connection.IncorrectCookieSize` and
:class:`~stem.connection.UnreadableCookieFile` exceptions take precedence
over the other types.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
If you use this function directly, rather than
:func:`~stem.connection.authenticate`, we may mistakenly raise a
:class:`~stem.connection.CookieAuthRejected` rather than
:class:`~stem.connection.IncorrectCookieValue`. This is because we rely on
tor's error messaging which is liable to change in future versions
(:trac:`4817`).
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
:param controller: tor controller or socket to be authenticated
:param str cookie_path: path of the authentication cookie to send to tor
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises:
* :class:`stem.connection.IncorrectCookieSize` if the cookie file's size
is wrong
* :class:`stem.connection.UnreadableCookieFile` if the cookie file doesn't
exist or we're unable to read it
* :class:`stem.connection.CookieAuthRejected` if cookie authentication is
attempted but the socket doesn't accept it
* :class:`stem.connection.IncorrectCookieValue` if the cookie file's value
is rejected
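For example, getting the cookie's location from PROTOCOLINFO and presenting
its contents (a minimal sketch, assuming a ControlPort of 9051)...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)
  protocolinfo_response = stem.connection.get_protocolinfo(control_socket)

  try:
    stem.connection.authenticate_cookie(control_socket, protocolinfo_response.cookie_path)
  except stem.connection.CookieAuthFailed as exc:
    print('Unable to authenticate: %s' % exc)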
"""
cookie_data = _read_cookie(cookie_path, False)
try:
# binascii.b2a_hex() takes a byte string and returns one too. With python 3
# this is a problem because string formatting for byte strings includes the
# b'' wrapper...
#
# >>> "AUTHENTICATE %s" % b'content'
# "AUTHENTICATE b'content'"
#
# This seems dumb but oh well. Converting the result to unicode so it won't
# misbehave.
auth_token_hex = binascii.b2a_hex(stem.util.str_tools._to_bytes(cookie_data))
msg = 'AUTHENTICATE %s' % stem.util.str_tools._to_unicode(auth_token_hex)
auth_response = _msg(controller, msg)
# if we got anything but an OK response then error
if str(auth_response) != 'OK':
try:
controller.connect()
except:
pass
# all we have to go on is the error message from tor...
# ... Authentication cookie did not match expected value.
# ... *or* authentication cookie.
if '*or* authentication cookie.' in str(auth_response) or \
'Authentication cookie did not match expected value.' in str(auth_response):
raise IncorrectCookieValue(str(auth_response), cookie_path, False, auth_response)
else:
raise CookieAuthRejected(str(auth_response), cookie_path, False, auth_response)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, False)
def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True):
"""
Authenticates to a control socket using the safe cookie method, which is
enabled by setting the CookieAuthentication torrc option on Tor clients that
support it.
Authentication with this is a two-step process...
1. send a nonce to the server and receives a challenge from the server for
the cookie's contents
2. generate a hash digest using the challenge received in the first step, and
use it to authenticate the controller
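Both hashes in this exchange are derived the same way. The following is a
minimal sketch of that derivation (the _safecookie_hash helper is
illustrative, mirroring this module's CLIENT_HASH_CONSTANT and
SERVER_HASH_CONSTANT)...

::

  import hashlib
  import hmac

  def _safecookie_hash(constant, cookie_data, client_nonce, server_nonce):
    # HMAC-SHA256 keyed with the role constant over the cookie and both nonces
    return hmac.new(constant, cookie_data + client_nonce + server_nonce, hashlib.sha256).digest()

  # tor's challenge should equal...
  #
  #   _safecookie_hash(SERVER_HASH_CONSTANT, cookie_data, client_nonce, server_nonce)
  #
  # ... and we reply with the hex encoded...
  #
  #   _safecookie_hash(CLIENT_HASH_CONSTANT, cookie_data, client_nonce, server_nonce)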
The :class:`~stem.connection.IncorrectCookieSize` and
:class:`~stem.connection.UnreadableCookieFile` exceptions take precedence
over the other exception types.
The :class:`~stem.connection.AuthChallengeUnsupported`,
:class:`~stem.connection.UnrecognizedAuthChallengeMethod`,
:class:`~stem.connection.InvalidClientNonce` and
:class:`~stem.connection.CookieAuthRejected` exceptions are next in the order
of precedence. Depending on the reason, one of these is raised if the first
(AUTHCHALLENGE) step fails.
In the second (AUTHENTICATE) step,
:class:`~stem.connection.IncorrectCookieValue` or
:class:`~stem.connection.CookieAuthRejected` may be raised.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
:param controller: tor controller or socket to be authenticated
:param str cookie_path: path of the authentication cookie to send to tor
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises:
* :class:`stem.connection.IncorrectCookieSize` if the cookie file's size
is wrong
* :class:`stem.connection.UnreadableCookieFile` if the cookie file doesn't
exist or we're unable to read it
* :class:`stem.connection.CookieAuthRejected` if cookie authentication is
attempted but the socket doesn't accept it
* :class:`stem.connection.IncorrectCookieValue` if the cookie file's value
is rejected
* :class:`stem.connection.UnrecognizedAuthChallengeMethod` if the Tor
client fails to recognize the AuthChallenge method
* :class:`stem.connection.AuthChallengeUnsupported` if AUTHCHALLENGE is
unimplemented, or if unable to parse AUTHCHALLENGE response
* :class:`stem.connection.AuthSecurityFailure` if AUTHCHALLENGE's response
looks like a security attack
* :class:`stem.connection.InvalidClientNonce` if stem's AUTHCHALLENGE
client nonce is rejected for being invalid
"""
cookie_data = _read_cookie(cookie_path, True)
client_nonce = os.urandom(32)
try:
client_nonce_hex = stem.util.str_tools._to_unicode(binascii.b2a_hex(client_nonce))
authchallenge_response = _msg(controller, 'AUTHCHALLENGE SAFECOOKIE %s' % client_nonce_hex)
if not authchallenge_response.is_ok():
try:
controller.connect()
except:
pass
authchallenge_response_str = str(authchallenge_response)
if 'Authentication required.' in authchallenge_response_str:
raise AuthChallengeUnsupported("SAFECOOKIE authentication isn't supported", cookie_path)
elif 'AUTHCHALLENGE only supports' in authchallenge_response_str:
raise UnrecognizedAuthChallengeMethod(authchallenge_response_str, cookie_path, 'SAFECOOKIE')
elif 'Invalid base16 client nonce' in authchallenge_response_str:
raise InvalidClientNonce(authchallenge_response_str, cookie_path)
elif 'Cookie authentication is disabled' in authchallenge_response_str:
raise CookieAuthRejected(authchallenge_response_str, cookie_path, True)
else:
raise AuthChallengeFailed(authchallenge_response, cookie_path)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise AuthChallengeFailed('Socket failed (%s)' % exc, cookie_path)
try:
stem.response.convert('AUTHCHALLENGE', authchallenge_response)
except stem.ProtocolError as exc:
if not suppress_ctl_errors:
raise
else:
raise AuthChallengeFailed('Unable to parse AUTHCHALLENGE response: %s' % exc, cookie_path)
expected_server_hash = _hmac_sha256(
SERVER_HASH_CONSTANT,
cookie_data + client_nonce + authchallenge_response.server_nonce)
authchallenge_hmac = _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, authchallenge_response.server_hash)
expected_hmac = _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, expected_server_hash)
if authchallenge_hmac != expected_hmac:
raise AuthSecurityFailure('Tor provided the wrong server nonce', cookie_path)
try:
client_hash = _hmac_sha256(
CLIENT_HASH_CONSTANT,
cookie_data + client_nonce + authchallenge_response.server_nonce)
auth_response = _msg(controller, 'AUTHENTICATE %s' % stem.util.str_tools._to_unicode(binascii.b2a_hex(client_hash)))
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, True)
# if we got anything but an OK response then err
if not auth_response.is_ok():
try:
controller.connect()
except:
pass
# all we have to go on is the error message from tor...
# ... Safe cookie response did not match expected value
# ... *or* authentication cookie.
if '*or* authentication cookie.' in str(auth_response) or \
'Safe cookie response did not match expected value' in str(auth_response):
raise IncorrectCookieValue(str(auth_response), cookie_path, True, auth_response)
else:
raise CookieAuthRejected(str(auth_response), cookie_path, True, auth_response)
def get_protocolinfo(controller):
"""
Issues a PROTOCOLINFO query to a control socket, getting information about
the tor process running on it. If the socket is already closed then it is
first reconnected.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
:param controller: tor controller or socket to be queried
:returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor
:raises:
* :class:`stem.ProtocolError` if the PROTOCOLINFO response is
malformed
* :class:`stem.SocketError` if problems arise in establishing or
using the socket
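For example (a minimal sketch, assuming tor has a ControlPort of 9051)...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)
  protocolinfo_response = stem.connection.get_protocolinfo(control_socket)
  print('tor version: %s' % protocolinfo_response.tor_version)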
"""
try:
protocolinfo_response = _msg(controller, 'PROTOCOLINFO 1')
except:
protocolinfo_response = None
# Tor hangs up on sockets after receiving a PROTOCOLINFO query if it isn't
# immediately followed by authentication. Transparently reconnect if that happens.
if not protocolinfo_response or str(protocolinfo_response) == 'Authentication required.':
controller.connect()
try:
protocolinfo_response = _msg(controller, 'PROTOCOLINFO 1')
except stem.SocketClosed as exc:
raise stem.SocketError(exc)
stem.response.convert('PROTOCOLINFO', protocolinfo_response)
return protocolinfo_response
def _msg(controller, message):
"""
Sends and receives a message with either a
:class:`~stem.socket.ControlSocket` or :class:`~stem.control.BaseController`.
"""
if isinstance(controller, stem.socket.ControlSocket):
controller.send(message)
return controller.recv()
else:
return controller.msg(message)
def _connection_for_default_port(address):
"""
Attempts to provide a controller connection for either port 9051 (default for
relays) or 9151 (default for Tor Browser). If both fail then this raises the
exception for port 9051.
:param str address: address to connect to
:returns: :class:`~stem.socket.ControlPort` for the controller connection
:raises: :class:`stem.SocketError` if we're unable to establish a connection
"""
try:
return stem.socket.ControlPort(address, 9051)
except stem.SocketError as exc:
try:
return stem.socket.ControlPort(address, 9151)
except stem.SocketError:
raise exc
def _read_cookie(cookie_path, is_safecookie):
"""
Provides the contents of a given cookie file.
:param str cookie_path: absolute path of the cookie file
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
:raises:
* :class:`stem.connection.UnreadableCookieFile` if the cookie file is
unreadable
* :class:`stem.connection.IncorrectCookieSize` if the cookie size is
incorrect (not 32 bytes)
"""
if not os.path.exists(cookie_path):
exc_msg = "Authentication failed: '%s' doesn't exist" % cookie_path
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
# Abort if the file isn't 32 bytes long. This is to avoid exposing arbitrary
# file content to the port.
#
# Without this a malicious socket could, for instance, claim that
# '~/.bash_history' or '~/.ssh/id_rsa' was its authentication cookie to trick
# us into reading it for them with our current permissions.
#
# https://trac.torproject.org/projects/tor/ticket/4303
auth_cookie_size = os.path.getsize(cookie_path)
if auth_cookie_size != 32:
exc_msg = "Authentication failed: authentication cookie '%s' is the wrong size (%i bytes instead of 32)" % (cookie_path, auth_cookie_size)
raise IncorrectCookieSize(exc_msg, cookie_path, is_safecookie)
try:
with open(cookie_path, 'rb', 0) as f:
return f.read()
except IOError as exc:
exc_msg = "Authentication failed: unable to read '%s' (%s)" % (cookie_path, exc)
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
def _hmac_sha256(key, msg):
"""
Generates a sha256 digest using the given key and message.
:param str key: starting key for the hash
:param str msg: message to be hashed
:returns: sha256 digest of msg as bytes, hashed using the given key
"""
return hmac.new(key, msg, hashlib.sha256).digest()
class AuthenticationFailure(Exception):
"""
Base error for authentication failures.
:var stem.socket.ControlMessage auth_response: AUTHENTICATE response from the
control socket, **None** if one wasn't received
"""
def __init__(self, message, auth_response = None):
super(AuthenticationFailure, self).__init__(message)
self.auth_response = auth_response
class UnrecognizedAuthMethods(AuthenticationFailure):
"""
All methods for authenticating aren't recognized.
:var list unknown_auth_methods: authentication methods that weren't recognized
"""
def __init__(self, message, unknown_auth_methods):
super(UnrecognizedAuthMethods, self).__init__(message)
self.unknown_auth_methods = unknown_auth_methods
class IncorrectSocketType(AuthenticationFailure):
'Socket does not speak the control protocol.'
class OpenAuthFailed(AuthenticationFailure):
'Failure to authenticate to an open socket.'
class OpenAuthRejected(OpenAuthFailed):
'Attempt to connect to an open control socket was rejected.'
class PasswordAuthFailed(AuthenticationFailure):
'Failure to authenticate with a password.'
class PasswordAuthRejected(PasswordAuthFailed):
'Socket does not support password authentication.'
class IncorrectPassword(PasswordAuthFailed):
'Authentication password incorrect.'
class MissingPassword(PasswordAuthFailed):
"Password authentication is supported but we weren't provided with one."
class CookieAuthFailed(AuthenticationFailure):
"""
Failure to authenticate with an authentication cookie.
:param str cookie_path: location of the authentication cookie we attempted
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
:param stem.response.ControlMessage auth_response: reply to our
authentication attempt
"""
def __init__(self, message, cookie_path, is_safecookie, auth_response = None):
super(CookieAuthFailed, self).__init__(message, auth_response)
self.is_safecookie = is_safecookie
self.cookie_path = cookie_path
class CookieAuthRejected(CookieAuthFailed):
'Socket does not support cookie authentication.'
class IncorrectCookieValue(CookieAuthFailed):
'Authentication cookie value was rejected.'
class IncorrectCookieSize(CookieAuthFailed):
'Aborted because the cookie file is the wrong size.'
class UnreadableCookieFile(CookieAuthFailed):
'Error arose in reading the authentication cookie.'
class AuthChallengeFailed(CookieAuthFailed):
"""
AUTHCHALLENGE command has failed.
"""
def __init__(self, message, cookie_path):
super(AuthChallengeFailed, self).__init__(message, cookie_path, True)
class AuthChallengeUnsupported(AuthChallengeFailed):
"""
AUTHCHALLENGE isn't implemented.
"""
class UnrecognizedAuthChallengeMethod(AuthChallengeFailed):
"""
Tor couldn't recognize our AUTHCHALLENGE method.
:var str authchallenge_method: AUTHCHALLENGE method that Tor couldn't recognize
"""
def __init__(self, message, cookie_path, authchallenge_method):
super(UnrecognizedAuthChallengeMethod, self).__init__(message, cookie_path)
self.authchallenge_method = authchallenge_method
class AuthSecurityFailure(AuthChallengeFailed):
'AUTHCHALLENGE response is invalid.'
class InvalidClientNonce(AuthChallengeFailed):
'AUTHCHALLENGE request contains an invalid client nonce.'
class MissingAuthInfo(AuthenticationFailure):
"""
The PROTOCOLINFO response didn't have enough information to authenticate.
These are valid control responses but really shouldn't happen in practice.
"""
class NoAuthMethods(MissingAuthInfo):
"PROTOCOLINFO response didn't have any methods for authenticating."
class NoAuthCookie(MissingAuthInfo):
"""
PROTOCOLINFO response supports cookie auth but doesn't have its path.
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
"""
def __init__(self, message, is_safecookie):
super(NoAuthCookie, self).__init__(message)
self.is_safecookie = is_safecookie
# authentication exceptions ordered as per the authenticate function's pydocs
AUTHENTICATE_EXCEPTIONS = (
IncorrectSocketType,
UnrecognizedAuthMethods,
MissingPassword,
IncorrectPassword,
IncorrectCookieSize,
UnreadableCookieFile,
IncorrectCookieValue,
AuthChallengeUnsupported,
UnrecognizedAuthChallengeMethod,
InvalidClientNonce,
AuthSecurityFailure,
OpenAuthRejected,
MissingAuthInfo,
AuthenticationFailure
)
stem-1.8.0/stem/cached_manual.sqlite 0000664 0001750 0001750 00000756000 13601502033 020152 0 ustar atagar atagar 0000000 0000000 SQLite format 3 @ ÷ -ñ¸
6
Ì Â>™ÿj‘å Ìf ‚
tabletorrctorrc
CREATE TABLE torrc(key TEXT PRIMARY KEY, name TEXT, category TEXT, usage TEXT, summary TEXT, description TEXT, position INTEGER))
= indexsqlite_autoindex_torrc_1torrcRtablefilesfilesCREATE TABLE files(name TEXT PRIMARY KEY, description TEXT))= indexsqlite_autoindex_files_1files XtablesignalssignalsCREATE TABLE signals(name TEXT PRIMARY KEY, description TEXT)-A indexsqlite_autoindex_signals_1signalsd##tablecommandlinecommandlineCREATE TABLE commandline(name TEXT PRIMARY KEY, description TEXT)5I# indexsqlite_autoindex_commandline_1commandline UtablemetadatametadataCREATE TABLE metadata(name TEXT, synopsis TEXT, description TEXT, man_commit TEXT, stem_commit TEXT)<UtableschemaschemaCREATE TABLE schema(version INTEGER)
ü ü
’ ’ ˆ"]7Ž)]]tor - The second-generation onion routertor [OPTION value]...Tor is a connection-oriented anonym ì ûöñì
ÿ 0¹ÿž‰aJîp¢¾9Ññ+â =--hash-password PASSWORD!-h, --help
--version6q--service install [--options command-line options]5--list-torrc-options5--keygen [--newpass];--passphrase-fd FILEDES)--quiet|--hush
C--service remove|start|stop+--verify-config%--nt-service
7--allow-missing-torrc 7--defaults-torrc FILE1--list-fingerprint?--list-deprecated-optionsA--key-expiration [purpose]9--ignore-missing-torrc(U--dump-config short|full|non-builtin-f FILE) --list-modules
{ ^õ7Q Ë { NSIGXFSZIf this signal exists on your platform, Tor catches and ignores it.QSIGTERMTor will catch this, clean up and sync to disk if necessary, and exit.1[SIGPIPETor catches this signal and ignores it.cƒ?SIGINTTor clients behave as with SIGTERM; but Tor servers will do a controlled slow shutdown, closing listeners and waiting 30 seconds before exiting. (The delay can be configured with the ShutdownWaitLength config option.)V#SIGUSR1Log statistics about current connections, past connections, and throughput.d?SIGCHLDTor receives this signal when one of its helper processes has exited, so it can clean up.gESIGUSR2Switch all logs to loglevel debug. You can go back to the old loglevels by sending a SIGHUP.‚7SIGHUPThe signal instructs Tor to reload its configuration (including closing and reopening logs), and kill and restart its helper processes if applicable.
£ ÞöÇ»¯Òê£ SIGXFSZSIGTERMSIGPIPE
SIGINTSIGUSR1SIGCHLDSIGUSR2 SIGHUP É ûöñìçâÝØÓÎÉ . + ( %
Ù Ù "IDataDirectory/stats/conn-stats ú Öú Õ8 Ñ ô|Xçé?ÍÑ Ú+TRUNCATELOGFILEO |'QHIDDENSERVICEENABLEINTRODOSDEFENSE “;OUTBOUNDBINDADDRESSEXIT9X ˜ ä!HTTPSPROXY.é ë ;CCOOKIEAUTHFILEGROUPREADABLE :3CIRCUITBUILDTIMEOUTU Â+YTESTINGCLIENTMAXINTERVALWITHOUTREQUEST/ Ÿ-REPHISTTRACKTIME à V-EXCLUDEEXITNODES^ izing communication service. Users choose a source-routed path through a set of nodes, and negotiate a "virtual circuit" through the network. Each node in a virtual circuit knows its predecessor and successor nodes, but no other nodes. Traffic flowing down the circuit is unwrapped by a symmetric key at each node, which reveals the downstream node.
Basically, Tor provides a distributed network of servers or relays ("onion routers"). Users bounce their TCP streams, including web traffic, ftp, ssh, etc., around the network, so that recipients, observers, and even the relays themselves have difficulty tracking the source of the stream.
Note
By default, tor acts as a client only. To help the network by providing bandwidth as a relay, change the ORPort configuration option as mentioned below. Please also consult the documentation on the Tor Project's website.616c0f823c7a65c04de7e03a2c0da1543e2be9e6fde3201470ed1b74916c893ceb8ecd6a065651bf
b nŠ6 b Q9‚{--ignore-missing-torrcSpecify that Tor should treat a missing torrc file as though it were empty. Ordinarily, Tor does this for missing default torrc files, but not for those specified on the command line.‚QU„_--dump-config short|full|non-builtinWrite a complete list of Tor's configured options to standard output. When the short flag is selected, only write the options that are different from their default values. When non-builtin is selected, write options that are not zero or the empty string. When full is selected, write every option.aƒ9-f FILESpecify a new configuration file to contain further Tor configuration options, or pass - to make Tor read its configuration from standard input. (Default: @CONFDIR@/torrc, or $HOME/.torrc if that file is not found))‚--list-modulesList whether each optional module has been compiled into Tor. (Any module not listed is not optional in this version of Tor.)
B "9 B t7ƒC--defaults-torrc FILESpecify a file in which to find default values for Tor options. The contents of this file are overridden by those in the regular configuration file, and by those on the command line. (Default: @CONFDIR@/torrc-defaults.)R1--list-fingerprintGenerate your keys and output your nickname and fingerprint.?w--list-deprecated-optionsList all valid options that are scheduled to become obsolete in a future version. (This is a warning, not a promise.)ƒ[A‡--key-expiration [purpose]The purpose specifies which type of key certificate to determine the expiration of. The only currently recognised purpose is "sign". Running tor --key-expiration sign will attempt to find your signing key certificate and will output, both in the logs as well as to stdout, the signing key certificate's expiration time in ISO-8601 format. For example, the output sent to stdout will be of the form: "signing-cert-expiry: 2017-07-25 08:30:15 UTC"
´ s3𘠴 ƒa
)‡+--quiet|--hushOverride the default console logging behavior. By default, Tor starts out logging messages at level "notice" and higher to the console. It stops doing so after it parses its configuration, if the configuration tells it to log anywhere else. These options override the default console logging behavior. Use the --hush option if you want Tor to log only warnings and errors to the console, or use the --quiet option if you want Tor not to log to the console at all.VC}--service remove|start|stopRemove, start, or stop a configured Tor Windows service.A+k--verify-configVerify whether the configuration file is valid.>
%k--nt-serviceUsed internally to implement a Windows service.
7o--allow-missing-torrcAllow the configuration file specified by -f to be missing, if the defaults-torrc file (see below) is accessible.
J ÊK7 J 3!Y-h, --helpDisplay a short help message and exit.5‚]--versionDisplay Tor version and exit. The output is a single line of the format "Tor version [version number]." (The version number format is as specified in version-spec.txt.)aq‚c--service install [--options command-line options]Install an instance of Tor as a Windows service, with the provided command-line options. Current instructions can be found at https://www.torproject.org/docs/faq#NTService.5;--list-torrc-optionsList all valid options.ˆt5‘E--keygen [--newpass]Running tor --keygen creates a new ed25519 master identity key for a relay, or only a fresh temp ‚3;„=--passphrase-fd FILEDESFile descriptor to read the passphrase from. Note that unlike with the tor-gencert program, the entire file contents are read and used as the passphrase, including any trailing newlines. If the file descriptor is not specified, the passphrase is read from the terminal by default. orary signing key and certificate, if you already have a master key. Optionally, you can encrypt the master identity key with a passphrase. When Tor asks you for a passphrase and you don't want to encrypt the master key, just don't enter any passphrase when asked. Use the --newpass option with --keygen only when you need to add, change, or remove a passphrase on an existing ed25519 master identity key. You will be prompted for the old passphase (if any), and the new passphrase (if any). Note When generating a master key, you may want to use --DataDirectory to control where the keys and certificates will be stored, and --SigningKeyLifetime to control their lifetimes. See the server options section to learn more about the behavior of these options. You must have write access to the specified DataDirectory. To use the generated files, you must copy them to the DataDirectory/keys directory of your Tor daemon, and make sure that they are owned by the user actually running the Tor daemon on your system.
° ° N=s--hash-password PASSWORDGenerate a hashed password for control port access.
[ 6€ðA [ $S‚CacheDirectory/unverified-consensusContains a network consensus document that has been downloaded, but which we didn't have the right certificates to check yet.=+c@CONFDIR@/torrcDefault location of the configuration file.,M‚DataDirectory/hashed-fingerprintOnly used by bridges. Contains the hashed fingerprint of the bridge's identity key. (That is, the hash of the hash of the identity key.)
M_DataDirectory/stats/dirreq-statsOnly used by directory caches and authorities. This file is used to collect directory request statistics.31‚GDataDirectory/lockThis file is used to prevent two Tor instances from using the same data directory. If access to this file is locked, data directory is already in use by Tor.Gg‚9CacheDirectory/unverified-microdesc-consensusContains a microdescriptor-flavored network consensus document that has been downloaded, but which we didn't have the right certificates to check yet.
F g¹F ‚p }„uCacheDirectory/cached-extrainfo and cached-extrainfo.newSimilar to cached-descriptors, but holds optionally-downloaded "extra-info" documents. Relays use these documents to send inessential information about statistics, bandwidth history, and network health to the authorities. They aren't fetched by default. See the DownloadExtraInfo option for more information.+C‚%CacheDirectory/cached-certsContains downloaded directory key certificates that are used to verify authenticity of documents generated by the Tor directory authorities.OoKeyDirectory/ed25519_signing_certThe certificate which authenticates "ed25519_signing_secret_key" as having been signed by the Ed25519 master key.
j Á# Ø j l
MDataDirectory/stats/buffer-statsOnly used by servers. This file is used to collect buffer usage history.‚H„CacheDirectory/cached-microdescs and cached-microdescs.newThese files hold downloaded microdescriptors. Lines beginning with @-signs are annotations that contain more information about a given router. The .new file is an append-only journal; when it gets too large, all entries are merged into a new cached-microdescs file.G‚DataDirectory/v3-status-votesOnly for v3 authoritative directory servers. This file contains status votes from all the authoritative directory servers.‚<
O„;DataDirectory/key-pinning-journalUsed by authorities. A line-based file that records mappings between RSA1024 and Ed25519 identity keys. Authorities enforce these mappings, so that once a relay has picked an Ed25519 key, stealing or factoring the RSA1024 key will no longer let an attacker impersonate the relay.
$ ö" · $ KgKeyDirectory/legacy_certificateAs authority_certificate; used only when V3AuthUseLegacyKey is set. See documentation for V3AuthUseLegacyKey.i?%DataDirectory/fingerprintOnly used by servers. Contains the fingerprint of the server's identity key.QQ‚cKeyDirectory/authority_signing_keyOnly directory authorities use this file. A v3 directory authority's signing key that is used to sign votes and consensuses. Corresponds to the authority_certificate cert.„I‡WDataDirectory/approved-routersOnly used by authoritative directory servers. This file lists the status of routers by their identity fingerprint. Each line lists a status and a fingerprint separated by whitespace. See your fingerprint file in the DataDirectory for an example line. If the status is !reject, then the descriptors from the given identity (fingerprint) are rejected by this server. If it is !invalid, then the descriptors are accepted but marked in the directory as not valid, that is, not recommended.
) xìLÿS ) ‚'S„
KeyDirectory/authority_identity_keyA v3 directory authority's master identity key, used to authenticate its signing key. Tor doesn't use this while it's running. The tor-gencert program uses this. If you're running an authority, you should keep this key offline, and not put it in this file.)A‚#KeyDirectory/secret_id_keyA relay's RSA1024 permanent identity key, including private and public components. Used to sign router descriptors, and to sign other keys.K=m@LOCALSTATEDIR@/lib/tor/The tor process stores keys and other data here.I‚DataDirectory/stats/conn-statsOnly used by servers. This file is used to collect approximate connection history (number of active connections over time). I[DataDirectory/unparseable-descOnion server descriptors that Tor was unable to parse are dumped to this file. Only used for debugging.QKHiddenServiceDirectory/client_keysContains authorization data for a hidden service that is only accessible by authorized clients.
à u2§ à aQƒKeyDirectory/authority_certificateOnly directory authorities use this file. A v3 directory authority's certificate which authenticates the authority's current vote- and consensus-signing key using its master identity key.IYDataDirectory/stats/exit-statsOnly used by servers. This file is used to collect outgoing connection statistics by Tor exit routers.‚@
$HOME/.torrc
    Fallback location for torrc, if @CONFDIR@/torrc is not found.

DataDirectory/state
    Contains a set of persistent key-value mappings. These include:
    o the current entry guards and their status
    o the current bandwidth accounting values
    o when the file was last written
    o what version of Tor generated the state file
    o a short history of bandwidth usage, as produced in the server descriptors

DataDirectory/bw_accounting
    This file is obsolete and the data is now stored in the state file instead. Used to track bandwidth accounting values (when the current period starts and ends; how much has been read and written so far this period).

DataDirectory/networkstatus-bridges
    Only used by authoritative bridge directories. Contains information about bridges that have self-reported themselves to the bridge authority.

DataDirectory/router-stability
    Only used by authoritative directory servers. Tracks measurements for router mean-time-between-failures so that authorities have a fair idea of how to set their Stable flags.

DataDirectory/stats/entry-stats
    Only used by servers. This file is used to collect incoming connection statistics by Tor entry nodes.

DataDirectory/stats/bridge-stats
    Only used by servers. This file is used to collect incoming connection statistics by Tor bridges.

DataDirectory/stats/hidserv-stats
    Only used by servers. This file is used to collect approximate counts of what fraction of the traffic is hidden service rendezvous traffic, and approximately how many hidden services the relay has seen.

KeyDirectory/secret_onion_key and secret_onion_key.old
    A relay's RSA1024 short-term onion key. Used to decrypt old-style ("TAP") circuit extension requests. The .old file holds the previously generated key, which the relay uses to handle any requests that were made by clients that didn't have the new one.

KeyDirectory/secret_onion_key_ntor and secret_onion_key_ntor.old
    A relay's Curve25519 short-term onion key. Used to handle modern ("ntor") circuit extension requests. The .old file holds the previously generated key, which the relay uses to handle any requests that were made by clients that didn't have the new one.

KeyDirectory/ed25519_master_id_public_key
    The public part of a relay's Ed25519 permanent identity key.

KeyDirectory/ed25519_master_id_secret_key
    The private part of a relay's Ed25519 permanent identity key. This key is used to sign the medium-term ed25519 signing key. This file can be kept offline or encrypted. If so, Tor will not be able to generate new signing keys automatically; you'll need to use tor --keygen to do so.

KeyDirectory/ed25519_signing_secret_key
    The private and public components of a relay's medium-term Ed25519 signing key. This key is authenticated by the Ed25519 master key, which in turn authenticates other keys (and router descriptors).

HiddenServiceDirectory/private_key
    Contains the private key for this hidden service.

HiddenServiceDirectory/onion_service_non_anonymous
    This file is present if a hidden service key was created in HiddenServiceNonAnonymousMode.

CacheDirectory/cached-consensus and/or cached-microdesc-consensus
    The most recent consensus network status document we've downloaded.

CacheDirectory/diff-cache
    Directory cache only. Holds older consensuses and diffs from oldest to the most recent consensus of each type compressed in various ways. Each file contains a set of key-value arguments describing its contents, followed by a single NUL byte, followed by the main file contents.

CacheDirectory/cached-descriptors and cached-descriptors.new
    These files contain the downloaded router statuses. Some routers may appear more than once; if so, the most recently published descriptor is used. Lines beginning with @-signs are annotations that contain more information about a given router. The .new file is an append-only journal; when it gets too large, all entries are merged into a new cached-descriptors file.
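These cached descriptor files can be read directly with stem. A minimal sketch, assuming a tor data directory at /var/lib/tor (adjust the path for your system):

from stem.descriptor import parse_file

# each entry in cached-descriptors is a server descriptor; parse_file()
# detects the descriptor type from the file's @-sign annotations
for desc in parse_file('/var/lib/tor/cached-descriptors'):
  print('%s (%s)' % (desc.nickname, desc.fingerprint))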
DataDirectory/control_auth_cookie
    This file can be used only when cookie authentication is enabled. Used for cookie authentication with the controller. Location can be overridden by the CookieAuthFile configuration option. Regenerated on startup. See control-spec.txt in torspec for details.

DataDirectory/sr-state
    Authority only. This file is used to record information about the current status of the shared-random-value voting state.

HiddenServiceDirectory/hostname
    The .onion domain name for this hidden service. If the hidden service is restricted to authorized clients only, this file also contains authorization data for all clients. Note: clients will ignore any extra subdomains prepended to a hidden service hostname. Supposing you have "xyz.onion" as your hostname, you can ask your clients to connect to "www.xyz.onion" or "irc.xyz.onion" for virtual-hosting purposes.

KeyDirectory/legacy_signing_key
    As authority_signing_key: used only when V3AuthUseLegacyKey is set. See documentation for V3AuthUseLegacyKey.
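All of these file descriptions are bundled in stem's cached copy of tor's manual, which this archive ships and which cache_manual.py (shown earlier) refreshes. A minimal sketch that lists them:

import stem.manual

manual = stem.manual.Manual.from_cache()

# manual.files maps tor's file paths to their man page descriptions
for path, description in sorted(manual.files.items()):
  print(path)
  print('  %s' % description)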
AccelName NAME
    OpenSSL engine name for crypto acceleration. When using OpenSSL hardware crypto acceleration attempt to load the dynamic engine of this name. This must be used for any dynamic hardware engine. Names can be verified with the openssl engine command. Can not be changed while tor is running.

    If the engine name is prefixed with a "!", then Tor will exit if the engine cannot be loaded.

AccelDir DIR
    Crypto acceleration library path. Specify this option if using dynamic hardware acceleration and the engine implementation library resides somewhere other than the OpenSSL default. Can not be changed while tor is running.

AlternateBridgeAuthority [nickname] [flags] ipv4address:port fingerprint
AlternateDirAuthority [nickname] [flags] ipv4address:port fingerprint
    Alternative directory authorities (bridges only and consensus only, respectively). These options behave as DirAuthority, but they replace fewer of the default directory authorities. Using AlternateDirAuthority replaces the default Tor directory authorities, but leaves the default bridge authorities in place. Similarly, AlternateBridgeAuthority replaces the default bridge authority, but leaves the directory authorities alone.

AndroidIdentityTag tag
    Tag when logging to the android subsystem. When logging to Android's logging subsystem, adds a tag to the log identity such that log entries are marked with "Tor-tag". Can not be changed while tor is running. (Default: none)
AvoidDiskWrites 0|1
    Toggles if tor avoids frequently writing to disk. If non-zero, try to write to disk less frequently than we would otherwise. This is useful when running on flash memory or other media that support only a limited number of writes. (Default: 0)

BandwidthBurst N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Maximum bandwidth usage limit. Limit the maximum token bucket size (also known as the burst) to the given number of bytes in each direction. (Default: 1 GByte)

BandwidthRate N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Average bandwidth usage limit. A token bucket limits the average incoming bandwidth usage on this node to the specified number of bytes per second, and the average outgoing bandwidth usage to that same value. If you want to run a relay in the public network, this needs to be at the very least 75 KBytes for a relay (that is, 600 kbits) or 50 KBytes for a bridge (400 kbits) -- but of course, more is better; we recommend at least 250 KBytes (2 mbits) if possible. (Default: 1 GByte)

    Note that this option, and other bandwidth-limiting options, apply to TCP data only: They do not count TCP headers or DNS traffic.

    Tor uses powers of two, not powers of ten, so 1 GByte is 1024*1024*1024 bytes as opposed to 1 billion bytes.

    With this option, and in other options that take arguments in bytes, KBytes, and so on, other formats are also supported. Notably, "KBytes" can also be written as "kilobytes" or "kb"; "MBytes" can be written as "megabytes" or "MB"; "kbits" can be written as "kilobits"; and so forth. Case doesn't matter. Tor also accepts "byte" and "bit" in the singular. The prefixes "tera" and "T" are also recognized. If no units are given, we default to bytes. To avoid confusion, we recommend writing "bytes" or "bits" explicitly, since it's easy to forget that "B" means bytes, not bits.
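These rate options can be adjusted on a live tor through its control port; tor itself parses the units. A minimal sketch with stem, assuming a controller reachable via stem.connection.connect()'s defaults (port 9051 or the default control socket):

import stem.connection

controller = stem.connection.connect()  # handles authentication, or returns None

if controller:
  # any of the unit formats described above is accepted
  controller.set_conf('BandwidthRate', '250 KBytes')
  print(controller.get_conf('BandwidthRate'))
  controller.close()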
CacheDirectory DIR
    Directory where information is cached. Store cached directory data in DIR. Can not be changed while tor is running. (Default: uses the value of DataDirectory.)

CacheDirectoryGroupReadable 0|1|auto
    Group read permissions for the cache directory. If this option is set to 0, don't allow the filesystem group to read the CacheDirectory. If the option is set to 1, make the CacheDirectory readable by the default GID. If the option is "auto", then we use the setting for DataDirectoryGroupReadable when the CacheDirectory is the same as the DataDirectory, and 0 otherwise. (Default: auto)
CircuitPriorityHalflife NUM
    Overwrite method for prioritizing traffic among relayed connections. If this value is set, we override the default algorithm for choosing which circuit's cell to deliver or relay next. It is delivered first to the circuit that has the lowest weighted cell count, where cells are weighted exponentially according to this value (in seconds). If the value is -1, it is taken from the consensus if possible, else it will fall back to the default value of 30. Minimum: 1, Maximum: 2147483647. This can be defined as a float value. This is an advanced option; you generally shouldn't have to mess with it. (Default: -1)
ClientTransportPlugin transport exec path-to-binary [options]
    Proxy when establishing bridge connections. In its first form, when set along with a corresponding Bridge line, the Tor client forwards its traffic to a SOCKS-speaking proxy on "IP:PORT". (IPv4 addresses should be written as-is; IPv6 addresses should be wrapped in square brackets.) It's the duty of that proxy to properly forward the traffic to the bridge.

    In its second form, when set along with a corresponding Bridge line, the Tor client launches the pluggable transport proxy executable in path-to-binary using options as its command-line options, and forwards its traffic to it. It's the duty of that proxy to properly forward the traffic to the bridge. (Default: none)
ConnLimit NUM
    Minimum number of file descriptors for Tor to start. The minimum number of file descriptors that must be available to the Tor process before it will start. Tor will ask the OS for as many file descriptors as the OS will allow (you can find this by "ulimit -H -n"). If this number is less than ConnLimit, then Tor will refuse to start.

    Tor relays need thousands of sockets, to connect to every other relay. If you are running a private bridge, you can reduce the number of sockets that Tor uses. For example, to limit Tor to 500 sockets, run "ulimit -n 500" in a shell. Then start tor in the same shell, with ConnLimit 500. You may also need to set DisableOOSCheck 0.

    Unless you have severely limited sockets, you probably don't need to adjust ConnLimit itself. It has no effect on Windows, since that platform lacks getrlimit(). (Default: 1000)

ConstrainedSockets 0|1
    Shrinks sockets to ConstrainedSockSize. If set, Tor will tell the kernel to attempt to shrink the buffers for all sockets to the size specified in ConstrainedSockSize. This is useful for virtual servers and other environments where system level TCP buffers may be limited. If you're on a virtual server, and you encounter the "Error creating network socket: No buffer space available" message, you are likely experiencing this problem.

    The preferred solution is to have the admin increase the buffer pool for the host itself via /proc/sys/net/ipv4/tcp_mem or equivalent facility; this configuration option is a second-resort.

    The DirPort option should also not be used if TCP buffers are scarce. The cached directory requests consume additional sockets which exacerbates the problem.

    You should not enable this feature unless you encounter the "no buffer space available" issue. Reducing the TCP buffers affects window size for the TCP stream and will reduce throughput in proportion to round trip time on long paths. (Default: 0)
ConstrainedSockSize N bytes|KBytes
    Limit for the receive and transmit buffers of sockets. When ConstrainedSockets is enabled the receive and transmit buffers for all sockets will be set to this limit. Must be a value between 2048 and 262144, in 1024 byte increments. Default of 8192 is recommended.

ControlPort [address:]port|unix:path|auto [flags]
    Port providing access to tor controllers (nyx, vidalia, etc). If set, Tor will accept connections on this port and allow those connections to control the Tor process using the Tor Control Protocol (described in control-spec.txt in torspec). Note: unless you also specify one or more of HashedControlPassword or CookieAuthentication, setting this option will cause Tor to allow any process on the local host to control it. (Setting both authentication methods means either method is sufficient to authenticate to Tor.) This option is required for many Tor controllers; most use the value of 9051. If a unix domain socket is used, you may quote the path using standard C escape sequences. You can specify this directive multiple times, to bind to multiple address/port pairs. Set it to "auto" to have Tor pick a port for you. (Default: 0)
    Recognized flags are:

    GroupWritable
        Unix domain sockets only: makes the socket get created as group-writable.

    WorldWritable
        Unix domain sockets only: makes the socket get created as world-writable.

    RelaxDirModeCheck
        Unix domain sockets only: Do not insist that the directory that holds the socket be read-restricted.
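Controllers authenticate to this port before issuing commands. A minimal sketch with stem, assuming tor was started with "ControlPort 9051" and "CookieAuthentication 1":

from stem.control import Controller

with Controller.from_port(port = 9051) as controller:
  # authenticate() negotiates whichever method tor advertises: the
  # control_auth_cookie file or a password, per the PROTOCOLINFO reply
  controller.authenticate()

  print('tor is running version %s' % controller.get_version())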
ControlPortFileGroupReadable 0|1
    Group read permissions for the control port file. If this option is set to 0, don't allow the filesystem group to read the control port file. If the option is set to 1, make the control port file readable by the default GID. (Default: 0)

ControlPortWriteToFile Path
    Path for a file tor writes containing its control port. If set, Tor writes the address and port of any control port it opens to this address. Usable by controllers to learn the actual control port when ControlPort is set to "auto".

ControlSocket Path
    Socket providing controller access. Like ControlPort, but listens on a Unix domain socket, rather than a TCP socket. 0 disables ControlSocket. (Unix and Unix-like systems only.) (Default: 0)

ControlSocketsGroupWritable 0|1
    Group read permissions for the control socket. If this option is set to 0, don't allow the filesystem group to read and write unix sockets (e.g. ControlSocket). If the option is set to 1, make the control socket readable and writable by the default GID. (Default: 0)

CookieAuthentication 0|1
    If set, authenticates controllers via a cookie. If this option is set to 1, allow connections on the control port when the connecting process knows the contents of a file named "control_auth_cookie", which Tor will create in its data directory. This authentication method should only be used on systems with good filesystem security. (Default: 0)

CookieAuthFile Path
    Location of the authentication cookie. If set, this option overrides the default location and file name for Tor's cookie file. (See CookieAuthentication above.)

CookieAuthFileGroupReadable 0|1
    Group read permissions for the authentication cookie. If this option is set to 0, don't allow the filesystem group to read the cookie file. If the option is set to 1, make the cookie file readable by the default GID. [Making the file readable by other groups is not yet implemented; let us know if you need this for some reason.] (Default: 0)

CountPrivateBandwidth 0|1
    Applies rate limiting to private IP addresses. If this option is set, then Tor's rate-limiting applies not only to remote connections, but also to connections to private addresses like 127.0.0.1 or 10.0.0.1. This is mostly useful for debugging rate-limiting. (Default: 0)
DataDirectory DIR
    Location for storing runtime data (state, keys, etc). Store working data in DIR. Can not be changed while tor is running. (Default: ~/.tor if your home directory is not /; otherwise, @LOCALSTATEDIR@/lib/tor. On Windows, the default is your ApplicationData folder.)

DataDirectoryGroupReadable 0|1
    Group read permissions for the data directory. If this option is set to 0, don't allow the filesystem group to read the DataDirectory. If the option is set to 1, make the DataDirectory readable by the default GID. (Default: 0)

DirAuthority [nickname] [flags] ipv4address:dirport fingerprint
    Alternative directory authorities. Use a nonstandard authoritative directory server at the provided address and port, with the specified key fingerprint. This option can be repeated many times, for multiple authoritative directory servers. Flags are separated by spaces, and determine what kind of an authority this directory is. By default, an authority is not authoritative for any directory style or version unless an appropriate flag is given.

    Tor will use this authority as a bridge authoritative directory if the "bridge" flag is set. If a flag "orport=orport" is given, Tor will use the given port when opening encrypted tunnels to the dirserver. If a flag "weight=num" is given, then the directory server is chosen randomly with probability proportional to that weight (default 1.0). If a flag "v3ident=fp" is given, the dirserver is a v3 directory authority whose v3 long-term signing key has the fingerprint fp. Lastly, if an "ipv6=[ipv6address]:orport" flag is present, then the directory authority is listening for IPv6 connections on the indicated IPv6 address and OR Port.

    Tor will contact the authority at ipv4address to download directory documents. Clients always use the ORPort. Relays usually use the DirPort, but will use the ORPort in some circumstances. If an IPv6 ORPort is supplied, clients will also download directory documents at the IPv6 ORPort, if they are configured to use IPv6.

    If no DirAuthority line is given, Tor will use the default directory authorities. NOTE: this option is intended for setting up a private Tor network with its own directory authorities. If you use it, you will be distinguishable from other users, because you won't believe the same authorities they do.
DirAuthorityFallbackRate NUM
    Rate at which to use fallback directory. When configured to use both directory authorities and fallback directories, the directory authorities also work as fallbacks. They are chosen with their regular weights, multiplied by this number, which should be 1.0 or less. The default is less than 1, to reduce load on authorities. (Default: 0.1)

DisableAllSwap 0|1
    Locks all allocated memory so it can't be paged out. If set to 1, Tor will attempt to lock all current and future memory pages, so that memory cannot be paged out. Windows, OS X and Solaris are currently not supported. We believe that this feature works on modern Gnu/Linux distributions, and that it should work on *BSD systems (untested). This option requires that you start your Tor as root, and you should use the User option to properly reduce Tor's privileges. Can not be changed while tor is running. (Default: 0)

DisableDebuggerAttachment 0|1
    Limit information applications can retrieve about the process. If set to 1, Tor will attempt to prevent basic debugging attachment attempts by other processes. This may also keep Tor from generating core files if it crashes. It has no impact for users who wish to attach if they have CAP_SYS_PTRACE or if they are root. We believe that this feature works on modern Gnu/Linux distributions, and that it may also work on *BSD systems (untested). Some modern Gnu/Linux systems such as Ubuntu have the kernel.yama.ptrace_scope sysctl and by default enable it as an attempt to limit the PTRACE scope for all user processes by default. This feature will attempt to limit the PTRACE scope for Tor specifically - it will not attempt to alter the system wide ptrace scope as it may not even exist. If you wish to attach to Tor with a debugger such as gdb or strace you will want to set this to 0 for the duration of your debugging. Normal users should leave it on. Disabling this option while Tor is running is prohibited. (Default: 1)

DisableNetwork 0|1
    Don't accept non-controller connections. When this option is set, we don't listen for or accept any connections other than controller connections, and we close (and don't reattempt) any outbound connections. Controllers sometimes use this option to avoid using the network until Tor is fully configured. Tor will still make certain network-related calls (like DNS lookups) as a part of its configuration process, even if DisableNetwork is set. (Default: 0)
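Controllers commonly flip this option once configuration is complete. A minimal sketch with stem, assuming a control port on 9051 as in the earlier examples:

from stem.control import Controller

with Controller.from_port(port = 9051) as controller:
  controller.authenticate()

  # finish configuring tor, then let it touch the network
  controller.set_conf('DisableNetwork', '0')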
ExtendByEd25519ID 0|1|auto
    Include Ed25519 identifier when extending circuits. If this option is set to 1, we always try to include a relay's Ed25519 ID when telling the proceeding relay in a circuit to extend to it. If this option is set to 0, we never include Ed25519 IDs when extending circuits. If the option is set to "default", we obey a parameter in the consensus document. (Default: auto)

ExtORPort [address:]port|auto
    Endpoint for extended ORPort connections. Open this port to listen for Extended ORPort connections from your pluggable transports.

ExtORPortCookieAuthFile Path
    Location of the ExtORPort's authentication cookie. If set, this option overrides the default location and file name for the Extended ORPort's cookie file -- the cookie file is needed for pluggable transports to communicate through the Extended ORPort. (Default: DataDirectory/extended_orport_auth_cookie)
ExtORPortCookieAuthFileGroupReadable 0|1
    Group read permissions for the ExtORPort's authentication cookie. If this option is set to 0, don't allow the filesystem group to read the Extended OR Port cookie file. If the option is set to 1, make the cookie file readable by the default GID. [Making the file readable by other groups is not yet implemented; let us know if you need this for some reason.] (Default: 0)

FallbackDir ipv4address:dirport orport=orport id=fingerprint [weight=num] [ipv6=[ipv6address]:orport]
    Fallback when unable to retrieve descriptor information. When tor is unable to connect to any directory cache for directory info (usually because it doesn't know about any yet) it tries a hard-coded directory. Relays try one directory authority at a time. Clients try multiple directory authorities and FallbackDirs, to avoid hangs on startup if a hard-coded directory is down. Clients wait for a few seconds between each attempt, and retry FallbackDirs more often than directory authorities, to reduce the load on the directory authorities.

    FallbackDirs should be stable relays with stable IP addresses, ports, and identity keys. They must have a DirPort.

    By default, the directory authorities are also FallbackDirs. Specifying a FallbackDir replaces Tor's default hard-coded FallbackDirs (if any). (See the DirAuthority entry for an explanation of each flag.)
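stem keeps a cached copy of tor's hard-coded fallback list (refreshed by cache_fallback_directories.py earlier in this archive) and can enumerate it. A minimal sketch:

import stem.directory

# from_cache() reads the copy bundled with stem; from_remote() would
# fetch tor's latest fallback_dirs.inc instead
for fallback in stem.directory.Fallback.from_cache().values():
  print('%s:%s (fingerprint %s)' % (fallback.address, fallback.dir_port, fallback.fingerprint))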
FetchDirInfoEarly 0|1
    Keeps consensus information up to date, even if unnecessary. If set to 1, Tor will always fetch directory information like other directory caches, even if you don't meet the normal criteria for fetching early. Normal users should leave it off. (Default: 0)

FetchDirInfoExtraEarly 0|1
    Updates consensus information when it's first available. If set to 1, Tor will fetch directory information before other directory caches. It will attempt to download directory information closer to the start of the consensus period. Normal users should leave it off. (Default: 0)

FetchHidServDescriptors 0|1
    Toggles if hidden service descriptors are fetched automatically or not. If set to 0, Tor will never fetch any hidden service descriptors from the rendezvous directories. This option is only useful if you're using a Tor controller that handles hidden service fetches for you. (Default: 1)
FetchServerDescriptors 0|1
    Toggles if the consensus is fetched automatically or not. If set to 0, Tor will never fetch any network status summaries or server descriptors from the directory servers. This option is only useful if you're using a Tor controller that handles directory fetches for you. (Default: 1)

FetchUselessDescriptors 0|1
    Toggles if relay descriptors are fetched when they aren't strictly necessary. If set to 1, Tor will fetch every consensus flavor, and all server descriptors and authority certificates referenced by those consensuses, except for extra info descriptors. When this option is 1, Tor will also keep fetching descriptors, even when idle. If set to 0, Tor will avoid fetching useless descriptors: flavors that it is not using to build circuits, and authority certificates it does not trust. When Tor hasn't built any application circuits, it will go idle, and stop fetching descriptors. This option is useful if you're using a tor client with an external parser that uses a full consensus. This option fetches all documents except extrainfo descriptors, DirCache fetches and serves all documents except extrainfo descriptors, DownloadExtraInfo* fetches extrainfo documents, and serves them if DirCache is on, and UseMicrodescriptors changes the flavour of consensuses and descriptors that is fetched and used for building circuits. (Default: 0)

HardwareAccel 0|1
    Toggles if tor attempts to use hardware acceleration. If non-zero, try to use built-in (static) crypto hardware acceleration when available. Can not be changed while tor is running. (Default: 0)
HTTPProxy host[:port]
    HTTP proxy for connecting to tor. Tor will make all its directory requests through this host:port (or host:80 if port is not specified), rather than connecting directly to any directory servers. (DEPRECATED: As of 0.3.1.0-alpha you should use HTTPSProxy.)

HashedControlPassword hashed_password
    Hash of the password for authenticating to the control port. Allow connections on the control port if they present the password whose one-way hash is hashed_password. You can compute the hash of a password by running "tor --hash-password password". You can provide several acceptable passwords by using more than one HashedControlPassword line.
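One way to wire this up from Python is to hash the password with the tor binary, hand the result to a freshly launched tor, then authenticate with the plaintext. A minimal sketch ("example_password" is a placeholder):

import stem.process
import stem.util.system
from stem.control import Controller

# "tor --hash-password" prints the salted hash tor expects; the hash is
# the last line of output
hashed = stem.util.system.call('tor --hash-password example_password')[-1]

tor_process = stem.process.launch_tor_with_config(
  config = {
    'SocksPort': '9050',
    'ControlPort': '9051',
    'HashedControlPassword': hashed,
  },
)

with Controller.from_port(port = 9051) as controller:
  controller.authenticate(password = 'example_password')
  print(controller.get_info('version'))

tor_process.kill()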
HTTPProxyAuthenticator username:password
    Authentication credentials for HTTPProxy. If defined, Tor will use this username:password for Basic HTTP proxy authentication, as in RFC 2617. This is currently the only form of HTTP proxy authentication that Tor supports; feel free to submit a patch if you want it to support others. (DEPRECATED: As of 0.3.1.0-alpha you should use HTTPSProxyAuthenticator.)

HTTPSProxy host[:port]
    SSL proxy for connecting to tor. Tor will make all its OR (SSL) connections through this host:port (or host:443 if port is not specified), via HTTP CONNECT rather than connecting directly to servers. You may want to set FascistFirewall to restrict the set of ports you might try to connect to, if your HTTPS proxy only allows connecting to certain ports.

HTTPSProxyAuthenticator username:password
    Authentication credentials for HTTPSProxy. If defined, Tor will use this username:password for Basic HTTPS proxy authentication, as in RFC 2617. This is currently the only form of HTTPS proxy authentication that Tor supports; feel free to submit a patch if you want it to support others.

KeepalivePeriod NUM
    Rate at which to send keepalive packets. To keep firewalls from expiring connections, send a padding keepalive cell every NUM seconds on open connections that are in use. (Default: 5 minutes)
KeepBindCapabilities 0|1|auto
    Retain permission for binding to low valued ports. On Linux, when we are started as root and we switch our identity using the User option, the KeepBindCapabilities option tells us whether to try to retain our ability to bind to low ports. If this value is 1, we try to keep the capability; if it is 0 we do not; and if it is auto, we keep the capability only if we are configured to listen on a low port. Can not be changed while tor is running. (Default: auto.)

Log [domain,...]minSeverity[-maxSeverity] ... stderr|stdout|syslog
    Runlevels and location for tor logging. As above, but select messages by range of log severity and by a set of "logging domains". Each logging domain corresponds to an area of functionality inside Tor. You can specify any number of severity ranges for a single log statement, each of them prefixed by a comma-separated list of logging domains. You can prefix a domain with ~ to indicate negation, and use * to indicate "all domains". If you specify a severity range without a list of domains, it matches all domains.

    This is an advanced feature which is most useful for debugging one or two of Tor's subsystems at a time.

    The currently recognized domains are: general, crypto, net, config, fs, protocol, mm, http, app, control, circ, rend, bug, dir, dirserv, or, edge, acct, hist, handshake, heartbeat, channel, sched, guard, consdiff, dos, process, pt, btrack, and mesg. Domain names are case-insensitive.

    For example, "Log [handshake]debug [~net,~mm]info notice stdout" sends to stdout: all handshake messages of any severity, all info-and-higher messages from domains other than networking and memory management, and all messages of severity notice or higher.
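Log lines can also be swapped at runtime over the control port. A minimal sketch with stem, reusing the domain syntax above (assumes a control port on 9051):

from stem.control import Controller

with Controller.from_port(port = 9051) as controller:
  controller.authenticate()

  # route handshake debugging to stdout, everything else at notice
  controller.set_conf('Log', '[handshake]debug [~handshake]notice stdout')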
LogMessageDomains 0|1
    Includes a domain when logging messages. If 1, Tor includes message domains with each log message. Every log message currently has at least one domain; most currently have exactly one. This doesn't affect controller log messages. (Default: 0)

LogTimeGranularity NUM
    Limits granularity of log message timestamps. Set the resolution of timestamps in Tor's logs to NUM milliseconds. NUM must be positive and either a divisor or a multiple of 1 second. Note that this option only controls the granularity written by Tor to a file or console log. Tor does not (for example) "batch up" log messages to affect times logged by a controller, times attached to syslog messages, or the mtime fields on log files. (Default: 1 second)
MaxAdvertisedBandwidth N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Limit for the bandwidth we advertise as being available for relaying. If set, we will not advertise more than this amount of bandwidth for our BandwidthRate. Server operators who want to reduce the number of clients who ask to build circuits through them (since this is proportional to advertised bandwidth rate) can thus reduce the CPU demands on their server without impacting network performance.
MaxUnparseableDescSizeToLog N bytes|KBytes|MBytes|GBytes|TBytes
    Size of the dedicated log for unparseable descriptors. Unparseable descriptors (e.g. for votes, consensuses, routers) are logged in separate files by hash, up to the specified size in total. Note that only files logged during the lifetime of this Tor process count toward the total; this is intended to be used to debug problems without opening live servers to resource exhaustion attacks. (Default: 10 MBytes)

NoExec 0|1
    Prevents any launch of other executables. If this option is set to 1, then Tor will never launch another executable, regardless of the settings of ClientTransportPlugin or ServerTransportPlugin. Once this option has been set to 1, it cannot be set back to 0 without restarting Tor. (Default: 0)
OutboundBindAddress IP
    Sets the IP used for connecting to tor. Make all outbound connections originate from the IP address specified. This is only useful when you have multiple network interfaces, and you want all of Tor's outgoing connections to use a single one. This option may be used twice, once with an IPv4 address and once with an IPv6 address. IPv6 addresses should be wrapped in square brackets. This setting is ignored for connections to the loopback addresses (127.0.0.0/8 and ::1), and is not used for DNS requests either.

OutboundBindAddressExit IP
    Make all outbound exit connections originate from the IP address specified. This option overrides OutboundBindAddress for the same IP version. This option may be used twice, once with an IPv4 address and once with an IPv6 address. IPv6 addresses should be wrapped in square brackets. This setting will be ignored for connections to the loopback addresses (127.0.0.0/8 and ::1).
OutboundBindAddressOR IP
    Make all outbound non-exit (relay and other) connections originate from the IP address specified. This option overrides OutboundBindAddress for the same IP version. This option may be used twice, once with an IPv4 address and once with an IPv6 address. IPv6 addresses should be wrapped in square brackets. This setting will be ignored for connections to the loopback addresses (127.0.0.0/8 and ::1).

PerConnBWBurst N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Maximum relayed bandwidth limit per connection. If this option is set manually, or via the "perconnbwburst" consensus field, Tor will use it for separate rate limiting for each connection from a non-relay. (Default: 0)

PerConnBWRate N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Average relayed bandwidth limit per connection. If this option is set manually, or via the "perconnbwrate" consensus field, Tor will use it for separate rate limiting for each connection from a non-relay. (Default: 0)

PidFile FILE
    Path for a file tor writes containing its process id. On startup, write our PID to FILE. On clean shutdown, remove FILE. Can not be changed while tor is running.

ProtocolWarnings 0|1
    Toggles if protocol errors give warnings or not. If 1, Tor will log with severity 'warn' various cases of other parties not following the Tor specification. Otherwise, they are logged with severity 'info'. (Default: 0)
RelayBandwidthBurst N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Maximum bandwidth usage limit for relaying. If not 0, limit the maximum token bucket size (also known as the burst) for _relayed traffic_ to the given number of bytes in each direction. These limits do not include directory fetches by the relay (from authority or other relays), because that is considered "client" activity. (Default: 0)

RelayBandwidthRate N bytes|KBytes|MBytes|GBytes|TBytes|KBits|MBits|GBits|TBits
    Average bandwidth usage limit for relaying. If not 0, a separate token bucket limits the average incoming bandwidth usage for _relayed traffic_ on this node to the specified number of bytes per second, and the average outgoing bandwidth usage to that same value. Relayed traffic currently is calculated to include answers to directory requests, but that may change in future versions. These limits do not include directory fetches by the relay (from authority or other relays), because that is considered "client" activity. (Default: 0)

RunAsDaemon 0|1
    Toggles if tor runs as a daemon process. If 1, Tor forks and daemonizes to the background. This option has no effect on Windows; instead you should use the --service command-line option. Can not be changed while tor is running. (Default: 0)
SafeLogging 0|1|relay
    Toggles if logs are scrubbed of sensitive information. Tor can scrub potentially sensitive strings from log messages (e.g. addresses) by replacing them with the string [scrubbed]. This way logs can still be useful, but they don't leave behind personally identifying information about what sites a user might have visited.

    If this option is set to 0, Tor will not perform any scrubbing; if it is set to 1, all potentially sensitive strings are replaced. If it is set to relay, all log messages generated when acting as a relay are sanitized, but all messages generated when acting as a client are not. Note: Tor may not heed this option when logging at log levels below Notice. (Default: 1)
Sandbox 0|1
    Run within a syscall sandbox. If set to 1, Tor will run securely through the use of a syscall sandbox. Otherwise the sandbox will be disabled. The option is currently an experimental feature. It only works on Linux-based operating systems, and only when Tor has been built with the libseccomp library. This option can not be changed while tor is running.

    When the Sandbox is 1, the following options can not be changed when tor is running: Address, ConnLimit, CookieAuthFile, DirPortFrontPage, ExtORPortCookieAuthFile, Logs, ServerDNSResolvConfFile, ClientOnionAuthDir (and any files in it won't reload on HUP signal).

    Launching new Onion Services through the control port is not supported with the current syscall sandboxing implementation.

    Tor must remain in client or server mode (some changes to ClientOnly and ORPort are not allowed). Currently, if Sandbox is 1, the ControlPort command "GETINFO address" will not work.

    (Default: 0)
Schedulers KIST|KISTLite|Vanilla
    Scheduling algorithm by which to send outbound data. Specify the scheduler type that tor should use. The scheduler is responsible for moving data around within a Tor process. This is an ordered list by priority, which means that the first value will be tried first and if unavailable, the second one is tried and so on. It is possible to change these values at runtime. This option mostly affects relays, and most operators should leave it set to its default value. (Default: KIST,KISTLite,Vanilla)

    The possible scheduler types are:

    KIST: Kernel-Informed Socket Transport. Tor will use TCP information from the kernel to make informed decisions regarding how much data to send and when to send it. KIST also handles traffic in batches (see KISTSchedRunInterval) in order to improve traffic prioritization decisions. As implemented, KIST will only work on Linux kernel version 2.6.39 or higher.

    KISTLite: Same as KIST but without kernel support. Tor will use all the same mechanics as with KIST, including the batching, but its decisions regarding how much data to send will not be as good. KISTLite will work on all kernels and operating systems, and the majority of the benefits of KIST are still realized with KISTLite.

    Vanilla: The scheduler that Tor used before KIST was implemented. It sends as much data as possible, as soon as possible. Vanilla will work on all kernels and operating systems.

KISTSchedRunInterval NUM msec
    Scheduling interval if using KIST. If KIST or KISTLite is used in the Schedulers option, this controls the interval of the scheduler tick. If the value is 0 msec, the value is taken from the consensus if possible, else it will fall back to the default 10 msec. Maximum possible value is 100 msec. (Default: 0 msec)
KISTSockBufSizeFactor NUM
    Multiplier for per-socket limit if using KIST. If KIST is used in Schedulers, this is a multiplier of the per-socket limit calculation of the KIST algorithm. (Default: 1.0)

ServerTransportListenAddr transport IP:PORT
    Endpoint for bridge's pluggable transport proxy. When this option is set, Tor will suggest IP:PORT as the listening address of any pluggable transport proxy that tries to launch transport. (IPv4 addresses should be written as-is; IPv6 addresses should be wrapped in square brackets.) (Default: none)

ServerTransportOptions transport k=v k=v ...
    Additional arguments for bridge's proxy. When this option is set, Tor will pass the k=v parameters to any pluggable transport proxy that tries to launch transport.

    (Example: ServerTransportOptions obfs45 shared-secret=bridgepasswd cache=/var/lib/tor/cache) (Default: none)
ServerTransportPlugin transport exec path-to-binary [options]
    Proxy when servicing bridge connections. The Tor relay launches the pluggable transport proxy in path-to-binary using options as its command-line options, and expects to receive proxied client traffic from it. (Default: none)

Socks4Proxy host[:port]
    SOCKS 4 proxy for connecting to tor. Tor will make all OR connections through the SOCKS 4 proxy at host:port (or host:1080 if port is not specified).

Socks5Proxy host[:port]
    SOCKS 5 proxy for connecting to tor. Tor will make all OR connections through the SOCKS 5 proxy at host:port (or host:1080 if port is not specified).

Socks5ProxyUsername username
    Username for connecting to the Socks5Proxy.

Socks5ProxyPassword password
    Password for connecting to the Socks5Proxy. If defined, authenticate to the SOCKS 5 server using username and password in accordance to RFC 1929. Both username and password must be between 1 and 255 characters.

SyslogIdentityTag tag
    Tag logs appended to the syslog as being from tor. When logging to syslog, adds a tag to the syslog identity such that log entries are marked with "Tor-tag". Can not be changed while tor is running. (Default: none)

TruncateLogFile 0|1
    Overwrites log file rather than appending when restarted. If 1, Tor will overwrite logs at startup and in response to a HUP signal, instead of appending to them. (Default: 0)
UnixSocksGroupWritable 0|1
    Group write permissions for the socks socket. If this option is set to 0, don't allow the filesystem group to read and write unix sockets (e.g. SocksPort unix:). If the option is set to 1, make the Unix socket readable and writable by the default GID. (Default: 0)

UseDefaultFallbackDirs 0|1
    Use hard-coded fallback directory authorities when needed. Use Tor's default hard-coded FallbackDirs (if any). (When a FallbackDir line is present, it replaces the hard-coded FallbackDirs, regardless of the value of UseDefaultFallbackDirs.) (Default: 1)

User Username
    UID for the process when started. On startup, setuid to this user and setgid to their primary group. Can not be changed while tor is running.

Bridge [transport] IP:ORPort [fingerprint]
    Available bridges. When set along with UseBridges, instructs Tor to use the relay at "IP:ORPort" as a "bridge" relaying into the Tor network. If "fingerprint" is provided (using the same format as for DirAuthority), we will verify that the relay running at that location has the right fingerprint. We also use fingerprint to look up the bridge descriptor at the bridge authority, if it's provided and if UpdateBridgesFromAuthority is set too.

    If "transport" is provided, it must match a ClientTransportPlugin line. We then use that pluggable transport's proxy to transfer data to the bridge, rather than connecting to the bridge directly. Some transports use a transport-specific method to work out the remote address to connect to. These transports typically ignore the "IP:ORPort" specified in the bridge line.

    Tor passes any "key=val" settings to the pluggable transport proxy as per-connection arguments when connecting to the bridge. Consult the documentation of the pluggable transport for details of what arguments it supports.
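Bridges can be configured over the control port as well. A minimal sketch with stem; the bridge address below is a documentation placeholder, not a real bridge:

from stem.control import Controller

with Controller.from_port(port = 9051) as controller:
  controller.authenticate()

  # set_options() applies both values in a single SETCONF, so tor never
  # sees UseBridges without a Bridge line
  controller.set_options({
    'UseBridges': '1',
    'Bridge': '203.0.113.5:443',
  })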
CircuitBuildTimeout NUM
    Initial timeout for circuit creation. Try for at most NUM seconds when building circuits. If the circuit isn't open in that time, give up on it. If LearnCircuitBuildTimeout is 1, this value serves as the initial value to use before a timeout is learned. If LearnCircuitBuildTimeout is 0, this value is the only value used. (Default: 60 seconds)

LearnCircuitBuildTimeout 0|1
    Toggles adaptive timeouts for circuit creation. If 0, CircuitBuildTimeout adaptive learning is disabled. (Default: 1)
CircuitsAvailableTimeout NUM
    Time to keep circuits open and unused for. Tor will attempt to keep at least one open, unused circuit available for this amount of time. This option governs how long idle circuits are kept open, as well as the amount of time Tor will keep a circuit open to each of the recently used ports. This way when the Tor client is entirely idle, it can expire all of its circuits, and then expire its TLS connections. Note that the actual timeout value is uniformly randomized from the specified value to twice that amount. (Default: 30 minutes; Max: 24 hours)

CircuitStreamTimeout NUM
    Timeout for shifting streams among circuits. If non-zero, this option overrides our internal timeout schedule for how many seconds until we detach a stream from a circuit and try a new circuit. If your network is particularly slow, you might want to set this to a number like 60. (Default: 0)
ClientOnly 0|1
    Ensures that we aren't used as a relay or directory mirror. If set to 1, Tor will not run as a relay or serve directory requests, even if the ORPort, ExtORPort, or DirPort options are set. (This config option is mostly unnecessary: we added it back when we were considering having Tor clients auto-promote themselves to being relays if they were stable and fast enough. The current behavior is simply that Tor is a client unless ORPort, ExtORPort, or DirPort are configured.) (Default: 0)
ConnectionPadding 0|1|auto
    Pad traffic to help prevent correlation attacks. This option governs Tor's use of padding to defend against some forms of traffic analysis. If it is set to auto, Tor will send padding only if both the client and the relay support it. If it is set to 0, Tor will not send any padding cells. If it is set to 1, Tor will still send padding for client connections regardless of relay support. Only clients may set this option. This option should be offered via the UI to mobile users for use where bandwidth may be expensive. (Default: auto)

ReducedConnectionPadding 0|1
    Reduce padding and increase circuit cycling for low bandwidth connections. If set to 1, Tor will not hold OR connections open for very long, and will send less padding on these connections. Only clients may set this option. This option should be offered via the UI to mobile users for use where bandwidth may be expensive. (Default: 0)