stem-1.7.1/cache_manual.py

#!/usr/bin/env python
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Caches tor's latest manual content. Run this to pick up new man page changes.
"""
import re
import sys
import stem.manual
import stem.util.system
try:
  # account for urllib's change between python 2.x and 3.x
  import urllib.request as urllib
except ImportError:
  import urllib2 as urllib
GITWEB_MAN_LOG = 'https://gitweb.torproject.org/tor.git/log/doc/tor.1.txt'
MAN_LOG_LINK = "href='/tor.git/commit/doc/tor.1.txt\?id=([^']*)'"
if __name__ == '__main__':
  try:
    man_log_page = urllib.urlopen(GITWEB_MAN_LOG).read()
    man_commit = re.search(MAN_LOG_LINK, man_log_page).group(1)
  except:
    print("Unable to determine the latest commit to edit tor's man page: %s" % sys.exc_info()[1])
    sys.exit(1)

  try:
    stem_commit = stem.util.system.call('git rev-parse HEAD')[0]
  except IOError as exc:
    print("Unable to determine stem's current commit: %s" % exc)
    sys.exit(1)

  print('Latest tor commit editing man page: %s' % man_commit)
  print('Current stem commit: %s' % stem_commit)
  print('')

  try:
    cached_manual = stem.manual.Manual.from_cache()
    db_schema = cached_manual.schema
  except stem.manual.SchemaMismatch as exc:
    cached_manual, db_schema = None, exc.database_schema
  except IOError:
    cached_manual, db_schema = None, None  # local copy has been deleted

  if db_schema != stem.manual.SCHEMA_VERSION:
    print('Cached database schema is out of date (was %s, but current version is %s)' % (db_schema, stem.manual.SCHEMA_VERSION))
    cached_manual = None

  latest_manual = stem.manual.Manual.from_remote()

  if cached_manual:
    if cached_manual == latest_manual:
      print('Manual information is already up to date, nothing to do.')
      sys.exit(0)

    print('Differences detected...\n')
    print(stem.manual._manual_differences(cached_manual, latest_manual))

  latest_manual.man_commit = man_commit
  latest_manual.stem_commit = stem_commit
  latest_manual.save(stem.manual.CACHE_PATH)
stem-1.7.1/README.md

## Stem (Python Tor Library)
Stem is a Python controller library for **[Tor](https://www.torproject.org/)**. With it you can use Tor's [control protocol](https://gitweb.torproject.org/torspec.git/tree/control-spec.txt) to script against the Tor process, or build things such as [Nyx](https://nyx.torproject.org/).
Documentation and tutorials available at **[stem.torproject.org](https://stem.torproject.org/)**.
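A minimal sketch of scripting against tor (assuming tor is running with 'ControlPort 9051' and cookie authentication enabled, and stem is installed):

```python
from stem.control import Controller

with Controller.from_port(port = 9051) as controller:
  controller.authenticate()  # cookie auth; pass password = '...' if you use one
  print('tor is running version %s' % controller.get_version())
```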
stem-1.7.1/cache_fallback_directories.py

#!/usr/bin/env python
# Copyright 2016-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Caches tor's latest fallback directories.
"""
import re
import sys
import stem.descriptor.remote
import stem.util.system
try:
  # account for urllib's change between python 2.x and 3.x
  import urllib.request as urllib
except ImportError:
  import urllib2 as urllib
GITWEB_MAN_LOG = 'https://gitweb.torproject.org/tor.git/log/src/or/fallback_dirs.inc'
FALLBACK_DIR_LINK = "href='/tor.git/commit/src/or/fallback_dirs.inc\?id=([^']*)'"
if __name__ == '__main__':
  try:
    fallback_dir_page = urllib.urlopen(GITWEB_MAN_LOG).read()
    fallback_dir_commit = re.search(FALLBACK_DIR_LINK, fallback_dir_page).group(1)
  except:
    print("Unable to determine the latest commit to edit tor's fallback directories: %s" % sys.exc_info()[1])
    sys.exit(1)

  try:
    stem_commit = stem.util.system.call('git rev-parse HEAD')[0]
  except IOError as exc:
    print("Unable to determine stem's current commit: %s" % exc)
    sys.exit(1)

  print('Latest tor commit editing fallback directories: %s' % fallback_dir_commit)
  print('Current stem commit: %s' % stem_commit)
  print('')

  cached_fallback_directories = stem.descriptor.remote.FallbackDirectory.from_cache()
  latest_fallback_directories = stem.descriptor.remote.FallbackDirectory.from_remote()

  if cached_fallback_directories == latest_fallback_directories:
    print('Fallback directories are already up to date, nothing to do.')
    sys.exit(0)

  # all fallbacks have the same header metadata, so just picking one
  # (wrapped in list() so this also works with python 3's dict views)
  headers = list(latest_fallback_directories.values())[0].header if latest_fallback_directories else None

  print('Differences detected...\n')
  print(stem.descriptor.remote._fallback_directory_differences(cached_fallback_directories, latest_fallback_directories))
  stem.descriptor.remote.FallbackDirectory._write(latest_fallback_directories, fallback_dir_commit, stem_commit, headers)
stem-1.7.1/setup.py

#!/usr/bin/env python
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
#
# Release Checklist
# =================
#
# * Recache latest information (cache_manual.py and cache_fallback_directories.py)
#
# * Test with python2.6, python2.7, python3, and pypy.
# |- If using tox run...
# |
# | % tox -- --all --target RUN_ALL,ONLINE
# |
# | Otherwise, for each interpreter run...
# |
# | % [python_interpreter] run_tests.py --all --target RUN_ALL,ONLINE
# |
# |- Pypy test instructions for ubuntu are...
# |
# | % sudo apt-get install pypy
# | % wget https://bootstrap.pypa.io/get-pip.py
# | % pypy get-pip.py --user
# | % ~/.local/bin/pip install mock pycodestyle pyflakes --user
# | % pypy ./run_tests.py --all
# |
# +- Some version of python 3.x should be available in your platform's
# repositories. To test against a specific version on ubuntu try the
# following. In this example, Python 3.7...
#
# % sudo apt-get install build-essential python-dev python-setuptools python-pip python-smbus
# % sudo apt-get install libncursesw5-dev libgdbm-dev libc6-dev
# % sudo apt-get install zlib1g-dev libsqlite3-dev tk-dev
# % sudo apt-get install libssl-dev openssl libffi-dev
#
# % wget https://www.python.org/ftp/python/3.7.0/Python-3.7.0.tgz
# % tar -xzf Python-3.7.0.tgz
# % mv Python-3.7.0 ~
#
# % cd ~/Python-3.7.0
# % ./configure
# % make
#
# % cd /path/to/stem
# % ~/Python-3.7.0/python ./run_tests.py --all
#
# * Tag the release
# |- Bump stem's version (in stem/__init__.py and docs/index.rst).
# |- git commit -a -m "Stem release 1.0.0"
# |- git tag -u 9ABBEEC6 -m "stem release 1.0.0" 1.0.0 d0bb81a
# +- git push --tags
#
# * Dry-run release on https://pypi.python.org/pypi/stem/
# |- python setup.py sdist --dryrun
# |- gpg --detach-sig --armor dist/stem-dry-run-1.0.0.tar.gz
# |- twine upload dist/*
# +- Check that https://pypi.python.org/pypi/stem-dry-run/ looks correct, comparing it to https://pypi.python.org/pypi/stem/
# +- Don't worry about the 'Bug Tracker' being missing. That's an attribute of the project itself.
#
# * Final release
# |- rm dist/*
# |- python setup.py sdist
# |- gpg --detach-sig --armor dist/stem-1.0.0.tar.gz
# +- twine upload dist/*
#
# * Contact package maintainers
# * Announce the release (example: https://blog.torproject.org/blog/stem-release-11)
import distutils.core
import os
import sys
import stem
if '--dryrun' in sys.argv:
  DRY_RUN = True
  sys.argv.remove('--dryrun')
else:
  DRY_RUN = False
SUMMARY = 'Stem is a Python controller library that allows applications to interact with Tor (https://www.torproject.org/).'
DRY_RUN_SUMMARY = 'Ignore this package. This is dry-run release creation to work around PyPI limitations (https://github.com/pypa/packaging-problems/issues/74#issuecomment-260716129).'
DESCRIPTION = """
For tutorials and API documentation see `Stem's homepage <https://stem.torproject.org/>`_.
Quick Start
-----------
To install you can either use...
::
pip install stem
... or install from the source tarball. Stem supports both the python 2.x and 3.x series. To use its python3 counterpart you simply need to install using that version of python.
::
python3 setup.py install
After that, give some `tutorials <https://stem.torproject.org/tutorials.html>`_ a try! For questions or to discuss project ideas we're available on `irc <https://www.torproject.org/about/contact.html.en#irc>`_ and the `tor-dev@ email list <https://lists.torproject.org/cgi-bin/mailman/listinfo/tor-dev>`_.
""".strip()
MANIFEST = """
include cache_fallback_directories.py
include cache_manual.py
include LICENSE
include README.md
include MANIFEST.in
include requirements.txt
include run_tests.py
include tox.ini
graft docs
graft test
global-exclude __pycache__
global-exclude *.orig
global-exclude *.pyc
global-exclude *.swp
global-exclude *.swo
global-exclude .tox
global-exclude *~
recursive-exclude test/data *
recursive-exclude docs/_build *
""".strip()
# installation requires us to be in our setup.py's directory
os.chdir(os.path.dirname(os.path.abspath(__file__)))
with open('MANIFEST.in', 'w') as manifest_file:
  manifest_file.write(MANIFEST)

try:
  distutils.core.setup(
    name = 'stem-dry-run' if DRY_RUN else 'stem',
    version = stem.__version__,
    description = DRY_RUN_SUMMARY if DRY_RUN else SUMMARY,
    long_description = DESCRIPTION,
    license = stem.__license__,
    author = stem.__author__,
    author_email = stem.__contact__,
    url = stem.__url__,
    packages = ['stem', 'stem.client', 'stem.descriptor', 'stem.interpreter', 'stem.response', 'stem.util'],
    keywords = 'tor onion controller',
    scripts = ['tor-prompt'],
    package_data = {
      'stem': ['cached_fallbacks.cfg', 'cached_manual.sqlite', 'settings.cfg'],
      'stem.interpreter': ['settings.cfg'],
      'stem.util': ['ports.cfg'],
    },
    classifiers = [
      'Development Status :: 5 - Production/Stable',
      'Intended Audience :: Developers',
      'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
      'Topic :: Security',
      'Topic :: Software Development :: Libraries :: Python Modules',
    ],
  )
finally:
  if os.path.exists('MANIFEST.in'):
    os.remove('MANIFEST.in')

  if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
stem-1.7.1/stem/process.py

# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Helper functions for working with tor as a process.
:NO_TORRC:
when provided as a torrc_path tor is run with a blank configuration
:DEFAULT_INIT_TIMEOUT:
number of seconds before we time out our attempt to start a tor instance
**Module Overview:**
::
launch_tor - starts up a tor process
launch_tor_with_config - starts a tor process with a custom torrc
"""
import os
import re
import signal
import subprocess
import tempfile
import threading
import stem.prereq
import stem.util.str_tools
import stem.util.system
import stem.version
NO_TORRC = ''
DEFAULT_INIT_TIMEOUT = 90
def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True, stdin = None):
"""
Initializes a tor process. This blocks until initialization completes or we
error out.
If tor's data directory is missing or stale then bootstrapping will include
making several requests to the directory authorities which can take a little
while. Usually this is done in 50 seconds or so, but occasionally calls seem
to get stuck, taking well over the default timeout.
**To work tor must log at NOTICE runlevel to stdout.** It does this by
default, but if you have a 'Log' entry in your torrc then you'll also need
'Log NOTICE stdout'.
Note: The timeout argument does not work on Windows or when outside the
main thread, and relies on the global state of the signal module.
.. versionchanged:: 1.6.0
Allowing the timeout argument to be a float.
.. versionchanged:: 1.7.0
Added the **close_output** argument.
:param str tor_cmd: command for starting tor
:param list args: additional arguments for tor
:param str torrc_path: location of the torrc for us to use
:param int completion_percent: percent of bootstrap completion at which
this'll return
:param functor init_msg_handler: optional functor that will be provided with
tor's initialization stdout as we get it
:param int timeout: time after which the attempt to start tor is aborted, no
timeouts are applied if **None**
:param bool take_ownership: asserts ownership over the tor process so it
aborts if this python process terminates or a :class:`~stem.control.Controller`
we establish to it disconnects
:param bool close_output: closes tor's stdout and stderr streams when
bootstrapping is complete if true
:param str stdin: content to provide on stdin
:returns: **subprocess.Popen** instance for the tor subprocess
:raises: **OSError** if we either fail to create the tor process or reached a
timeout without success
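For example, bootstrapping a throwaway tor instance and echoing its startup
messages as they arrive (a sketch rather than stem's own usage; the
SocksPort and DataDirectory values are only placeholders)...
::
import stem.process

tor_process = stem.process.launch_tor(
  args = ['--SocksPort', '2778', '--DataDirectory', '/tmp/tor-demo'],
  init_msg_handler = print,  # python 3; any callable accepting a line of output works
)

# ... use the instance, then clean up...

tor_process.kill()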
"""
if stem.util.system.is_windows():
if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT:
raise OSError('You cannot launch tor with a timeout on Windows')
timeout = None
elif threading.current_thread().__class__.__name__ != '_MainThread':
if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT:
raise OSError('Launching tor with a timeout can only be done in the main thread')
timeout = None
# sanity check that we got a tor binary
if os.path.sep in tor_cmd:
# got a path (either relative or absolute), check what it leads to
if os.path.isdir(tor_cmd):
raise OSError("'%s' is a directory, not the tor executable" % tor_cmd)
elif not os.path.isfile(tor_cmd):
raise OSError("'%s' doesn't exist" % tor_cmd)
elif not stem.util.system.is_available(tor_cmd):
raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd)
# double check that we have a torrc to work with
if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path):
raise OSError("torrc doesn't exist (%s)" % torrc_path)
# starts a tor subprocess, raising an OSError if it fails
runtime_args, temp_file = [tor_cmd], None
if args:
runtime_args += args
if torrc_path:
if torrc_path == NO_TORRC:
temp_file = tempfile.mkstemp(prefix = 'empty-torrc-', text = True)[1]
runtime_args += ['-f', temp_file]
else:
runtime_args += ['-f', torrc_path]
if take_ownership:
runtime_args += ['__OwningControllerProcess', str(os.getpid())]
tor_process = None
try:
tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE)
if stdin:
tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin))
tor_process.stdin.close()
if timeout:
def timeout_handler(signum, frame):
raise OSError('reached a %i second timeout without success' % timeout)
signal.signal(signal.SIGALRM, timeout_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
bootstrap_line = re.compile('Bootstrapped ([0-9]+)%')
problem_line = re.compile('\[(warn|err)\] (.*)$')
last_problem = 'Timed out'
while True:
# Tor's stdout will be read as ASCII bytes. This is fine for python 2, but
# in python 3 that means it'll mismatch with other operations (for instance
# the bootstrap_line.search() call later will fail).
#
# It seems like python 2.x is perfectly happy for this to be unicode, so
# normalizing to that.
init_line = tor_process.stdout.readline().decode('utf-8', 'replace').strip()
# this will provide empty results if the process is terminated
if not init_line:
raise OSError('Process terminated: %s' % last_problem)
# provide the caller with the initialization message if they want it
if init_msg_handler:
init_msg_handler(init_line)
# return the process if we're done with bootstrapping
bootstrap_match = bootstrap_line.search(init_line)
problem_match = problem_line.search(init_line)
if bootstrap_match and int(bootstrap_match.group(1)) >= completion_percent:
return tor_process
elif problem_match:
runlevel, msg = problem_match.groups()
if 'see warnings above' not in msg:
if ': ' in msg:
msg = msg.split(': ')[-1].strip()
last_problem = msg
except:
if tor_process:
tor_process.kill() # don't leave a lingering process
tor_process.wait()
raise
finally:
if timeout:
signal.alarm(0) # stop alarm
if tor_process and close_output:
if tor_process.stdout:
tor_process.stdout.close()
if tor_process.stderr:
tor_process.stderr.close()
if temp_file:
try:
os.remove(temp_file)
except:
pass
def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True):
"""
Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a
customized configuration. This writes a temporary torrc to disk, launches
tor, then deletes the torrc.
For example...
::
tor_process = stem.process.launch_tor_with_config(
config = {
'ControlPort': '2778',
'Log': [
'NOTICE stdout',
'ERR file /tmp/tor_error_log',
],
},
)
.. versionchanged:: 1.7.0
Added the **close_output** argument.
:param dict config: configuration options, such as "{'ControlPort': '9051'}",
values can either be a **str** or **list of str** if for multiple values
:param str tor_cmd: command for starting tor
:param int completion_percent: percent of bootstrap completion at which
this'll return
:param functor init_msg_handler: optional functor that will be provided with
tor's initialization stdout as we get it
:param int timeout: time after which the attempt to start tor is aborted, no
timeouts are applied if **None**
:param bool take_ownership: asserts ownership over the tor process so it
aborts if this python process terminates or a :class:`~stem.control.Controller`
we establish to it disconnects
:param bool close_output: closes tor's stdout and stderr streams when
bootstrapping is complete if true
:returns: **subprocess.Popen** instance for the tor subprocess
:raises: **OSError** if we either fail to create the tor process or reached a
timeout without success
"""
# TODO: Drop this version check when tor 0.2.6.3 or higher is the only game
# in town.
try:
use_stdin = stem.version.get_system_tor_version(tor_cmd) >= stem.version.Requirement.TORRC_VIA_STDIN
except IOError:
use_stdin = False
# we need to be sure that we're logging to stdout to figure out when we're
# done bootstrapping
if 'Log' in config:
stdout_options = ['DEBUG stdout', 'INFO stdout', 'NOTICE stdout']
if isinstance(config['Log'], str):
config['Log'] = [config['Log']]
has_stdout = False
for log_config in config['Log']:
if log_config in stdout_options:
has_stdout = True
break
if not has_stdout:
config['Log'].append('NOTICE stdout')
config_str = ''
for key, values in list(config.items()):
if isinstance(values, str):
config_str += '%s %s\n' % (key, values)
else:
for value in values:
config_str += '%s %s\n' % (key, value)
if use_stdin:
return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, close_output, stdin = config_str)
else:
torrc_descriptor, torrc_path = tempfile.mkstemp(prefix = 'torrc-', text = True)
try:
with open(torrc_path, 'w') as torrc_file:
torrc_file.write(config_str)
# prevents tor from erroring out due to a missing torrc if it gets a sighup
args = ['__ReloadTorrcOnSIGHUP', '0']
return launch_tor(tor_cmd, args, torrc_path, completion_percent, init_msg_handler, timeout, take_ownership)
finally:
try:
os.close(torrc_descriptor)
os.remove(torrc_path)
except:
pass
stem-1.7.1/stem/exit_policy.py

# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Representation of tor exit policies. These can be easily used to check if
exiting to a destination is permissible or not. For instance...
::
>>> from stem.exit_policy import ExitPolicy, MicroExitPolicy
>>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
>>> print(policy)
accept *:80, accept *:443, reject *:*
>>> print(policy.summary())
accept 80, 443
>>> policy.can_exit_to('75.119.206.243', 80)
True
>>> policy = MicroExitPolicy('accept 80,443')
>>> print(policy)
accept 80,443
>>> policy.can_exit_to('75.119.206.243', 80)
True
::
ExitPolicy - Exit policy for a Tor relay
|- MicroExitPolicy - Microdescriptor exit policy
|
|- can_exit_to - check if exiting to this destination is allowed or not
|- is_exiting_allowed - check if any exiting is allowed
|- summary - provides a short label, similar to a microdescriptor
|- has_private - checks if policy has anything expanded from the 'private' keyword
|- strip_private - provides a copy of the policy without 'private' entries
|- has_default - checks if policy ends with the default suffix tor appends
|- strip_default - provides a copy of the policy without the default suffix
|- __str__ - string representation
+- __iter__ - ExitPolicyRule entries that this contains
ExitPolicyRule - Single rule of an exit policy chain
|- MicroExitPolicyRule - Single rule for a microdescriptor policy
|
|- is_address_wildcard - checks if we'll accept any address
|- is_port_wildcard - checks if we'll accept any port
|- get_address_type - provides the protocol our ip address belongs to
|- is_match - checks if we match a given destination
|- get_mask - provides the address representation of our mask
|- get_masked_bits - provides the bit representation of our mask
|- is_default - flag indicating if this was part of the default end of a policy
|- is_private - flag indicating if this was expanded from a 'private' keyword
+- __str__ - string representation for this rule
get_config_policy - provides the ExitPolicy based on torrc rules
.. data:: AddressType (enum)
Enumerations for IP address types that can be in an exit policy.
============ ===========
AddressType Description
============ ===========
**WILDCARD** any address of either IPv4 or IPv6
**IPv4** IPv4 address
**IPv6** IPv6 address
============ ===========
"""
from __future__ import absolute_import
import re
import socket
import zlib
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
AddressType = stem.util.enum.Enum(('WILDCARD', 'Wildcard'), ('IPv4', 'IPv4'), ('IPv6', 'IPv6'))
# Addresses aliased by the 'private' policy. From the tor man page...
#
# To specify all internal and link-local networks (including 0.0.0.0/8,
# 169.254.0.0/16, 127.0.0.0/8, 192.168.0.0/16, 10.0.0.0/8, and 172.16.0.0/12),
# you can use the 'private' alias instead of an address.
PRIVATE_ADDRESSES = (
'0.0.0.0/8',
'169.254.0.0/16',
'127.0.0.0/8',
'192.168.0.0/16',
'10.0.0.0/8',
'172.16.0.0/12',
)
def get_config_policy(rules, ip_address = None):
"""
Converts an ExitPolicy found in a torrc to a proper exit pattern. This
accounts for...
* ports being optional
* the 'private' keyword
.. deprecated:: 1.7.0
Tor's torrc parameters lack a formal spec, making it difficult for this
method to be reliable. Callers are encouraged to move to
:func:`~stem.control.Controller.get_exit_policy` instead.
:param str,list rules: comma separated rules or list to be converted
:param str ip_address: this relay's IP address for the 'private' policy if
it's present, this defaults to the local address
:returns: :class:`~stem.exit_policy.ExitPolicy` reflected by the rules
:raises: **ValueError** if input isn't a valid tor exit policy
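For example (a quick sketch of the conversion)...
::
>>> from stem.exit_policy import get_config_policy
>>> print(get_config_policy('accept *:80, reject *'))
accept *:80, reject *:*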
"""
if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True)):
raise ValueError("%s isn't a valid IP address" % ip_address)
elif ip_address and stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True) and not (ip_address[0] == '[' and ip_address[-1] == ']'):
ip_address = '[%s]' % ip_address # ExitPolicy validation expects IPv6 addresses to be bracketed
if stem.util._is_str(rules):
rules = rules.split(',')
result = []
for rule in rules:
rule = rule.strip()
if not rule:
continue
if not re.search(':[\d\-\*]+$', rule):
rule = '%s:*' % rule
if 'private' in rule:
acceptance = rule.split(' ', 1)[0]
port = rule.rsplit(':', 1)[1]
addresses = list(PRIVATE_ADDRESSES)
if ip_address:
addresses.append(ip_address)
else:
try:
addresses.append(socket.gethostbyname(socket.gethostname()))
except:
pass # we might not have a network connection
for private_addr in addresses:
result.append(ExitPolicyRule('%s %s:%s' % (acceptance, private_addr, port)))
else:
result.append(ExitPolicyRule(rule))
return ExitPolicy(*result)
def _flag_private_rules(rules):
"""
Determine if part of our policy was expanded from the 'private' keyword. This
doesn't differentiate if this actually came from the 'private' keyword or a
series of rules exactly matching it.
"""
matches = [] # find all possible starting indexes
for i, rule in enumerate(rules):
if i + len(PRIVATE_ADDRESSES) > len(rules):
break
rule_str = '%s/%s' % (rule.address, rule.get_masked_bits())
if rule_str == PRIVATE_ADDRESSES[0]:
matches.append(i)
for start_index in matches:
# To match the private policy the following must all be true...
#
# * series of addresses and bit masks match PRIVATE_ADDRESSES
# * all rules have the same port range
# * all rules have the same acceptance (all accept or reject entries)
#
# The last rule is dynamically based on the relay's public address. It may
# not be present if get_config_policy() created this policy and we couldn't
# resolve our address.
last_index = start_index + len(PRIVATE_ADDRESSES)
rule_set = rules[start_index:last_index]
last_rule = rules[last_index] if len(rules) > last_index else None
is_match = True
min_port, max_port = rule_set[0].min_port, rule_set[0].max_port
is_accept = rule_set[0].is_accept
for i, rule in enumerate(rule_set):
rule_str = '%s/%s' % (rule.address, rule.get_masked_bits())
if rule_str != PRIVATE_ADDRESSES[i] or rule.min_port != min_port or rule.max_port != max_port or rule.is_accept != is_accept:
is_match = False
break
if is_match:
for rule in rule_set:
rule._is_private = True
if last_rule and not last_rule.is_address_wildcard() and last_rule.min_port == min_port and last_rule.max_port == max_port and last_rule.is_accept == is_accept:
last_rule._is_private = True
def _flag_default_rules(rules):
"""
Determine if part of our policy ends with the default suffix tor appends.
"""
if len(rules) >= len(DEFAULT_POLICY_RULES):
rules_suffix = tuple(rules[-len(DEFAULT_POLICY_RULES):])
if rules_suffix == DEFAULT_POLICY_RULES:
for rule in rules_suffix:
rule._is_default_suffix = True
class ExitPolicy(object):
"""
Policy for the destinations that a relay allows or denies exiting to. This
is, in effect, just a list of :class:`~stem.exit_policy.ExitPolicyRule`
entries.
:param list rules: **str** or :class:`~stem.exit_policy.ExitPolicyRule`
entries that make up this policy
"""
def __init__(self, *rules):
# sanity check the types
for rule in rules:
if not stem.util._is_str(rule) and not isinstance(rule, ExitPolicyRule):
raise TypeError('Exit policy rules can only contain strings or ExitPolicyRules, got a %s (%s)' % (type(rule), rules))
# Unparsed representation of the rules we were constructed with. Our
# _get_rules() method consumes this to provide ExitPolicyRule instances.
# This is lazily evaluated so we don't need to actually parse the exit
# policy if it's never used.
is_all_str = True
for rule in rules:
if not stem.util._is_str(rule):
is_all_str = False
if rules and is_all_str:
byte_rules = [stem.util.str_tools._to_bytes(r) for r in rules]
self._input_rules = zlib.compress(b','.join(byte_rules))
else:
self._input_rules = rules
self._rules = None
self._hash = None
# Result when no rules apply. According to the spec policies default to 'is
# allowed', but our microdescriptor policy subclass might want to change
# this.
self._is_allowed_default = True
@lru_cache()
def can_exit_to(self, address = None, port = None, strict = False):
"""
Checks if this policy allows exiting to a given destination or not. If the
address or port is omitted then this will check if we're allowed to exit to
any instances of the defined address or port.
:param str address: IPv4 or IPv6 address (with or without brackets)
:param int port: port number
:param bool strict: if the address or port is excluded then check if we can
exit to **all** instances of the defined address or port
:returns: **True** if exiting to this destination is allowed, **False** otherwise
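For instance, illustrating the **strict** flag (a sketch with an arbitrary
address)...
::
>>> policy = ExitPolicy('reject 87.236.194.0/24:*', 'accept *:*')
>>> policy.can_exit_to(port = 80)  # can we exit via *some* address on port 80?
True
>>> policy.can_exit_to(port = 80, strict = True)  # can we exit via *all* addresses?
False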
"""
if not self.is_exiting_allowed():
return False
for rule in self._get_rules():
if rule.is_match(address, port, strict):
return rule.is_accept
return self._is_allowed_default
@lru_cache()
def is_exiting_allowed(self):
"""
Provides **True** if the policy allows exiting whatsoever, **False**
otherwise.
"""
rejected_ports = set()
for rule in self._get_rules():
if rule.is_accept:
for port in range(rule.min_port, rule.max_port + 1):
if port not in rejected_ports:
return True
elif rule.is_address_wildcard():
if rule.is_port_wildcard():
return False
else:
rejected_ports.update(range(rule.min_port, rule.max_port + 1))
return self._is_allowed_default
@lru_cache()
def summary(self):
"""
Provides a short description of our policy chain, similar to a
microdescriptor. This excludes entries that don't cover all IP
addresses, and is either a whitelist or blacklist policy based on
the final entry. For instance...
::
>>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
>>> policy.summary()
'accept 80, 443'
>>> policy = ExitPolicy('accept *:443', 'reject *:1-1024', 'accept *:*')
>>> policy.summary()
'reject 1-442, 444-1024'
:returns: **str** with a concise summary for our policy
"""
# determines if we're a whitelist or blacklist
is_whitelist = not self._is_allowed_default
for rule in self._get_rules():
if rule.is_address_wildcard() and rule.is_port_wildcard():
is_whitelist = not rule.is_accept
break
# Iterates over the policies and adds the ports we'll return (ie,
# allows if a whitelist and rejects if a blacklist). Regardless of a
# port's allow/reject policy, all further entries with that port are
# ignored since policies respect the first matching policy.
display_ports, skip_ports = [], set()
for rule in self._get_rules():
if not rule.is_address_wildcard():
continue
elif rule.is_port_wildcard():
break
for port in range(rule.min_port, rule.max_port + 1):
if port in skip_ports:
continue
# if accept + whitelist or reject + blacklist then add
if rule.is_accept == is_whitelist:
display_ports.append(port)
# all further entries with this port should be ignored
skip_ports.add(port)
# convert port list to a list of ranges (ie, ['1-3'] rather than [1, 2, 3])
if display_ports:
display_ranges, temp_range = [], []
display_ports.sort()
display_ports.append(None) # ending item to include last range in loop
for port in display_ports:
if not temp_range or temp_range[-1] + 1 == port:
temp_range.append(port)
else:
if len(temp_range) > 1:
display_ranges.append('%i-%i' % (temp_range[0], temp_range[-1]))
else:
display_ranges.append(str(temp_range[0]))
temp_range = [port]
else:
# everything for the inverse
is_whitelist = not is_whitelist
display_ranges = ['1-65535']
# constructs the summary string
label_prefix = 'accept ' if is_whitelist else 'reject '
return (label_prefix + ', '.join(display_ranges)).strip()
def has_private(self):
"""
Checks if we have any rules expanded from the 'private' keyword. Tor
appends these by default to the start of the policy and includes a dynamic
address (the relay's public IP).
.. versionadded:: 1.3.0
:returns: **True** if we have any private rules expanded from the 'private'
keyword, **False** otherwise
"""
for rule in self._get_rules():
if rule.is_private():
return True
return False
def strip_private(self):
"""
Provides a copy of this policy without 'private' policy entries.
.. versionadded:: 1.3.0
:returns: **ExitPolicy** without private rules
"""
return ExitPolicy(*[rule for rule in self._get_rules() if not rule.is_private()])
def has_default(self):
"""
Checks if we have the default policy suffix.
.. versionadded:: 1.3.0
:returns: **True** if we have the default policy suffix, **False** otherwise
"""
for rule in self._get_rules():
if rule.is_default():
return True
return False
def strip_default(self):
"""
Provides a copy of this policy without the default policy suffix.
.. versionadded:: 1.3.0
:returns: **ExitPolicy** without default rules
"""
return ExitPolicy(*[rule for rule in self._get_rules() if not rule.is_default()])
def _get_rules(self):
if self._rules is None:
rules = []
is_all_accept, is_all_reject = True, True
if isinstance(self._input_rules, bytes):
decompressed_rules = zlib.decompress(self._input_rules).split(b',')
else:
decompressed_rules = self._input_rules
for rule in decompressed_rules:
if isinstance(rule, bytes):
rule = stem.util.str_tools._to_unicode(rule)
if stem.util._is_str(rule):
if not rule.strip():
continue
rule = ExitPolicyRule(rule.strip())
if rule.is_accept:
is_all_reject = False
else:
is_all_accept = False
rules.append(rule)
if rule.is_address_wildcard() and rule.is_port_wildcard():
break # this is a catch-all, no reason to include more
# If we only have one kind of entry *and* end with a wildcard then
# we might as well use the simpler version. For instance...
#
# reject *:80, reject *:443, reject *:*
#
# ... could also be represented as simply...
#
# reject *:*
#
# This mostly comes up with reject-all policies because the
# 'reject private:*' appends an extra seven rules that have no
# effect.
if rules and (rules[-1].is_address_wildcard() and rules[-1].is_port_wildcard()):
if is_all_accept:
rules = [ExitPolicyRule('accept *:*')]
elif is_all_reject:
rules = [ExitPolicyRule('reject *:*')]
_flag_private_rules(rules)
_flag_default_rules(rules)
self._rules = rules
self._input_rules = None
return self._rules
def __len__(self):
return len(self._get_rules())
def __iter__(self):
for rule in self._get_rules():
yield rule
@lru_cache()
def __str__(self):
return ', '.join([str(rule) for rule in self._get_rules()])
def __hash__(self):
if self._hash is None:
my_hash = 0
for rule in self._get_rules():
my_hash *= 1024
my_hash += hash(rule)
self._hash = my_hash
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ExitPolicy) else False
def __ne__(self, other):
return not self == other
class MicroExitPolicy(ExitPolicy):
"""
Exit policy provided by the microdescriptors. This is a distilled version of
what a normal :class:`~stem.exit_policy.ExitPolicy` contains, just consisting of a
list of ports that are either accepted or rejected. For instance...
::
accept 80,443 # only accepts common http ports
reject 1-1024 # only accepts non-privileged ports
Since these policies are a subset of the exit policy information (lacking IP
ranges) clients can only use them to guess if a relay will accept traffic or
not. To quote the `dir-spec <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_ (section 3.2.1)...
::
With microdescriptors, clients don't learn exact exit policies:
clients can only guess whether a relay accepts their request, try the
BEGIN request, and might get end-reason-exit-policy if they guessed
wrong, in which case they'll have to try elsewhere.
:var bool is_accept: **True** if these are ports that we accept, **False** if
they're ports that we reject
:param str policy: policy string that describes this policy
"""
def __init__(self, policy):
# Microdescriptor policies are of the form...
#
# MicrodescriptorPolicy ::= ("accept" / "reject") SP PortList NL
# PortList ::= PortOrRange
# PortList ::= PortList "," PortOrRange
# PortOrRange ::= INT "-" INT / INT
self._policy = policy
if policy.startswith('accept'):
self.is_accept = True
elif policy.startswith('reject'):
self.is_accept = False
else:
raise ValueError("A microdescriptor exit policy must start with either 'accept' or 'reject': %s" % policy)
policy = policy[6:]
if not policy.startswith(' '):
raise ValueError('A microdescriptor exit policy should have a space separating accept/reject from its port list: %s' % self._policy)
policy = policy.lstrip()
# convert our port list into MicroExitPolicyRule
rules = []
for port_entry in policy.split(','):
if '-' in port_entry:
min_port, max_port = port_entry.split('-', 1)
else:
min_port = max_port = port_entry
if not stem.util.connection.is_valid_port(min_port) or \
not stem.util.connection.is_valid_port(max_port):
raise ValueError("'%s' is an invalid port range" % port_entry)
rules.append(MicroExitPolicyRule(self.is_accept, int(min_port), int(max_port)))
super(MicroExitPolicy, self).__init__(*rules)
self._is_allowed_default = not self.is_accept
def __str__(self):
return self._policy
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, MicroExitPolicy) else False
def __ne__(self, other):
return not self == other
class ExitPolicyRule(object):
"""
Single rule from the user's exit policy. These rules are chained together to
form complete policies that describe where a relay will and will not allow
traffic to exit.
The format of these rules is formally described in the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_ as an
'exitpattern'. Note that while these are similar to tor's man page entry for
ExitPolicies, it's not the exact same. An exitpattern is better defined and
stricter in what it'll accept. For instance, ports are not optional and it
does not contain the 'private' alias.
This should be treated as an immutable object.
.. versionchanged:: 1.5.0
Support for 'accept6/reject6' entries and '\*4/6' wildcards.
:var bool is_accept: indicates if exiting is allowed or disallowed
:var str address: address that this rule is for
:var int min_port: lower end of the port range that we include (inclusive)
:var int max_port: upper end of the port range that we include (inclusive)
:param str rule: exit policy rule to be parsed
:raises: **ValueError** if input isn't a valid tor exit policy rule
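For example (a quick sketch with an arbitrary network)...
::
>>> rule = ExitPolicyRule('reject 87.236.194.0/24:*')
>>> rule.is_match('87.236.194.188', 80)
True
>>> rule.is_accept
False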
"""
def __init__(self, rule):
# policy ::= "accept[6]" exitpattern | "reject[6]" exitpattern
# exitpattern ::= addrspec ":" portspec
rule = stem.util.str_tools._to_unicode(rule)
self.is_accept = rule.startswith('accept')
is_ipv6_only = rule.startswith('accept6') or rule.startswith('reject6')
if rule.startswith('accept6') or rule.startswith('reject6'):
exitpattern = rule[7:]
elif rule.startswith('accept') or rule.startswith('reject'):
exitpattern = rule[6:]
else:
raise ValueError("An exit policy must start with either 'accept[6]' or 'reject[6]': %s" % rule)
if not exitpattern.startswith(' '):
raise ValueError('An exit policy should have a space separating its accept/reject from the exit pattern: %s' % rule)
exitpattern = exitpattern.lstrip()
if ':' not in exitpattern or ']' in exitpattern.rsplit(':', 1)[1]:
raise ValueError("An exitpattern must be of the form 'addrspec:portspec': %s" % rule)
self.address = None
self._address_type = None
self._masked_bits = None
self.min_port = self.max_port = None
self._hash = None
# Our mask in ip notation (ex. '255.255.255.0'). This is only set if we
# either have a custom mask that can't be represented by a number of bits,
# or the user has called mask(), lazily loading this.
self._mask = None
# Malformed exit policies are rejected, but there's an exception where it's
# just skipped: when an accept6/reject6 rule has an IPv4 address...
#
# "Using an IPv4 address with accept6 or reject6 is ignored and generates
# a warning."
self._skip_rule = False
addrspec, portspec = exitpattern.rsplit(':', 1)
self._apply_addrspec(rule, addrspec, is_ipv6_only)
self._apply_portspec(rule, portspec)
# Flags to indicate if this rule seems to be expanded from the 'private'
# keyword or tor's default policy suffix.
self._is_private = False
self._is_default_suffix = False
def is_address_wildcard(self):
"""
**True** if we'll match against **any** address, **False** otherwise.
Note that this is different than \*4, \*6, or '/0' address which are
wildcards for only either IPv4 or IPv6.
:returns: **bool** for if our address matching is a wildcard
"""
return self._address_type == _address_type_to_int(AddressType.WILDCARD)
def is_port_wildcard(self):
"""
**True** if we'll match against any port, **False** otherwise.
:returns: **bool** for if our port matching is a wildcard
"""
return self.min_port in (0, 1) and self.max_port == 65535
def is_match(self, address = None, port = None, strict = False):
"""
**True** if we match against the given destination, **False** otherwise. If
the address or port is omitted then this will check if we're allowed to
exit to any instances of the defined address or port.
:param str address: IPv4 or IPv6 address (with or without brackets)
:param int port: port number
:param bool strict: if the address or port is excluded then check if we can
exit to **all** instances of the defined address or port
:returns: **bool** indicating if we match against this destination
:raises: **ValueError** if provided with a malformed address or port
"""
if self._skip_rule:
return False
# validate our input and check if the argument doesn't match our address type
if address is not None:
address_type = self.get_address_type()
if stem.util.connection.is_valid_ipv4_address(address):
if address_type == AddressType.IPv6:
return False
elif stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
if address_type == AddressType.IPv4:
return False
address = address.lstrip('[').rstrip(']')
else:
raise ValueError("'%s' isn't a valid IPv4 or IPv6 address" % address)
if port is not None and not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
# If we're not matching against an address or port but the rule has one
# then we're a fuzzy match. When that happens...
#
# * If strict and a reject rule then we're a match ('can exit to *all* instances').
# * If not strict and an accept rule then match ('can exit to *any* instance').
fuzzy_match = False
if not self.is_address_wildcard():
# Already got the integer representation of our mask and our address
# with the mask applied. Just need to check if this address with the
# mask applied matches.
if address is None:
fuzzy_match = True
else:
comparison_addr_bin = stem.util.connection.address_to_int(address)
comparison_addr_bin &= self._get_mask_bin()
if self._get_address_bin() != comparison_addr_bin:
return False
if not self.is_port_wildcard():
if port is None:
fuzzy_match = True
elif port < self.min_port or port > self.max_port:
return False
if fuzzy_match:
return strict != self.is_accept
else:
return True
def get_address_type(self):
"""
Provides the :data:`~stem.exit_policy.AddressType` for our policy.
:returns: :data:`~stem.exit_policy.AddressType` for the type of address that we have
"""
return _int_to_address_type(self._address_type)
def get_mask(self, cache = True):
"""
Provides the address represented by our mask. This is **None** if our
address type is a wildcard.
:param bool cache: caches the result if **True**
:returns: str of our subnet mask for the address (ex. '255.255.255.0')
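For example (a quick sketch)...
::
>>> ExitPolicyRule('accept 192.168.0.0/16:*').get_mask()
'255.255.0.0'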
"""
# Lazy loading our mask because it is very infrequently requested. There's
# no reason to usually use memory for it.
if not self._mask:
address_type = self.get_address_type()
if address_type == AddressType.WILDCARD:
mask = None
elif address_type == AddressType.IPv4:
mask = stem.util.connection.get_mask_ipv4(self._masked_bits)
elif address_type == AddressType.IPv6:
mask = stem.util.connection.get_mask_ipv6(self._masked_bits)
if not cache:
return mask
self._mask = mask
return self._mask
def get_masked_bits(self):
"""
Provides the number of bits our subnet mask represents. This is **None** if
our mask can't have a bit representation.
:returns: int with the bit representation of our mask
"""
return self._masked_bits
def is_private(self):
"""
Checks if this rule was expanded from the 'private' policy keyword.
.. versionadded:: 1.3.0
:returns: **True** if this rule was expanded from the 'private' keyword, **False** otherwise.
"""
return self._is_private
def is_default(self):
"""
Checks if this rule belongs to the default exit policy suffix.
.. versionadded:: 1.3.0
:returns: **True** if this rule was part of the default end of a policy, **False** otherwise.
"""
return self._is_default_suffix
@lru_cache()
def __str__(self):
"""
Provides the string representation of our policy. This does not
necessarily match the rule that we were constructed from (due to things
like IPv6 address collapsing or the multiple representations that our mask
can have). However, it is a valid rule that would be accepted by our constructor
to re-create this rule.
"""
label = 'accept ' if self.is_accept else 'reject '
if self.is_address_wildcard():
label += '*:'
else:
address_type = self.get_address_type()
if address_type == AddressType.IPv4:
label += self.address
else:
label += '[%s]' % self.address
# Including our mask label as follows...
# - exclude our mask if it doesn't do anything
# - use our masked bit count if we can
# - use the mask itself otherwise
if (address_type == AddressType.IPv4 and self._masked_bits == 32) or \
(address_type == AddressType.IPv6 and self._masked_bits == 128):
label += ':'
elif self._masked_bits is not None:
label += '/%i:' % self._masked_bits
else:
label += '/%s:' % self.get_mask()
if self.is_port_wildcard():
label += '*'
elif self.min_port == self.max_port:
label += str(self.min_port)
else:
label += '%i-%i' % (self.min_port, self.max_port)
return label
@lru_cache()
def _get_mask_bin(self):
# provides an integer representation of our mask
return int(stem.util.connection._address_to_binary(self.get_mask(False)), 2)
@lru_cache()
def _get_address_bin(self):
# provides an integer representation of our address
return stem.util.connection.address_to_int(self.address) & self._get_mask_bin()
def _apply_addrspec(self, rule, addrspec, is_ipv6_only):
# Parses the addrspec...
# addrspec ::= "*" | ip4spec | ip6spec
# Expand IPv4 and IPv6 specific wildcards into /0 entries so we have one
# fewer bizarre special case headaches to deal with.
if addrspec == '*4':
addrspec = '0.0.0.0/0'
elif addrspec == '*6' or (addrspec == '*' and is_ipv6_only):
addrspec = '[0000:0000:0000:0000:0000:0000:0000:0000]/0'
if '/' in addrspec:
self.address, addr_extra = addrspec.split('/', 1)
else:
self.address, addr_extra = addrspec, None
if addrspec == '*':
self._address_type = _address_type_to_int(AddressType.WILDCARD)
self.address = self._masked_bits = None
elif stem.util.connection.is_valid_ipv4_address(self.address):
# ipv4spec ::= ip4 | ip4 "/" num_ip4_bits | ip4 "/" ip4mask
# ip4 ::= an IPv4 address in dotted-quad format
# ip4mask ::= an IPv4 mask in dotted-quad format
# num_ip4_bits ::= an integer between 0 and 32
if is_ipv6_only:
self._skip_rule = True
self._address_type = _address_type_to_int(AddressType.IPv4)
if addr_extra is None:
self._masked_bits = 32
elif stem.util.connection.is_valid_ipv4_address(addr_extra):
# provided with an ip4mask
try:
self._masked_bits = stem.util.connection._get_masked_bits(addr_extra)
except ValueError:
# mask can't be represented as a number of bits (ex. '255.255.0.255')
self._mask = addr_extra
self._masked_bits = None
elif addr_extra.isdigit():
# provided with a num_ip4_bits
self._masked_bits = int(addr_extra)
if self._masked_bits < 0 or self._masked_bits > 32:
raise ValueError('IPv4 masks must be in the range of 0-32 bits')
else:
raise ValueError("The '%s' isn't a mask nor number of bits: %s" % (addr_extra, rule))
elif self.address.startswith('[') and self.address.endswith(']') and \
stem.util.connection.is_valid_ipv6_address(self.address[1:-1]):
# ip6spec ::= ip6 | ip6 "/" num_ip6_bits
# ip6 ::= an IPv6 address, surrounded by square brackets.
# num_ip6_bits ::= an integer between 0 and 128
self.address = stem.util.connection.expand_ipv6_address(self.address[1:-1].upper())
self._address_type = _address_type_to_int(AddressType.IPv6)
if addr_extra is None:
self._masked_bits = 128
elif addr_extra.isdigit():
# provided with a num_ip6_bits
self._masked_bits = int(addr_extra)
if self._masked_bits < 0 or self._masked_bits > 128:
raise ValueError('IPv6 masks must be in the range of 0-128 bits')
else:
raise ValueError("The '%s' isn't a number of bits: %s" % (addr_extra, rule))
else:
raise ValueError("'%s' isn't a wildcard, IPv4, or IPv6 address: %s" % (addrspec, rule))
def _apply_portspec(self, rule, portspec):
# Parses the portspec...
# portspec ::= "*" | port | port "-" port
# port ::= an integer between 1 and 65535, inclusive.
#
# Due to a tor bug the spec says that we should accept port of zero, but
# connections to port zero are never permitted.
if portspec == '*':
self.min_port, self.max_port = 1, 65535
elif portspec.isdigit():
# provided with a single port
if stem.util.connection.is_valid_port(portspec, allow_zero = True):
self.min_port = self.max_port = int(portspec)
else:
raise ValueError("'%s' isn't within a valid port range: %s" % (portspec, rule))
elif '-' in portspec:
# provided with a port range
port_comp = portspec.split('-', 1)
if stem.util.connection.is_valid_port(port_comp, allow_zero = True):
self.min_port = int(port_comp[0])
self.max_port = int(port_comp[1])
if self.min_port > self.max_port:
raise ValueError("Port range has a lower bound that's greater than its upper bound: %s" % rule)
else:
raise ValueError('Malformed port range: %s' % rule)
else:
raise ValueError("Port value isn't a wildcard, integer, or range: %s" % rule)
def __hash__(self):
if self._hash is None:
self._hash = stem.util._hash_attr(self, 'is_accept', 'address', 'min_port', 'max_port') * 1024 + hash(self.get_mask(False))
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ExitPolicyRule) else False
def __ne__(self, other):
return not self == other
def _address_type_to_int(address_type):
return AddressType.index_of(address_type)
def _int_to_address_type(address_type_int):
return list(AddressType)[address_type_int]
class MicroExitPolicyRule(ExitPolicyRule):
"""
Lighter weight ExitPolicyRule derivative for microdescriptors.
"""
def __init__(self, is_accept, min_port, max_port):
self.is_accept = is_accept
self.address = None # wildcard address
self.min_port = min_port
self.max_port = max_port
self._skip_rule = False
def is_address_wildcard(self):
return True
def get_address_type(self):
return AddressType.WILDCARD
def get_mask(self, cache = True):
return None
def get_masked_bits(self):
return None
def __hash__(self):
return stem.util._hash_attr(self, 'is_accept', 'min_port', 'max_port', cache = True)
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, MicroExitPolicyRule) else False
def __ne__(self, other):
return not self == other
DEFAULT_POLICY_RULES = tuple([ExitPolicyRule(rule) for rule in (
'reject *:25',
'reject *:119',
'reject *:135-139',
'reject *:445',
'reject *:563',
'reject *:1214',
'reject *:4661-4666',
'reject *:6346-6429',
'reject *:6699',
'reject *:6881-6999',
'accept *:*',
)])
stem-1.7.1/stem/descriptor/certificate.py

# Copyright 2017-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `Tor Ed25519 certificates
<https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_, which are
used to validate the key used to sign server descriptors.
.. versionadded:: 1.6.0
**Module Overview:**
::
Ed25519Certificate - Ed25519 signing key certificate
| +- Ed25519CertificateV1 - version 1 Ed25519 certificate
| |- is_expired - checks if certificate is presently expired
| +- validate - validates signature of a server descriptor
|
+- parse - reads base64 encoded certificate data
Ed25519Extension - extension included within an Ed25519Certificate
.. data:: CertType (enum)
Purpose of Ed25519 certificate. As new certificate versions are added this
enumeration will expand.
============== ===========
CertType Description
============== ===========
**SIGNING** signing a signing key with an identity key
**LINK_CERT** TLS link certificate signed with ed25519 signing key
**AUTH** authentication key signed with ed25519 signing key
============== ===========
.. data:: ExtensionType (enum)
Recognized extension types.
==================== ===========
ExtensionType Description
==================== ===========
**HAS_SIGNING_KEY** includes key used to sign the certificate
==================== ===========
.. data:: ExtensionFlag (enum)
Flags that can be assigned to Ed25519 certificate extensions.
====================== ===========
ExtensionFlag Description
====================== ===========
**AFFECTS_VALIDATION** extension affects whether the certificate is valid
**UNKNOWN** extension includes flags not yet recognized by stem
====================== ===========
"""
import base64
import binascii
import collections
import datetime
import hashlib
import stem.prereq
import stem.util.enum
import stem.util.str_tools
ED25519_HEADER_LENGTH = 40
ED25519_SIGNATURE_LENGTH = 64
ED25519_ROUTER_SIGNATURE_PREFIX = b'Tor router descriptor signature v1'
CertType = stem.util.enum.UppercaseEnum('SIGNING', 'LINK_CERT', 'AUTH')
ExtensionType = stem.util.enum.Enum(('HAS_SIGNING_KEY', 4),)
ExtensionFlag = stem.util.enum.UppercaseEnum('AFFECTS_VALIDATION', 'UNKNOWN')
class Ed25519Extension(collections.namedtuple('Ed25519Extension', ['type', 'flags', 'flag_int', 'data'])):
"""
Extension within an Ed25519 certificate.
:var int type: extension type
:var list flags: extension attribute flags
:var int flag_int: integer encoding of the extension attribute flags
:var bytes data: data the extension concerns
"""
class Ed25519Certificate(object):
"""
Base class for an Ed25519 certificate.
:var int version: certificate format version
:var str encoded: base64 encoded ed25519 certificate
"""
def __init__(self, version, encoded):
self.version = version
self.encoded = encoded
@staticmethod
def parse(content):
"""
Parses the given base64 encoded data as an Ed25519 certificate.
:param str content: base64 encoded certificate
:returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass
for the given certificate
:raises: **ValueError** if content is malformed
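For example, parsing and checking expiration (a sketch; **cert_base64** is
a stand-in for base64 encoded certificate content, such as a server
descriptor's 'identity-ed25519' block)...
::
cert = Ed25519Certificate.parse(cert_base64)

if cert.is_expired():
  print('certificate expired on %s' % cert.expiration)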
"""
try:
decoded = base64.b64decode(stem.util.str_tools._to_bytes(content))
if not decoded:
raise TypeError('empty')
except (TypeError, binascii.Error) as exc:
raise ValueError("Ed25519 certificate wasn't propoerly base64 encoded (%s):\n%s" % (exc, content))
version = stem.util.str_tools._to_int(decoded[0:1])
if version == 1:
return Ed25519CertificateV1(version, content, decoded)
else:
raise ValueError('Ed25519 certificate is version %i. Parser presently only supports version 1.' % version)
class Ed25519CertificateV1(Ed25519Certificate):
"""
Version 1 Ed25519 certificate, which are used for signing tor server
descriptors.
:var CertType type: certificate purpose
:var datetime expiration: expiration of the certificate
:var int key_type: format of the key
:var bytes key: key content
:var list extensions: :class:`~stem.descriptor.certificate.Ed25519Extension` in this certificate
:var bytes signature: certificate signature
"""
def __init__(self, version, encoded, decoded):
super(Ed25519CertificateV1, self).__init__(version, encoded)
if len(decoded) < ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH:
raise ValueError('Ed25519 certificate was %i bytes, but should be at least %i' % (len(decoded), ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH))
cert_type = stem.util.str_tools._to_int(decoded[1:2])
if cert_type in (0, 1, 2, 3):
raise ValueError('Ed25519 certificate cannot have a type of %i. This is reserved to avoid conflicts with tor CERTS cells.' % cert_type)
elif cert_type == 4:
self.type = CertType.SIGNING
elif cert_type == 5:
self.type = CertType.LINK_CERT
elif cert_type == 6:
self.type = CertType.AUTH
elif cert_type == 7:
raise ValueError('Ed25519 certificate cannot have a type of 7. This is reserved for RSA identity cross-certification.')
else:
raise ValueError("BUG: Ed25519 certificate type is decoded from one byte. It shouldn't be possible to have a value of %i." % cert_type)
# expiration time is in hours since epoch
try:
self.expiration = datetime.datetime.utcfromtimestamp(stem.util.str_tools._to_int(decoded[2:6]) * 3600)
except ValueError as exc:
raise ValueError('Invalid expiration timestamp (%s): %s' % (exc, stem.util.str_tools._to_int(decoded[2:6]) * 3600))
self.key_type = stem.util.str_tools._to_int(decoded[6:7])
self.key = decoded[7:39]
self.signature = decoded[-ED25519_SIGNATURE_LENGTH:]
self.extensions = []
extension_count = stem.util.str_tools._to_int(decoded[39:40])
remaining_data = decoded[40:-ED25519_SIGNATURE_LENGTH]
for i in range(extension_count):
if len(remaining_data) < 4:
raise ValueError('Ed25519 extension is missing header field data')
extension_length = stem.util.str_tools._to_int(remaining_data[:2])
extension_type = stem.util.str_tools._to_int(remaining_data[2:3])
extension_flags = stem.util.str_tools._to_int(remaining_data[3:4])
extension_data = remaining_data[4:4 + extension_length]
if extension_length != len(extension_data):
raise ValueError("Ed25519 extension is truncated. It should have %i bytes of data but there's only %i." % (extension_length, len(extension_data)))
flags, remaining_flags = [], extension_flags
if remaining_flags % 2 == 1:
flags.append(ExtensionFlag.AFFECTS_VALIDATION)
remaining_flags -= 1
if remaining_flags:
flags.append(ExtensionFlag.UNKNOWN)
if extension_type == ExtensionType.HAS_SIGNING_KEY and len(extension_data) != 32:
raise ValueError('Ed25519 HAS_SIGNING_KEY extension must be 32 bytes, but was %i.' % len(extension_data))
self.extensions.append(Ed25519Extension(extension_type, flags, extension_flags, extension_data))
remaining_data = remaining_data[4 + extension_length:]
if remaining_data:
raise ValueError('Ed25519 certificate had %i bytes of unused extension data' % len(remaining_data))
def is_expired(self):
"""
Checks if this certificate is presently expired or not.
:returns: **True** if the certificate has expired, **False** otherwise
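For example...

::

  if cert.is_expired():
    print('certificate expired at %s' % cert.expiration)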
"""
# our expiration is derived via utcfromtimestamp(), so compare in utc
return datetime.datetime.utcnow() > self.expiration
def validate(self, server_descriptor):
"""
Validates our signing key and that the given descriptor content matches its
Ed25519 signature.
:param stem.descriptor.server_descriptor.RelayDescriptor server_descriptor:
relay server descriptor to validate
:raises:
* **ValueError** if signing key or descriptor are invalid
* **ImportError** if pynacl module is unavailable
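A sketch of typical use, assuming the pynacl module is installed and a
descriptor fetched through stem.descriptor.remote...

::

  import stem.descriptor.remote

  desc = stem.descriptor.remote.get_server_descriptors().run()[0]

  if desc.certificate:
    try:
      desc.certificate.validate(desc)
      print('descriptor signature is valid')
    except ValueError as exc:
      print('descriptor signature is invalid: %s' % exc)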
"""
if not stem.prereq._is_pynacl_available():
raise ImportError('Certificate validation requires the pynacl module')
import nacl.signing
import nacl.encoding
from nacl.exceptions import BadSignatureError
descriptor_content = server_descriptor.get_bytes()
signing_key = None
if server_descriptor.ed25519_master_key:
signing_key = nacl.signing.VerifyKey(stem.util.str_tools._to_bytes(server_descriptor.ed25519_master_key) + b'=', encoder = nacl.encoding.Base64Encoder)
else:
for extension in self.extensions:
if extension.type == ExtensionType.HAS_SIGNING_KEY:
signing_key = nacl.signing.VerifyKey(extension.data)
break
if not signing_key:
raise ValueError('Server descriptor missing an ed25519 signing key')
try:
signing_key.verify(base64.b64decode(stem.util.str_tools._to_bytes(self.encoded))[:-ED25519_SIGNATURE_LENGTH], self.signature)
except BadSignatureError as exc:
raise ValueError('Ed25519KeyCertificate signing key is invalid (%s)' % exc)
# ed25519 signature validates descriptor content up until the signature itself
if b'router-sig-ed25519 ' not in descriptor_content:
raise ValueError("Descriptor doesn't have a router-sig-ed25519 entry.")
signed_content = descriptor_content[:descriptor_content.index(b'router-sig-ed25519 ') + 19]
descriptor_sha256_digest = hashlib.sha256(ED25519_ROUTER_SIGNATURE_PREFIX + signed_content).digest()
missing_padding = len(server_descriptor.ed25519_signature) % 4
signature_bytes = base64.b64decode(stem.util.str_tools._to_bytes(server_descriptor.ed25519_signature) + b'=' * missing_padding)
try:
verify_key = nacl.signing.VerifyKey(self.key)
verify_key.verify(descriptor_sha256_digest, signature_bytes)
except BadSignatureError as exc:
raise ValueError('Descriptor Ed25519 certificate signature invalid (%s)' % exc)
stem-1.7.1/stem/descriptor/reader.py 0000664 0001750 0001750 00000044771 13411002341 020156 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Utilities for reading descriptors from local directories and archives. This is
mostly done through the :class:`~stem.descriptor.reader.DescriptorReader`
class, which is an iterator for the descriptor data in a series of
destinations. For example...
::
my_descriptors = [
'/tmp/server-descriptors-2012-03.tar.bz2',
'/tmp/archived_descriptors/',
]
# prints the contents of all the descriptor files
with DescriptorReader(my_descriptors) as reader:
for descriptor in reader:
print(descriptor)
This ignores files that cannot be processed due to read errors or unparsable
content. To be notified of skipped files you can register a listener with
:func:`~stem.descriptor.reader.DescriptorReader.register_skip_listener`.
The :class:`~stem.descriptor.reader.DescriptorReader` keeps track of the last
modified timestamps for descriptor files that it has read so it can skip
unchanged files if run again. This listing of processed files can also be
persisted and applied to other
:class:`~stem.descriptor.reader.DescriptorReader` instances. For example, the
following prints descriptors as they're changed over the course of a minute,
and picks up where it left off if run again...
::
reader = DescriptorReader(['/tmp/descriptor_data'])
try:
processed_files = load_processed_files('/tmp/used_descriptors')
reader.set_processed_files(processed_files)
except: pass # could not load, maybe this is the first run
start_time = time.time()
while (time.time() - start_time) < 60:
# prints any descriptors that have changed since last checked
with reader:
for descriptor in reader:
print(descriptor)
time.sleep(1)
save_processed_files('/tmp/used_descriptors', reader.get_processed_files())
**Module Overview:**
::
load_processed_files - Loads a listing of processed files
save_processed_files - Saves a listing of processed files
DescriptorReader - Iterator for descriptor data on the local file system
|- get_processed_files - provides the listing of files that we've processed
|- set_processed_files - sets our tracking of the files we have processed
|- register_read_listener - adds a listener for when files are read
|- register_skip_listener - adds a listener that's notified of skipped files
|- start - begins reading descriptor data
|- stop - stops reading descriptor data
|- __enter__ / __exit__ - manages the descriptor reader thread in the context
+- __iter__ - iterates over descriptor data in unread files
FileSkipped - Base exception for a file that was skipped
|- AlreadyRead - We've already read a file with this last modified timestamp
|- ParsingFailure - Contents can't be parsed as descriptor data
|- UnrecognizedType - File extension indicates non-descriptor data
+- ReadFailed - Wraps an error that was raised while reading the file
+- FileMissing - File does not exist
"""
import mimetypes
import os
import tarfile
import threading
try:
import queue
except ImportError:
import Queue as queue
import stem.descriptor
import stem.prereq
import stem.util
import stem.util.str_tools
import stem.util.system
# flag to indicate when the reader thread is out of descriptor files to read
FINISHED = 'DONE'
class FileSkipped(Exception):
"Base error when we can't provide descriptor data from a file."
class AlreadyRead(FileSkipped):
"""
Already read a file with this 'last modified' timestamp or later.
:param int last_modified: unix timestamp for when the file was last modified
:param int last_modified_when_read: unix timestamp for the modification time
when we last read this file
"""
def __init__(self, last_modified, last_modified_when_read):
super(AlreadyRead, self).__init__('File has already been read since it was last modified. modification time: %s, last read: %s' % (last_modified, last_modified_when_read))
self.last_modified = last_modified
self.last_modified_when_read = last_modified_when_read
class ParsingFailure(FileSkipped):
"""
File contents could not be parsed as descriptor data.
:param ValueError exception: issue that arose when parsing
"""
def __init__(self, parsing_exception):
super(ParsingFailure, self).__init__(parsing_exception)
self.exception = parsing_exception
class UnrecognizedType(FileSkipped):
"""
File doesn't contain descriptor data. This could either be due to its file
type or because it doesn't conform to a recognizable descriptor type.
:param tuple mime_type: the (type, encoding) tuple provided by mimetypes.guess_type()
"""
def __init__(self, mime_type):
super(UnrecognizedType, self).__init__('Unrecognized mime type: %s (%s)' % mime_type)
self.mime_type = mime_type
class ReadFailed(FileSkipped):
"""
An IOError occurred while trying to read the file.
:param IOError exception: issue that arose when reading the file, **None** if
this arose due to the file not being present
"""
def __init__(self, read_exception):
super(ReadFailed, self).__init__(read_exception)
self.exception = read_exception
class FileMissing(ReadFailed):
'File does not exist.'
def __init__(self):
super(FileMissing, self).__init__('File does not exist')
def load_processed_files(path):
"""
Loads a dictionary of 'path => last modified timestamp' mappings, as
persisted by :func:`~stem.descriptor.reader.save_processed_files`, from a
file.
:param str path: location to load the processed files dictionary from
:returns: **dict** of 'path (**str**) => last modified unix timestamp
(**int**)' mappings
:raises:
* **IOError** if unable to read the file
* **TypeError** if unable to parse the file's contents
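The persisted file simply contains 'absolute path => timestamp' pairs, one
per line. For instance (path illustrative)...

::

  /home/atagar/descriptors/cached-consensus 1418525024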
"""
processed_files = {}
with open(path, 'rb') as input_file:
for line in input_file.readlines():
line = stem.util.str_tools._to_unicode(line.strip())
if not line:
continue # skip blank lines
if ' ' not in line:
raise TypeError('Malformed line: %s' % line)
path, timestamp = line.rsplit(' ', 1)
if not os.path.isabs(path):
raise TypeError("'%s' is not an absolute path" % path)
elif not timestamp.isdigit():
raise TypeError("'%s' is not an integer timestamp" % timestamp)
processed_files[path] = int(timestamp)
return processed_files
def save_processed_files(path, processed_files):
"""
Persists a dictionary of 'path => last modified timestamp' mappings (as
provided by the DescriptorReader's
:func:`~stem.descriptor.reader.DescriptorReader.get_processed_files` method)
so that they can be loaded later and applied to another
:class:`~stem.descriptor.reader.DescriptorReader`.
:param str path: location to save the processed files dictionary to
:param dict processed_files: 'path => last modified' mappings
:raises:
* **IOError** if unable to write to the file
* **TypeError** if processed_files is of the wrong type
"""
# makes the parent directory if it doesn't already exist
try:
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
os.makedirs(path_dir)
except OSError as exc:
raise IOError(exc)
with open(path, 'w') as output_file:
for path, timestamp in list(processed_files.items()):
if not os.path.isabs(path):
raise TypeError('Only absolute paths are acceptable: %s' % path)
output_file.write('%s %i\n' % (path, timestamp))
class DescriptorReader(object):
"""
Iterator for the descriptor data on the local file system. This can process
text files, tarball archives (gzip or bzip2), or recurse directories.
By default this limits the number of descriptors that we'll read ahead before
waiting for our caller to fetch some of them. This is included to avoid
unbounded memory usage.
Our persistence_path argument is a convenient way to persist the listing
of files we have processed between runs. However, it doesn't allow for error
handling. If you want that then use the
:func:`~stem.descriptor.reader.load_processed_files` and
:func:`~stem.descriptor.reader.save_processed_files` functions instead.
:param str,list target: path or list of paths for files or directories to be read from
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param bool follow_links: determines if we'll follow symlinks when traversing
directories (requires python 2.6)
:param int buffer_size: descriptors we'll buffer before waiting for some to
be read, this is unbounded if zero
:param str persistence_path: if set we will load and save processed file
listings from this path, errors are ignored
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param dict kwargs: additional arguments for the descriptor constructor
"""
def __init__(self, target, validate = False, follow_links = False, buffer_size = 100, persistence_path = None, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
self._targets = [target] if stem.util._is_str(target) else target
# expand any relative paths we got
self._targets = list(map(os.path.abspath, self._targets))
self._validate = validate
self._follow_links = follow_links
self._persistence_path = persistence_path
self._document_handler = document_handler
self._kwargs = kwargs
self._read_listeners = []
self._skip_listeners = []
self._processed_files = {}
self._reader_thread = None
self._reader_thread_lock = threading.RLock()
self._iter_lock = threading.RLock()
self._iter_notice = threading.Event()
self._is_stopped = threading.Event()
self._is_stopped.set()
# Descriptors that we have read but not yet provided to the caller. A
# FINISHED entry is used by the reading thread to indicate the end.
self._unreturned_descriptors = queue.Queue(buffer_size)
if self._persistence_path:
try:
processed_files = load_processed_files(self._persistence_path)
self.set_processed_files(processed_files)
except:
pass
def get_processed_files(self):
"""
For each file that we have read descriptor data from this provides a
mapping of the form...
::
absolute path (str) => last modified unix timestamp (int)
This includes entries set through the
:func:`~stem.descriptor.reader.DescriptorReader.set_processed_files`
method. Each run resets this to only the files that were present during
that run.
:returns: **dict** with the absolute paths and unix timestamp for the last
modified times of the files we have processed
"""
# make sure that we only provide back absolute paths
return dict((os.path.abspath(k), v) for (k, v) in list(self._processed_files.items()))
def set_processed_files(self, processed_files):
"""
Sets the listing of the files we have processed. Most often this is used
with a newly created :class:`~stem.descriptor.reader.DescriptorReader` to
pre-populate the listing of descriptor files that we have seen.
:param dict processed_files: mapping of absolute paths (**str**) to unix
timestamps for the last modified time (**int**)
"""
self._processed_files = dict(processed_files)
def register_read_listener(self, listener):
"""
Registers a listener for when files are read. This is executed prior to
processing files. Listeners are expected to be of the form...
::
my_listener(path)
:param functor listener: functor to be notified when files are read
"""
self._read_listeners.append(listener)
def register_skip_listener(self, listener):
"""
Registers a listener for files that are skipped. This listener is expected
to be a functor of the form...
::
my_listener(path, exception)
:param functor listener: functor to be notified of files that are skipped
due to read errors or because they couldn't be parsed as valid descriptor data
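For example...

::

  def skip_listener(path, exception):
    print('Unable to use %s: %s' % (path, exception))

  reader.register_skip_listener(skip_listener)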
"""
self._skip_listeners.append(listener)
def get_buffered_descriptor_count(self):
"""
Provides the number of descriptors that are waiting to be iterated over.
This is limited to the buffer_size that we were constructed with.
:returns: **int** for the estimated number of currently enqueued
descriptors, this is not entirely reliable
"""
return self._unreturned_descriptors.qsize()
def start(self):
"""
Starts reading our descriptor files.
:raises: **ValueError** if we're already reading the descriptor files
"""
with self._reader_thread_lock:
if self._reader_thread:
raise ValueError('Already running, you need to call stop() first')
else:
self._is_stopped.clear()
self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor reader')
self._reader_thread.setDaemon(True)
self._reader_thread.start()
def stop(self):
"""
Stops further reading of descriptor files.
"""
with self._reader_thread_lock:
self._is_stopped.set()
self._iter_notice.set()
# clears our queue to unblock enqueue calls
try:
while True:
self._unreturned_descriptors.get_nowait()
except queue.Empty:
pass
self._reader_thread.join()
self._reader_thread = None
if self._persistence_path:
try:
processed_files = self.get_processed_files()
save_processed_files(self._persistence_path, processed_files)
except:
pass
def _read_descriptor_files(self):
new_processed_files = {}
remaining_files = list(self._targets)
while remaining_files and not self._is_stopped.is_set():
target = remaining_files.pop(0)
if not os.path.exists(target):
self._notify_skip_listeners(target, FileMissing())
continue
if os.path.isdir(target):
walker = os.walk(target, followlinks = self._follow_links)
self._handle_walker(walker, new_processed_files)
else:
self._handle_file(target, new_processed_files)
self._processed_files = new_processed_files
if not self._is_stopped.is_set():
self._unreturned_descriptors.put(FINISHED)
self._iter_notice.set()
def __iter__(self):
with self._iter_lock:
while not self._is_stopped.is_set():
try:
descriptor = self._unreturned_descriptors.get_nowait()
if descriptor == FINISHED:
break
else:
yield descriptor
except queue.Empty:
self._iter_notice.wait()
self._iter_notice.clear()
def _handle_walker(self, walker, new_processed_files):
for root, _, files in walker:
for filename in files:
self._handle_file(os.path.join(root, filename), new_processed_files)
# this can take a while if, say, we're including the root directory
if self._is_stopped.is_set():
return
def _handle_file(self, target, new_processed_files):
# This is a file. Register its last modified timestamp and check if
# it's a file that we should skip.
try:
last_modified = int(os.stat(target).st_mtime)
last_used = self._processed_files.get(target)
new_processed_files[target] = last_modified
except OSError as exc:
self._notify_skip_listeners(target, ReadFailed(exc))
return
if last_used and last_used >= last_modified:
self._notify_skip_listeners(target, AlreadyRead(last_modified, last_used))
return
# Block devices and such are never descriptors, and can cause us to block
# for quite a while, so skip anything that isn't a regular file.
if not os.path.isfile(target):
return
# The mimetypes module only checks the file extension. To actually
# check the content (like the 'file' command) we'd need something like
# pymagic (https://github.com/cloudburst/pymagic).
target_type = mimetypes.guess_type(target)
if target_type[0] in (None, 'text/plain'):
# either '.txt' or an unknown type
self._handle_descriptor_file(target, target_type)
elif stem.util.system.is_tarfile(target):
# handles gzip, bz2, and decompressed tarballs among others
self._handle_archive(target)
else:
self._notify_skip_listeners(target, UnrecognizedType(target_type))
def _handle_descriptor_file(self, target, mime_type):
try:
self._notify_read_listeners(target)
with open(target, 'rb') as target_file:
for desc in stem.descriptor.parse_file(target_file, validate = self._validate, document_handler = self._document_handler, **self._kwargs):
if self._is_stopped.is_set():
return
self._unreturned_descriptors.put(desc)
self._iter_notice.set()
except TypeError:
self._notify_skip_listeners(target, UnrecognizedType(mime_type))
except ValueError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))
except IOError as exc:
self._notify_skip_listeners(target, ReadFailed(exc))
def _handle_archive(self, target):
# TODO: When dropping python 2.6 support go back to using 'with' for
# tarfiles...
#
# http://bugs.python.org/issue7232
tar_file = None
try:
self._notify_read_listeners(target)
tar_file = tarfile.open(target)
for tar_entry in tar_file:
if tar_entry.isfile():
entry = tar_file.extractfile(tar_entry)
try:
for desc in stem.descriptor.parse_file(entry, validate = self._validate, document_handler = self._document_handler, **self._kwargs):
if self._is_stopped.is_set():
return
desc._set_path(os.path.abspath(target))
desc._set_archive_path(tar_entry.name)
self._unreturned_descriptors.put(desc)
self._iter_notice.set()
except TypeError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))
except ValueError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))
finally:
entry.close()
except IOError as exc:
self._notify_skip_listeners(target, ReadFailed(exc))
finally:
if tar_file:
tar_file.close()
def _notify_read_listeners(self, path):
for listener in self._read_listeners:
listener(path)
def _notify_skip_listeners(self, path, exception):
for listener in self._skip_listeners:
listener(path, exception)
def __enter__(self):
self.start()
return self
def __exit__(self, exit_type, value, traceback):
self.stop()
stem-1.7.1/stem/descriptor/networkstatus.py 0000664 0001750 0001750 00000213357 13411002341 021647 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor network status documents. This supports both the v2 and v3
`dir-spec <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_.
Documents can be obtained from a few sources...
* The 'cached-consensus' file in Tor's data directory.
* Archived descriptors provided by `CollecTor
<https://collector.torproject.org/>`_.
* Directory authorities and mirrors via their DirPort.
... and contain the following sections...
* document header
* list of :class:`stem.descriptor.networkstatus.DirectoryAuthority`
* list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry`
* document footer
**For a great graphical overview see** `Jordan Wright's chart describing the
anatomy of the consensus
`_.
Of these, the router status entry section can be quite large (on the order of
hundreds of kilobytes). As such we provide a couple of methods for reading
network status documents through :func:`~stem.descriptor.__init__.parse_file`.
For more information see :func:`~stem.descriptor.__init__.DocumentHandler`...
::
from stem.descriptor import parse_file, DocumentHandler
with open('.tor/cached-consensus', 'rb') as consensus_file:
# Processes the routers as we read them in. The routers refer to a document
# with an unset 'routers' attribute.
for router in parse_file(consensus_file, 'network-status-consensus-3 1.0', document_handler = DocumentHandler.ENTRIES):
print(router.nickname)
**Module Overview:**
::
NetworkStatusDocument - Network status document
|- NetworkStatusDocumentV2 - Version 2 network status document
|- NetworkStatusDocumentV3 - Version 3 network status document
+- BridgeNetworkStatusDocument - Version 3 network status document for bridges
KeyCertificate - Certificate used to authenticate an authority
DocumentSignature - Signature of a document by a directory authority
DirectoryAuthority - Directory authority as defined in a v3 network status document
"""
import collections
import io
import stem.descriptor.router_status_entry
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
DocumentHandler,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_value,
_parse_simple_line,
_parse_if_present,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_mappings_for,
_random_nickname,
_random_fingerprint,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
RouterStatusEntryV2,
RouterStatusEntryV3,
RouterStatusEntryMicroV3,
)
# Version 2 network status document fields, tuples of the form...
# (keyword, is_mandatory)
NETWORK_STATUS_V2_FIELDS = (
('network-status-version', True),
('dir-source', True),
('fingerprint', True),
('contact', True),
('dir-signing-key', True),
('client-versions', False),
('server-versions', False),
('published', True),
('dir-options', False),
('directory-signature', True),
)
# Network status documents are either a 'vote' or 'consensus', with different
# mandatory fields for each. Both though require that their fields appear in a
# specific order. This is an ordered listing of the following...
#
# (field, in_votes, in_consensus, is_mandatory)
HEADER_STATUS_DOCUMENT_FIELDS = (
('network-status-version', True, True, True),
('vote-status', True, True, True),
('consensus-methods', True, False, False),
('consensus-method', False, True, False),
('published', True, False, True),
('valid-after', True, True, True),
('fresh-until', True, True, True),
('valid-until', True, True, True),
('voting-delay', True, True, True),
('client-versions', True, True, False),
('server-versions', True, True, False),
('package', True, True, False),
('known-flags', True, True, True),
('flag-thresholds', True, False, False),
('shared-rand-participate', True, False, False),
('shared-rand-commit', True, False, False),
('shared-rand-previous-value', True, True, False),
('shared-rand-current-value', True, True, False),
('bandwidth-file-headers', True, False, False),
('recommended-client-protocols', True, True, False),
('recommended-relay-protocols', True, True, False),
('required-client-protocols', True, True, False),
('required-relay-protocols', True, True, False),
('params', True, True, False),
)
FOOTER_STATUS_DOCUMENT_FIELDS = (
('directory-footer', True, True, False),
('bandwidth-weights', False, True, False),
('directory-signature', True, True, True),
)
AUTH_START = 'dir-source'
ROUTERS_START = 'r'
FOOTER_START = 'directory-footer'
V2_FOOTER_START = 'directory-signature'
DEFAULT_PARAMS = {
'bwweightscale': 10000,
'cbtdisabled': 0,
'cbtnummodes': 3,
'cbtrecentcount': 20,
'cbtmaxtimeouts': 18,
'cbtmincircs': 100,
'cbtquantile': 80,
'cbtclosequantile': 95,
'cbttestfreq': 60,
'cbtmintimeout': 2000,
'cbtinitialtimeout': 60000,
'cbtlearntimeout': 180,
'cbtmaxopencircs': 10,
'UseOptimisticData': 1,
'Support022HiddenServices': 1,
'usecreatefast': 1,
'max-consensuses-age-to-cache-for-diff': 72,
'try-diff-for-consensus-newer-than': 72,
'onion-key-rotation-days': 28,
'onion-key-grace-period-days': 7,
'hs_service_max_rdv_failures': 2,
'circ_max_cell_queue_size': 50000,
}
# KeyCertificate fields, tuple is of the form...
# (keyword, is_mandatory)
KEY_CERTIFICATE_PARAMS = (
('dir-key-certificate-version', True),
('dir-address', False),
('fingerprint', True),
('dir-identity-key', True),
('dir-key-published', True),
('dir-key-expires', True),
('dir-signing-key', True),
('dir-key-crosscert', False),
('dir-key-certification', True),
)
# all parameters are constrained to int32 range
MIN_PARAM, MAX_PARAM = -2147483648, 2147483647
PARAM_RANGE = {
'circwindow': (100, 1000),
'CircuitPriorityHalflifeMsec': (-1, MAX_PARAM),
'perconnbwrate': (-1, MAX_PARAM),
'perconnbwburst': (-1, MAX_PARAM),
'refuseunknownexits': (0, 1),
'bwweightscale': (1, MAX_PARAM),
'cbtdisabled': (0, 1),
'cbtnummodes': (1, 20),
'cbtrecentcount': (3, 1000),
'cbtmaxtimeouts': (3, 10000),
'cbtmincircs': (1, 10000),
'cbtquantile': (10, 99),
'cbtclosequantile': (MIN_PARAM, 99),
'cbttestfreq': (1, MAX_PARAM),
'cbtmintimeout': (500, MAX_PARAM),
'cbtlearntimeout': (10, 60000),
'cbtmaxopencircs': (0, 14),
'UseOptimisticData': (0, 1),
'Support022HiddenServices': (0, 1),
'usecreatefast': (0, 1),
'UseNTorHandshake': (0, 1),
'FastFlagMinThreshold': (4, MAX_PARAM),
'NumDirectoryGuards': (0, 10),
'NumEntryGuards': (1, 10),
'GuardLifetime': (2592000, 157766400), # min: 30 days, max: 1826 days
'NumNTorsPerTAP': (1, 100000),
'AllowNonearlyExtend': (0, 1),
'AuthDirNumSRVAgreements': (1, MAX_PARAM),
'max-consensuses-age-to-cache-for-diff': (0, 8192),
'try-diff-for-consensus-newer-than': (0, 8192),
'onion-key-rotation-days': (1, 90),
'onion-key-grace-period-days': (1, 90), # max is the highest onion-key-rotation-days
'hs_service_max_rdv_failures': (1, 10),
'circ_max_cell_queue_size': (1000, 4294967295),
}
class PackageVersion(collections.namedtuple('PackageVersion', ['name', 'version', 'url', 'digests'])):
"""
Latest recommended version of a package that's available.
:var str name: name of the package
:var str version: latest recommended version
:var str url: package's url
:var dict digests: mapping of digest types to their value
"""
class SharedRandomnessCommitment(collections.namedtuple('SharedRandomnessCommitment', ['version', 'algorithm', 'identity', 'commit', 'reveal'])):
"""
Directory authority's commitment for generating the next shared random value.
:var int version: shared randomness protocol version
:var str algorithm: hash algorithm used to make the commitment
:var str identity: authority's sha1 identity fingerprint
:var str commit: base64 encoded commitment hash to the shared random value
:var str reveal: base64 encoded commitment to the shared random value,
**None** if not provided
"""
def _parse_file(document_file, document_type = None, validate = False, is_microdescriptor = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
"""
Parses a network status and iterates over the RouterStatusEntry in it. The
document that these instances reference has an empty 'routers' attribute to
allow for limited memory usage.
:param file document_file: file with network status document content
:param class document_type: NetworkStatusDocument subclass
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param bool is_microdescriptor: **True** if this is for a microdescriptor
consensus, **False** otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param dict kwargs: additional arguments for the descriptor constructor
:returns: :class:`stem.descriptor.networkstatus.NetworkStatusDocument` object
:raises:
* **ValueError** if the document_version is unrecognized or the content is
malformed and validate is **True**
* **IOError** if the file can't be read
"""
# we can't properly default this since NetworkStatusDocumentV3 isn't defined yet
if document_type is None:
document_type = NetworkStatusDocumentV3
if document_type == NetworkStatusDocumentV2:
document_type, router_type = NetworkStatusDocumentV2, RouterStatusEntryV2
elif document_type == NetworkStatusDocumentV3:
router_type = RouterStatusEntryMicroV3 if is_microdescriptor else RouterStatusEntryV3
elif document_type == BridgeNetworkStatusDocument:
document_type, router_type = BridgeNetworkStatusDocument, RouterStatusEntryV2
else:
raise ValueError("Document type %i isn't recognized (only able to parse v2, v3, and bridge)" % document_type)
if document_handler == DocumentHandler.DOCUMENT:
yield document_type(document_file.read(), validate, **kwargs)
return
# getting the document without the routers section
header = _read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file)
if header and header[0].startswith(b'@type'):
header = header[1:]
routers_start = document_file.tell()
_read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True)
routers_end = document_file.tell()
footer = document_file.readlines()
document_content = bytes.join(b'', header + footer)
if document_handler == DocumentHandler.BARE_DOCUMENT:
yield document_type(document_content, validate, **kwargs)
elif document_handler == DocumentHandler.ENTRIES:
desc_iterator = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
start_position = routers_start,
end_position = routers_end,
extra_args = (document_type(document_content, validate),),
**kwargs
)
for desc in desc_iterator:
yield desc
else:
raise ValueError('Unrecognized document_handler: %s' % document_handler)
def _parse_file_key_certs(certificate_file, validate = False):
"""
Parses a file containing one or more authority key certificates.
:param file certificate_file: file with key certificates
:param bool validate: checks the validity of the certificate's contents if
**True**, skips these checks otherwise
:returns: iterator for :class:`stem.descriptor.networkstatus.KeyCertificate`
instance in the file
:raises:
* **ValueError** if the key certificate content is invalid and validate is
**True**
* **IOError** if the file can't be read
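As a sketch, reading tor's cached key certificates (the path is
illustrative)...

::

  with open('/home/atagar/.tor/cached-certs', 'rb') as cert_file:
    for cert in _parse_file_key_certs(cert_file):
      print(cert.fingerprint)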
"""
while True:
keycert_content = _read_until_keywords('dir-key-certification', certificate_file)
# we've reached the 'dir-key-certification', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
keycert_content += _read_until_keywords(block_end_prefix, certificate_file, True)
if keycert_content:
yield stem.descriptor.networkstatus.KeyCertificate(bytes.join(b'', keycert_content), validate = validate)
else:
break # done parsing file
class NetworkStatusDocument(Descriptor):
"""
Common parent for network status documents.
"""
def _parse_version_line(keyword, attribute, expected_version):
def _parse(descriptor, entries):
value = _value(keyword, entries)
if not value.isdigit():
raise ValueError('Document has a non-numeric version: %s %s' % (keyword, value))
setattr(descriptor, attribute, int(value))
if int(value) != expected_version:
raise ValueError("Expected a version %i document, but got version '%s' instead" % (expected_version, value))
return _parse
def _parse_dir_source_line(descriptor, entries):
value = _value('dir-source', entries)
dir_source_comp = value.split()
if len(dir_source_comp) < 3:
raise ValueError("The 'dir-source' line of a v2 network status document must have three values: dir-source %s" % value)
if not dir_source_comp[0]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[1]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[1])
elif not stem.util.connection.is_valid_port(dir_source_comp[2], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2])
descriptor.hostname = dir_source_comp[0]
descriptor.address = dir_source_comp[1]
descriptor.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2])
_parse_network_status_version_line = _parse_version_line('network-status-version', 'version', 2)
_parse_fingerprint_line = _parse_forty_character_hex('fingerprint', 'fingerprint')
_parse_contact_line = _parse_simple_line('contact', 'contact')
_parse_dir_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_client_versions_line = _parse_simple_line('client-versions', 'client_versions', func = lambda v: v.split(','))
_parse_server_versions_line = _parse_simple_line('server-versions', 'server_versions', func = lambda v: v.split(','))
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_dir_options_line = _parse_simple_line('dir-options', 'options', func = lambda v: v.split())
_parse_directory_signature_line = _parse_key_block('directory-signature', 'signature', 'SIGNATURE', value_attribute = 'signing_authority')
class NetworkStatusDocumentV2(NetworkStatusDocument):
"""
Version 2 network status document. These have been deprecated and are no
longer generated by Tor.
:var dict routers: fingerprints to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
contained in the document
:var int version: **\*** document version
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str contact: **\*** authority's contact information
:var str signing_key: **\*** authority's public signing key
:var list client_versions: list of recommended client tor version strings
:var list server_versions: list of recommended server tor version strings
:var datetime published: **\*** time when the document was published
:var list options: **\*** list of things that this authority decides
:var str signing_authority: **\*** name of the authority signing the document
:var str signature: **\*** authority's signature for the document
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
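As a sketch, reading an archived v2 document (path illustrative, these are
only available from archives such as CollecTor)...

::

  from stem.descriptor import DocumentHandler, parse_file

  with open('/tmp/network-status-2012-03-01', 'rb') as status_file:
    document = next(parse_file(status_file, 'network-status-2 1.0', document_handler = DocumentHandler.DOCUMENT))

  for fingerprint, router in document.routers.items():
    print('%s: %s' % (fingerprint, router.nickname))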
"""
ATTRIBUTES = {
'version': (None, _parse_network_status_version_line),
'hostname': (None, _parse_dir_source_line),
'address': (None, _parse_dir_source_line),
'dir_port': (None, _parse_dir_source_line),
'fingerprint': (None, _parse_fingerprint_line),
'contact': (None, _parse_contact_line),
'signing_key': (None, _parse_dir_signing_key_line),
'client_versions': ([], _parse_client_versions_line),
'server_versions': ([], _parse_server_versions_line),
'published': (None, _parse_published_line),
'options': ([], _parse_dir_options_line),
'signing_authority': (None, _parse_directory_signature_line),
'signature': (None, _parse_directory_signature_line),
}
PARSER_FOR_LINE = {
'network-status-version': _parse_network_status_version_line,
'dir-source': _parse_dir_source_line,
'fingerprint': _parse_fingerprint_line,
'contact': _parse_contact_line,
'dir-signing-key': _parse_dir_signing_key_line,
'client-versions': _parse_client_versions_line,
'server-versions': _parse_server_versions_line,
'published': _parse_published_line,
'dir-options': _parse_dir_options_line,
'directory-signature': _parse_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('network-status-version', '2'),
('dir-source', '%s %s 80' % (_random_ipv4_address(), _random_ipv4_address())),
('fingerprint', _random_fingerprint()),
('contact', 'arma at mit dot edu'),
('published', _random_date()),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('directory-signature', 'moria2' + _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(NetworkStatusDocumentV2, self).__init__(raw_content, lazy_load = not validate)
# Splitting the document from the routers. Unlike v3 documents we're not
# bending over backwards on the validation by checking the field order or
# that header/footer attributes aren't in the wrong section. This is a
# deprecated descriptor type - patches welcome if you want those checks.
document_file = io.BytesIO(raw_content)
document_content = bytes.join(b'', _read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryV2,
entry_keyword = ROUTERS_START,
section_end_keywords = (V2_FOOTER_START,),
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
entries = _descriptor_components(document_content + b'\n' + document_file.read(), validate)
if validate:
self._check_constraints(entries)
self._parse(entries, validate)
# 'client-versions' and 'server-versions' are only required if 'Versions'
# is among the options
if 'Versions' in self.options and not ('client-versions' in entries and 'server-versions' in entries):
raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self))
else:
self._entries = entries
def _check_constraints(self, entries):
required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory]
for keyword in required_fields:
if keyword not in entries:
raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self)))
# all recognized fields can only appear once
single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS]
for keyword in single_fields:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self)))
if 'network-status-version' != list(entries.keys())[0]:
raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self))
def _parse_header_network_status_version_line(descriptor, entries):
# "network-status-version" version
value = _value('network-status-version', entries)
if ' ' in value:
version, flavor = value.split(' ', 1)
else:
version, flavor = value, None
if not version.isdigit():
raise ValueError('Network status document has a non-numeric version: network-status-version %s' % value)
descriptor.version = int(version)
descriptor.version_flavor = flavor
descriptor.is_microdescriptor = flavor == 'microdesc'
if descriptor.version != 3:
raise ValueError("Expected a version 3 network status document, got version '%s' instead" % descriptor.version)
def _parse_header_vote_status_line(descriptor, entries):
# "vote-status" type
#
# The consensus-method and consensus-methods fields are optional since
# they weren't included in version 1. Setting a default now that we
# know if we're a vote or not.
value = _value('vote-status', entries)
if value == 'consensus':
descriptor.is_consensus, descriptor.is_vote = True, False
elif value == 'vote':
descriptor.is_consensus, descriptor.is_vote = False, True
else:
raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value)
def _parse_header_consensus_methods_line(descriptor, entries):
# "consensus-methods" IntegerList
if descriptor._lazy_loading and descriptor.is_vote:
descriptor.consensus_methods = [1]
value, consensus_methods = _value('consensus-methods', entries), []
for entry in value.split(' '):
if not entry.isdigit():
raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value)
consensus_methods.append(int(entry))
descriptor.consensus_methods = consensus_methods
def _parse_header_consensus_method_line(descriptor, entries):
# "consensus-method" Integer
if descriptor._lazy_loading and descriptor.is_consensus:
descriptor.consensus_method = 1
value = _value('consensus-method', entries)
if not value.isdigit():
raise ValueError("A network status document's consensus-method must be an integer, but was '%s'" % value)
descriptor.consensus_method = int(value)
def _parse_header_voting_delay_line(descriptor, entries):
# "voting-delay" VoteSeconds DistSeconds
value = _value('voting-delay', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit():
descriptor.vote_delay = int(value_comp[0])
descriptor.dist_delay = int(value_comp[1])
else:
raise ValueError("A network status document's 'voting-delay' line must be a pair of integer values, but was '%s'" % value)
def _parse_versions_line(keyword, attribute):
def _parse(descriptor, entries):
value, entries = _value(keyword, entries), []
for entry in value.split(','):
try:
entries.append(stem.version._get_version(entry))
except ValueError:
raise ValueError("Network status document's '%s' line had '%s', which isn't a parsable tor version: %s %s" % (keyword, entry, keyword, value))
setattr(descriptor, attribute, entries)
return _parse
def _parse_header_flag_thresholds_line(descriptor, entries):
# "flag-thresholds" SP THRESHOLDS
value, thresholds = _value('flag-thresholds', entries).strip(), {}
for key, val in _mappings_for('flag-thresholds', value):
try:
if val.endswith('%'):
# opting for string manipulation rather than just
# 'float(entry_value) / 100' because floating point arithmetic
# will lose precision
thresholds[key] = float('0.' + val[:-1].replace('.', '', 1))
elif '.' in val:
thresholds[key] = float(val)
else:
thresholds[key] = int(val)
except ValueError:
raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value)
descriptor.flag_thresholds = thresholds
def _parse_header_parameters_line(descriptor, entries):
# "params" [Parameters]
# Parameter ::= Keyword '=' Int32
# Int32 ::= A decimal integer between -2147483648 and 2147483647.
# Parameters ::= Parameter | Parameters SP Parameter
if descriptor._lazy_loading:
descriptor.params = dict(DEFAULT_PARAMS) if descriptor._default_params else {}
value = _value('params', entries)
if value != '':
descriptor.params = _parse_int_mappings('params', value, True)
descriptor._check_params_constraints()
def _parse_directory_footer_line(descriptor, entries):
# nothing to parse, simply checking that we don't have a value
value = _value('directory-footer', entries)
if value:
raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got 'directory-footer %s'" % value)
def _parse_footer_directory_signature_line(descriptor, entries):
signatures = []
for sig_value, block_type, block_contents in entries['directory-signature']:
if sig_value.count(' ') not in (1, 2):
raise ValueError("Authority signatures in a network status document are expected to be of the form 'directory-signature [METHOD] FINGERPRINT KEY_DIGEST', received: %s" % sig_value)
if not block_contents or block_type != 'SIGNATURE':
raise ValueError("'directory-signature' should be followed by a SIGNATURE block, but was a %s" % block_type)
if sig_value.count(' ') == 1:
method = 'sha1' # default if none was provided
fingerprint, key_digest = sig_value.split(' ', 1)
else:
method, fingerprint, key_digest = sig_value.split(' ', 2)
signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, True))
descriptor.signatures = signatures
def _parse_package_line(descriptor, entries):
package_versions = []
for value, _, _ in entries['package']:
value_comp = value.split(' ', 3)
if len(value_comp) < 3:
raise ValueError("'package' must at least have a 'PackageName Version URL': %s" % value)
name, version, url = value_comp[:3]
digests = {}
if len(value_comp) == 4:
for key, val in _mappings_for('package', value_comp[3]):
digests[key] = val
package_versions.append(PackageVersion(name, version, url, digests))
descriptor.packages = package_versions
def _parsed_shared_rand_commit(descriptor, entries):
# "shared-rand-commit" Version AlgName Identity Commit [Reveal]
commitments = []
for value, _, _ in entries['shared-rand-commit']:
value_comp = value.split()
if len(value_comp) < 4:
raise ValueError("'shared-rand-commit' must at least have a 'Version AlgName Identity Commit': %s" % value)
version, algorithm, identity, commit = value_comp[:4]
reveal = value_comp[4] if len(value_comp) >= 5 else None
if not version.isdigit():
raise ValueError("The version on our 'shared-rand-commit' line wasn't an integer: %s" % value)
commitments.append(SharedRandomnessCommitment(int(version), algorithm, identity, commit, reveal))
descriptor.shared_randomness_commitments = commitments
def _parse_shared_rand_previous_value(descriptor, entries):
# "shared-rand-previous-value" NumReveals Value
value = _value('shared-rand-previous-value', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit():
descriptor.shared_randomness_previous_reveal_count = int(value_comp[0])
descriptor.shared_randomness_previous_value = value_comp[1]
else:
raise ValueError("A network status document's 'shared-rand-previous-value' line must be a pair of values, the first an integer but was '%s'" % value)
def _parse_shared_rand_current_value(descriptor, entries):
# "shared-rand-current-value" NumReveals Value
value = _value('shared-rand-current-value', entries)
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit():
descriptor.shared_randomness_current_reveal_count = int(value_comp[0])
descriptor.shared_randomness_current_value = value_comp[1]
else:
raise ValueError("A network status document's 'shared-rand-current-value' line must be a pair of values, the first an integer but was '%s'" % value)
def _parse_bandwidth_file_headers(descriptor, entries):
# "bandwidth-file-headers" KeyValues
# KeyValues ::= "" | KeyValue | KeyValues SP KeyValue
# KeyValue ::= Keyword '=' Value
# Value ::= ArgumentChar+
value = _value('bandwidth-file-headers', entries)
results = {}
for key, val in _mappings_for('bandwidth-file-headers', value):
results[key] = val
descriptor.bandwidth_file_headers = results
_parse_header_valid_after_line = _parse_timestamp_line('valid-after', 'valid_after')
_parse_header_fresh_until_line = _parse_timestamp_line('fresh-until', 'fresh_until')
_parse_header_valid_until_line = _parse_timestamp_line('valid-until', 'valid_until')
_parse_header_client_versions_line = _parse_versions_line('client-versions', 'client_versions')
_parse_header_server_versions_line = _parse_versions_line('server-versions', 'server_versions')
_parse_header_known_flags_line = _parse_simple_line('known-flags', 'known_flags', func = lambda v: [entry for entry in v.split(' ') if entry])
_parse_footer_bandwidth_weights_line = _parse_simple_line('bandwidth-weights', 'bandwidth_weights', func = lambda v: _parse_int_mappings('bandwidth-weights', v, True))
_parse_shared_rand_participate_line = _parse_if_present('shared-rand-participate', 'is_shared_randomness_participate')
_parse_recommended_client_protocols_line = _parse_protocol_line('recommended-client-protocols', 'recommended_client_protocols')
_parse_recommended_relay_protocols_line = _parse_protocol_line('recommended-relay-protocols', 'recommended_relay_protocols')
_parse_required_client_protocols_line = _parse_protocol_line('required-client-protocols', 'required_client_protocols')
_parse_required_relay_protocols_line = _parse_protocol_line('required-relay-protocols', 'required_relay_protocols')
class NetworkStatusDocumentV3(NetworkStatusDocument):
"""
Version 3 network status document. This could be either a vote or consensus.
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var int version: **\*** document version
:var str version_flavor: **\*** flavor associated with the document (such as 'microdesc')
:var bool is_consensus: **\*** **True** if the document is a consensus
:var bool is_vote: **\*** **True** if the document is a vote
:var bool is_microdescriptor: **\*** **True** if this is a microdescriptor
flavored document, **False** otherwise
:var datetime valid_after: **\*** time when the consensus became valid
:var datetime fresh_until: **\*** time when the next consensus should be produced
:var datetime valid_until: **\*** time when this consensus becomes obsolete
:var int vote_delay: **\*** number of seconds allowed for collecting votes
from all authorities
:var int dist_delay: **\*** number of seconds allowed for collecting
signatures from all authorities
:var list client_versions: list of recommended client tor versions
:var list server_versions: list of recommended server tor versions
:var list packages: **\*** list of :data:`~stem.descriptor.networkstatus.PackageVersion` entries
:var list known_flags: **\*** list of :data:`~stem.Flag` for the router's flags
:var dict params: **\*** dict of parameter(**str**) => value(**int**) mappings
:var list directory_authorities: **\*** list of :class:`~stem.descriptor.networkstatus.DirectoryAuthority`
objects that have generated this document
:var list signatures: **\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
of the authorities that have signed the document
**Consensus Attributes:**
:var int consensus_method: method version used to generate this consensus
:var dict bandwidth_weights: dict of weight(str) => value(int) mappings
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
**Vote Attributes:**
:var list consensus_methods: list of ints for the supported method versions
:var datetime published: time when the document was published
:var dict flag_thresholds: **\*** mapping of internal performance thresholds used while making the vote, values are **ints** or **floats**
:var dict recommended_client_protocols: recommended protocols for clients
:var dict recommended_relay_protocols: recommended protocols for relays
:var dict required_client_protocols: required protocols for clients
:var dict required_relay_protocols: required protocols for relays
:var dict bandwidth_file_headers: headers from the bandwidth authority that
generated this vote
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as None if undefined
.. versionchanged:: 1.4.0
Added the packages attribute.
.. versionchanged:: 1.5.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
.. versionchanged:: 1.6.0
Added the recommended_client_protocols, recommended_relay_protocols,
required_client_protocols, and required_relay_protocols attributes.
.. versionchanged:: 1.6.0
The is_shared_randomness_participate and shared_randomness_commitments
were misdocumented in the tor spec and as such never set. They're now an
attribute of votes in the **directory_authorities**.
.. versionchanged:: 1.7.0
The shared_randomness_current_reveal_count and
shared_randomness_previous_reveal_count attributes were undocumented and
not provided properly if retrieved before their shared_randomness_*_value
counterpart.
.. versionchanged:: 1.7.0
Added the bandwidth_file_headers attribute.
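For example, to read the consensus from a local tor data directory (path
illustrative)...

::

  from stem.descriptor import DocumentHandler, parse_file

  consensus = next(parse_file(
    '/home/atagar/.tor/cached-consensus',
    descriptor_type = 'network-status-consensus-3 1.0',
    document_handler = DocumentHandler.DOCUMENT,
  ))

  for fingerprint, relay in consensus.routers.items():
    print('%s: %s' % (fingerprint, relay.nickname))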
"""
ATTRIBUTES = {
'version': (None, _parse_header_network_status_version_line),
'version_flavor': (None, _parse_header_network_status_version_line),
'is_consensus': (True, _parse_header_vote_status_line),
'is_vote': (False, _parse_header_vote_status_line),
'is_microdescriptor': (False, _parse_header_network_status_version_line),
'consensus_methods': ([], _parse_header_consensus_methods_line),
'published': (None, _parse_published_line),
'consensus_method': (None, _parse_header_consensus_method_line),
'valid_after': (None, _parse_header_valid_after_line),
'fresh_until': (None, _parse_header_fresh_until_line),
'valid_until': (None, _parse_header_valid_until_line),
'vote_delay': (None, _parse_header_voting_delay_line),
'dist_delay': (None, _parse_header_voting_delay_line),
'client_versions': ([], _parse_header_client_versions_line),
'server_versions': ([], _parse_header_server_versions_line),
'packages': ([], _parse_package_line),
'known_flags': ([], _parse_header_known_flags_line),
'flag_thresholds': ({}, _parse_header_flag_thresholds_line),
'recommended_client_protocols': ({}, _parse_recommended_client_protocols_line),
'recommended_relay_protocols': ({}, _parse_recommended_relay_protocols_line),
'required_client_protocols': ({}, _parse_required_client_protocols_line),
'required_relay_protocols': ({}, _parse_required_relay_protocols_line),
'params': ({}, _parse_header_parameters_line),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
'bandwidth_file_headers': ({}, _parse_bandwidth_file_headers),
'signatures': ([], _parse_footer_directory_signature_line),
'bandwidth_weights': ({}, _parse_footer_bandwidth_weights_line),
}
HEADER_PARSER_FOR_LINE = {
'network-status-version': _parse_header_network_status_version_line,
'vote-status': _parse_header_vote_status_line,
'consensus-methods': _parse_header_consensus_methods_line,
'consensus-method': _parse_header_consensus_method_line,
'published': _parse_published_line,
'valid-after': _parse_header_valid_after_line,
'fresh-until': _parse_header_fresh_until_line,
'valid-until': _parse_header_valid_until_line,
'voting-delay': _parse_header_voting_delay_line,
'client-versions': _parse_header_client_versions_line,
'server-versions': _parse_header_server_versions_line,
'package': _parse_package_line,
'known-flags': _parse_header_known_flags_line,
'flag-thresholds': _parse_header_flag_thresholds_line,
'recommended-client-protocols': _parse_recommended_client_protocols_line,
'recommended-relay-protocols': _parse_recommended_relay_protocols_line,
'required-client-protocols': _parse_required_client_protocols_line,
'required-relay-protocols': _parse_required_relay_protocols_line,
'params': _parse_header_parameters_line,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
'bandwidth-file-headers': _parse_bandwidth_file_headers,
}
FOOTER_PARSER_FOR_LINE = {
'directory-footer': _parse_directory_footer_line,
'bandwidth-weights': _parse_footer_bandwidth_weights_line,
'directory-signature': _parse_footer_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, authorities = None, routers = None):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
is_vote = attr.get('vote-status') == 'vote'
if is_vote:
extra_defaults = {'consensus-methods': '1 9', 'published': _random_date()}
else:
extra_defaults = {'consensus-method': '9'}
if is_vote and authorities is None:
authorities = [DirectoryAuthority.create(is_vote = is_vote)]
for k, v in extra_defaults.items():
if exclude and k in exclude:
continue # explicitly excluding this field
elif k not in attr:
attr[k] = v
desc_content = _descriptor_content(attr, exclude, (
('network-status-version', '3'),
('vote-status', 'consensus'),
('consensus-methods', None),
('consensus-method', None),
('published', None),
('valid-after', _random_date()),
('fresh-until', _random_date()),
('valid-until', _random_date()),
('voting-delay', '300 300'),
('client-versions', None),
('server-versions', None),
('package', None),
('known-flags', 'Authority BadExit Exit Fast Guard HSDir Named Running Stable Unnamed V2Dir Valid'),
('params', None),
), (
('directory-footer', ''),
('bandwidth-weights', None),
('directory-signature', '%s %s%s' % (_random_fingerprint(), _random_fingerprint(), _random_crypto_blob('SIGNATURE'))),
))
# inject the authorities and/or routers between the header and footer
if authorities:
if b'directory-footer' in desc_content:
footer_div = desc_content.find(b'\ndirectory-footer') + 1
elif b'directory-signature' in desc_content:
footer_div = desc_content.find(b'\ndirectory-signature') + 1
else:
if routers:
desc_content += b'\n'
footer_div = len(desc_content) + 1
authority_content = stem.util.str_tools._to_bytes('\n'.join([str(a) for a in authorities]) + '\n')
desc_content = desc_content[:footer_div] + authority_content + desc_content[footer_div:]
if routers:
if b'directory-footer' in desc_content:
footer_div = desc_content.find(b'\ndirectory-footer') + 1
elif b'directory-signature' in desc_content:
footer_div = desc_content.find(b'\ndirectory-signature') + 1
else:
if routers:
desc_content += b'\n'
footer_div = len(desc_content) + 1
router_content = stem.util.str_tools._to_bytes('\n'.join([str(r) for r in routers]) + '\n')
desc_content = desc_content[:footer_div] + router_content + desc_content[footer_div:]
return desc_content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, authorities = None, routers = None):
return cls(cls.content(attr, exclude, sign, authorities, routers), validate = validate)
def __init__(self, raw_content, validate = False, default_params = True):
"""
Parse a v3 network status document.
:param str raw_content: raw network status document data
:param bool validate: **True** if the document is to be validated, **False** otherwise
:param bool default_params: includes defaults in our params dict, otherwise
it just contains values from the document
:raises: **ValueError** if the document is invalid
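A minimal parsing sketch (the cached-consensus path is just an example and
varies by platform)...

::

  with open('/var/lib/tor/cached-consensus', 'rb') as consensus_file:
    consensus = NetworkStatusDocumentV3(consensus_file.read(), validate = True)
    print('consensus contains %i relays' % len(consensus.routers))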
"""
super(NetworkStatusDocumentV3, self).__init__(raw_content, lazy_load = not validate)
document_file = io.BytesIO(raw_content)
# TODO: Tor misdocumented these as being in the header rather than the
# authority section. As such these have never been set but we need the
# attributes for stem 1.5 compatibility. Drop these in 2.0.
self.is_shared_randomness_participate = False
self.shared_randomness_commitments = []
self._default_params = default_params
self._header(document_file, validate)
self.directory_authorities = tuple(stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = DirectoryAuthority,
entry_keyword = AUTH_START,
section_end_keywords = (ROUTERS_START, FOOTER_START, V2_FOOTER_START),
extra_args = (self.is_vote,),
))
if validate and self.is_vote and len(self.directory_authorities) != 1:
raise ValueError('Votes should only have an authority entry for the one that issued it, got %i: %s' % (len(self.directory_authorities), self.directory_authorities))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryMicroV3 if self.is_microdescriptor else RouterStatusEntryV3,
entry_keyword = ROUTERS_START,
section_end_keywords = (FOOTER_START, V2_FOOTER_START),
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
self._footer(document_file, validate)
def validate_signatures(self, key_certs):
"""
Validates we're properly signed by the signing certificates.
.. versionadded:: 1.6.0
:param list key_certs: :class:`~stem.descriptor.networkstatus.KeyCertificate`
to validate the consensus against
:raises: **ValueError** if an insufficient number of valid signatures are present.
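A sketch of validating a freshly downloaded consensus, assuming
stem.descriptor.remote's get_consensus() and get_key_certificates() are
available and both downloads succeed...

::

  import stem.descriptor
  import stem.descriptor.remote

  consensus = stem.descriptor.remote.get_consensus(
    document_handler = stem.descriptor.DocumentHandler.DOCUMENT,
  ).run()[0]

  key_certs = stem.descriptor.remote.get_key_certificates().run()
  consensus.validate_signatures(key_certs)  # raises ValueError if under-signed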
"""
# sha1 hash of the body and header
local_digest = self._digest_for_content(b'network-status-version', b'directory-signature ')
valid_digests, total_digests = 0, 0
required_digests = len(self.signatures) / 2.0
signing_keys = dict([(cert.fingerprint, cert.signing_key) for cert in key_certs])
for sig in self.signatures:
if sig.identity not in signing_keys:
continue
signed_digest = self._digest_for_signature(signing_keys[sig.identity], sig.signature)
total_digests += 1
if signed_digest == local_digest:
valid_digests += 1
if valid_digests < required_digests:
raise ValueError('Network Status Document has %i valid signatures out of %i total, needed %i' % (valid_digests, total_digests, required_digests))
def get_unrecognized_lines(self):
if self._lazy_loading:
self._parse(self._header_entries, False, parser_for_line = self.HEADER_PARSER_FOR_LINE)
self._parse(self._footer_entries, False, parser_for_line = self.FOOTER_PARSER_FOR_LINE)
self._lazy_loading = False
return super(NetworkStatusDocumentV3, self).get_unrecognized_lines()
def meets_consensus_method(self, method):
"""
Checks if we meet the given consensus-method. This works for both votes and
consensuses, checking our 'consensus-method' and 'consensus-methods'
entries.
:param int method: consensus-method to check for
:returns: **True** if we meet the given consensus-method, and **False** otherwise
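For instance, a vote with 'consensus-methods 1 9' meets method 5 (it lists a
method of at least 5) but not method 10. A quick sketch, assuming 'document'
is a parsed vote or consensus...

::

  if document.meets_consensus_method(9):
    print('footer starts with a directory-footer line')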
"""
if self.consensus_method is not None:
return self.consensus_method >= method
elif self.consensus_methods is not None:
return bool([x for x in self.consensus_methods if x >= method])
else:
return False # malformed document
def _compare(self, other, method):
if not isinstance(other, NetworkStatusDocumentV3):
return False
return method(str(self).strip(), str(other).strip())
def _header(self, document_file, validate):
content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = _descriptor_components(content, validate)
header_fields = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
if validate:
# all known header fields can only appear once except 'package' and 'shared-rand-commit'
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in header_fields and keyword != 'package' and keyword != 'shared-rand-commit':
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if self._default_params:
self.params = dict(DEFAULT_PARAMS)
self._parse(entries, validate, parser_for_line = self.HEADER_PARSER_FOR_LINE)
# should only appear in consensus-method 7 or later
if not self.meets_consensus_method(7) and 'params' in list(entries.keys()):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
# default consensus_method and consensus_methods based on if we're a consensus or vote
if self.is_consensus and not self.consensus_method:
self.consensus_method = 1
elif self.is_vote and not self.consensus_methods:
self.consensus_methods = [1]
else:
self._header_entries = entries
self._entries.update(entries)
def _footer(self, document_file, validate):
entries = _descriptor_components(document_file.read(), validate)
footer_fields = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
if validate:
for keyword, values in list(entries.items()):
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if len(values) > 1 and keyword in footer_fields:
if not (keyword == 'directory-signature' and self.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
self._parse(entries, validate, parser_for_line = self.FOOTER_PARSER_FOR_LINE)
# Check that the footer has the right initial line. Prior to consensus
# method 9 it's a 'directory-signature' and after that footers start with
# 'directory-footer'.
if entries:
if self.meets_consensus_method(9):
if list(entries.keys())[0] != 'directory-footer':
raise ValueError("Network status document's footer should start with a 'directory-footer' line in consensus-method 9 or later")
else:
if list(entries.keys())[0] != 'directory-signature':
raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9")
_check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
else:
self._footer_entries = entries
self._entries.update(entries)
def _check_params_constraints(self):
"""
Checks that the params we know about are within their documented ranges.
"""
for key, value in self.params.items():
minimum, maximum = PARAM_RANGE.get(key, (MIN_PARAM, MAX_PARAM))
# there are a few dynamic parameter ranges
if key == 'cbtclosequantile':
minimum = self.params.get('cbtquantile', minimum)
elif key == 'cbtinitialtimeout':
minimum = self.params.get('cbtmintimeout', minimum)
if value < minimum or value > maximum:
raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def _check_for_missing_and_disallowed_fields(document, entries, fields):
"""
Checks that we have mandatory fields for our type, and that we don't have
any fields exclusive to the other (ie, no vote-only fields appear in a
consensus or vice versa).
:param NetworkStatusDocumentV3 document: network status document
:param dict entries: ordered keyword/value mappings of the header or footer
:param list fields: expected field attributes (either
**HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
:raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
"""
missing_fields, disallowed_fields = [], []
for field, in_votes, in_consensus, mandatory in fields:
if mandatory and ((document.is_consensus and in_consensus) or (document.is_vote and in_votes)):
# mandatory field, check that we have it
if field not in entries.keys():
missing_fields.append(field)
elif (document.is_consensus and not in_consensus) or (document.is_vote and not in_votes):
# field we shouldn't have, check that we don't
if field in entries.keys():
disallowed_fields.append(field)
if missing_fields:
raise ValueError('Network status document is missing mandatory field: %s' % ', '.join(missing_fields))
if disallowed_fields:
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
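#
# For example, a line of 'bwweightscale=10000 circwindow=1000' (illustrative
# values) yields {'bwweightscale': 10000, 'circwindow': 1000}.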
results, seen_keys = {}, []
error_template = "Unable to parse network status document's '%s' line (%%s): %s" % (keyword, value)
for key, val in _mappings_for(keyword, value):
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > key:
raise ValueError(error_template % 'parameters must be sorted by their key')
try:
# the int() function accepts things like '+123', but we don't want to
if val.startswith('+'):
raise ValueError()
results[key] = int(val)
except ValueError:
raise ValueError(error_template % ("'%s' is a non-numeric value" % val))
seen_keys.append(key)
return results
def _parse_dirauth_source_line(descriptor, entries):
# "dir-source" nickname identity address IP dirport orport
value = _value('dir-source', entries)
dir_source_comp = value.split(' ')
if len(dir_source_comp) < 6:
raise ValueError("Authority entry's 'dir-source' line must have six values: dir-source %s" % value)
# drop the '-legacy' suffix before validating: str.rstrip() strips a character set, not a suffix
nickname = dir_source_comp[0][:-len('-legacy')] if dir_source_comp[0].endswith('-legacy') else dir_source_comp[0]
if not stem.util.tor_tools.is_valid_nickname(nickname):
raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]):
raise ValueError("Authority's v3ident is invalid: %s" % dir_source_comp[1])
elif not dir_source_comp[2]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[3]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3])
elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4])
elif not stem.util.connection.is_valid_port(dir_source_comp[5]):
raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
descriptor.nickname = dir_source_comp[0]
descriptor.v3ident = dir_source_comp[1]
descriptor.hostname = dir_source_comp[2]
descriptor.address = dir_source_comp[3]
descriptor.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4])
descriptor.or_port = int(dir_source_comp[5])
descriptor.is_legacy = descriptor.nickname.endswith('-legacy')
_parse_legacy_dir_key_line = _parse_forty_character_hex('legacy-dir-key', 'legacy_dir_key')
_parse_vote_digest_line = _parse_forty_character_hex('vote-digest', 'vote_digest')
class DirectoryAuthority(Descriptor):
"""
Directory authority information obtained from a v3 network status document.
Authorities can optionally use a legacy format. These are no longer found in
practice, but have the following differences...
* The authority's nickname ends with '-legacy'.
* There's no **contact** or **vote_digest** attribute.
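Sketch of listing a document's authorities, assuming 'document' is a parsed
NetworkStatusDocumentV3...

::

  for authority in document.directory_authorities:
    print('%s (%s)' % (authority.nickname, authority.v3ident))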
:var str nickname: **\*** authority's nickname
:var str v3ident: **\*** identity key fingerprint used to sign votes and consensus
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var int or_port: **\*** authority's ORPort
:var bool is_legacy: **\*** if the authority's using the legacy format
:var str contact: contact information, this is included if is_legacy is **False**
**Consensus Attributes:**
:var str vote_digest: digest of the authority that contributed to the consensus, this is included if is_legacy is **False**
**Vote Attributes:**
:var str legacy_dir_key: fingerprint of an obsolete identity key
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
authority's key certificate
:var bool is_shared_randomness_participate: **\*** **True** if this authority
participates in establishing a shared random value, **False** otherwise
:var list shared_randomness_commitments: **\*** list of
:data:`~stem.descriptor.networkstatus.SharedRandomnessCommitment` entries
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
**\*** mandatory attribute
.. versionchanged:: 1.4.0
Renamed our 'fingerprint' attribute to 'v3ident' (prior attribute exists
for backward compatibility, but is deprecated).
.. versionchanged:: 1.6.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
"""
ATTRIBUTES = {
'nickname': (None, _parse_dirauth_source_line),
'v3ident': (None, _parse_dirauth_source_line),
'hostname': (None, _parse_dirauth_source_line),
'address': (None, _parse_dirauth_source_line),
'dir_port': (None, _parse_dirauth_source_line),
'or_port': (None, _parse_dirauth_source_line),
'is_legacy': (False, _parse_dirauth_source_line),
'contact': (None, _parse_contact_line),
'vote_digest': (None, _parse_vote_digest_line),
'legacy_dir_key': (None, _parse_legacy_dir_key_line),
'is_shared_randomness_participate': (False, _parse_shared_rand_participate_line),
'shared_randomness_commitments': ([], _parsed_shared_rand_commit),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
}
PARSER_FOR_LINE = {
'dir-source': _parse_dirauth_source_line,
'contact': _parse_contact_line,
'legacy-dir-key': _parse_legacy_dir_key_line,
'vote-digest': _parse_vote_digest_line,
'shared-rand-participate': _parse_shared_rand_participate_line,
'shared-rand-commit': _parsed_shared_rand_commit,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, is_vote = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
# include mandatory 'vote-digest' if a consensus
if not is_vote and not ('vote-digest' in attr or (exclude and 'vote-digest' in exclude)):
attr['vote-digest'] = _random_fingerprint()
content = _descriptor_content(attr, exclude, (
('dir-source', '%s %s no.place.com %s 9030 9090' % (_random_nickname(), _random_fingerprint(), _random_ipv4_address())),
('contact', 'Mike Perry <mikeperry-unlisted@fscked.org>'),
))
if is_vote:
content += b'\n' + KeyCertificate.content()
return content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, is_vote = False):
return cls(cls.content(attr, exclude, sign, is_vote), validate = validate, is_vote = is_vote)
def __init__(self, raw_content, validate = False, is_vote = False):
"""
Parse a directory authority entry in a v3 network status document.
:param str raw_content: raw directory authority entry information
:param bool validate: checks the validity of the content if True, skips
these checks otherwise
:param bool is_vote: True if this is for a vote, False if it's for a consensus
:raises: ValueError if the descriptor data is invalid
"""
super(DirectoryAuthority, self).__init__(raw_content, lazy_load = not validate)
content = stem.util.str_tools._to_unicode(raw_content)
# separate the directory authority entry from its key certificate
key_div = content.find('\ndir-key-certificate-version')
if key_div != -1:
self.key_certificate = KeyCertificate(content[key_div + 1:], validate)
content = content[:key_div + 1]
else:
self.key_certificate = None
entries = _descriptor_components(content, validate)
if validate and 'dir-source' != list(entries.keys())[0]:
raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
# check that we have mandatory fields
if validate:
is_legacy, dir_source_entry = False, entries.get('dir-source')
if dir_source_entry:
is_legacy = dir_source_entry[0][0].split()[0].endswith('-legacy')
required_fields, excluded_fields = ['dir-source'], []
if not is_legacy:
required_fields += ['contact']
if is_vote:
if not self.key_certificate:
raise ValueError('Authority votes must have a key certificate:\n%s' % content)
excluded_fields += ['vote-digest']
elif not is_vote:
if self.key_certificate:
raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content)
if not is_legacy:
required_fields += ['vote-digest']
excluded_fields += ['legacy-dir-key']
for keyword in required_fields:
if keyword not in entries:
raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content))
for keyword in entries:
if keyword in excluded_fields:
type_label = 'votes' if is_vote else 'consensus entries'
raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content))
# all known attributes can only appear at most once
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'):
raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content))
self._parse(entries, validate)
else:
self._entries = entries
# TODO: Due to a bug we had a 'fingerprint' rather than 'v3ident' attribute
# for a long while. Keeping this around for backward compatibility, but
# this will be dropped in stem's 2.0 release.
self.fingerprint = self.v3ident
def _compare(self, other, method):
if not isinstance(other, DirectoryAuthority):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def _parse_dir_address_line(descriptor, entries):
# "dir-address" IPPort
value = _value('dir-address', entries)
if ':' not in value:
raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: dir-address %s" % value)
address, dirport = value.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("Key certificate's address isn't a valid IPv4 address: dir-address %s" % value)
elif not stem.util.connection.is_valid_port(dirport):
raise ValueError("Key certificate's dirport is invalid: dir-address %s" % value)
descriptor.address = address
descriptor.dir_port = int(dirport)
_parse_dir_key_certificate_version_line = _parse_version_line('dir-key-certificate-version', 'version', 3)
_parse_dir_key_published_line = _parse_timestamp_line('dir-key-published', 'published')
_parse_dir_key_expires_line = _parse_timestamp_line('dir-key-expires', 'expires')
_parse_identity_key_line = _parse_key_block('dir-identity-key', 'identity_key', 'RSA PUBLIC KEY')
_parse_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_dir_key_crosscert_line = _parse_key_block('dir-key-crosscert', 'crosscert', 'ID SIGNATURE')
_parse_dir_key_certification_line = _parse_key_block('dir-key-certification', 'certification', 'SIGNATURE')
class KeyCertificate(Descriptor):
"""
Directory key certificate for a v3 network status document.
:var int version: **\*** version of the key certificate
:var str address: authority's IP address
:var int dir_port: authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str identity_key: **\*** long term authority identity key
:var datetime published: **\*** time when this key was generated
:var datetime expires: **\*** time after which this key becomes invalid
:var str signing_key: **\*** directory server's public signing key
:var str crosscert: signature made using certificate's signing key
:var str certification: **\*** signature of this key certificate signed with
the identity key
**\*** mandatory attribute
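A parsing sketch, assuming 'cert_content' holds a certificate's bytes...

::

  cert = KeyCertificate(cert_content, validate = True)
  print('%s expires %s' % (cert.fingerprint, cert.expires))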
"""
ATTRIBUTES = {
'version': (None, _parse_dir_key_certificate_version_line),
'address': (None, _parse_dir_address_line),
'dir_port': (None, _parse_dir_address_line),
'fingerprint': (None, _parse_fingerprint_line),
'identity_key': (None, _parse_identity_key_line),
'published': (None, _parse_dir_key_published_line),
'expires': (None, _parse_dir_key_expires_line),
'signing_key': (None, _parse_signing_key_line),
'crosscert': (None, _parse_dir_key_crosscert_line),
'certification': (None, _parse_dir_key_certification_line),
}
PARSER_FOR_LINE = {
'dir-key-certificate-version': _parse_dir_key_certificate_version_line,
'dir-address': _parse_dir_address_line,
'fingerprint': _parse_fingerprint_line,
'dir-key-published': _parse_dir_key_published_line,
'dir-key-expires': _parse_dir_key_expires_line,
'dir-identity-key': _parse_identity_key_line,
'dir-signing-key': _parse_signing_key_line,
'dir-key-crosscert': _parse_dir_key_crosscert_line,
'dir-key-certification': _parse_dir_key_certification_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('dir-key-certificate-version', '3'),
('fingerprint', _random_fingerprint()),
('dir-key-published', _random_date()),
('dir-key-expires', _random_date()),
('dir-identity-key', _random_crypto_blob('RSA PUBLIC KEY')),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('dir-key-certification', _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(KeyCertificate, self).__init__(raw_content, lazy_load = not validate)
entries = _descriptor_components(raw_content, validate)
if validate:
if 'dir-key-certificate-version' != list(entries.keys())[0]:
raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (raw_content))
elif 'dir-key-certification' != list(entries.keys())[-1]:
raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (raw_content))
# check that we have mandatory fields and that our known fields only
# appear once
for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS:
if is_mandatory and keyword not in entries:
raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, raw_content))
entry_count = len(entries.get(keyword, []))
if entry_count > 1:
raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, raw_content))
self._parse(entries, validate)
else:
self._entries = entries
def _compare(self, other, method):
if not isinstance(other, KeyCertificate):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class DocumentSignature(object):
"""
Directory signature of a v3 network status document.
:var str method: algorithm used to make the signature
:var str identity: fingerprint of the authority that made the signature
:var str key_digest: digest of the signing key
:var str signature: document signature
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
def __init__(self, method, identity, key_digest, signature, validate = False):
# Checking that these attributes are valid. Technically the key
# digest isn't a fingerprint, but it has the same characteristics.
if validate:
if not stem.util.tor_tools.is_valid_fingerprint(identity):
raise ValueError('Malformed fingerprint (%s) in the document signature' % identity)
if not stem.util.tor_tools.is_valid_fingerprint(key_digest):
raise ValueError('Malformed key digest (%s) in the document signature' % key_digest)
self.method = method
self.identity = identity
self.key_digest = key_digest
self.signature = signature
def _compare(self, other, method):
if not isinstance(other, DocumentSignature):
return False
for attr in ('method', 'identity', 'key_digest', 'signature'):
if getattr(self, attr) != getattr(other, attr):
return method(getattr(self, attr), getattr(other, attr))
return method(True, True) # we're equal
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class BridgeNetworkStatusDocument(NetworkStatusDocument):
"""
Network status document containing bridges. This is only available through
the metrics site.
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
mapping for relays contained in the document
:var datetime published: time when the document was published
"""
def __init__(self, raw_content, validate = False):
super(BridgeNetworkStatusDocument, self).__init__(raw_content)
self.published = None
document_file = io.BytesIO(raw_content)
published_line = stem.util.str_tools._to_unicode(document_file.readline())
if published_line.startswith('published '):
published_line = published_line.split(' ', 1)[1].strip()
try:
self.published = stem.util.str_tools._parse_timestamp(published_line)
except ValueError:
if validate:
raise ValueError("Bridge network status document's 'published' time wasn't parsable: %s" % published_line)
elif validate:
raise ValueError("Bridge network status documents must start with a 'published' line:\n%s" % stem.util.str_tools._to_unicode(raw_content))
router_iter = stem.descriptor.router_status_entry._parse_file(
document_file,
validate,
entry_class = RouterStatusEntryV2,
extra_args = (self,),
)
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
stem-1.7.1/stem/descriptor/extrainfo_descriptor.py 0000664 0001750 0001750 00000124347 13411002341 023147 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor extra-info descriptors. These are published by relays whenever
their server descriptor is published and have a similar format. However, unlike
server descriptors these don't contain information that Tor clients require to
function and as such aren't fetched by default.
Defined in section 2.1.2 of the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_,
extra-info descriptors contain interesting but non-vital information such as
usage statistics. Tor clients cannot request these documents for bridges.
Extra-info descriptors are available from a few sources...
* If you have 'DownloadExtraInfo 1' in your torrc...
* control port via 'GETINFO extra-info/digest/\*' queries
* the 'cached-extrainfo' file in tor's data directory
* Archived descriptors provided by `CollecTor <https://collector.torproject.org/>`_.
* Directory authorities and mirrors via their DirPort.
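For example, reading from tor's data directory (a minimal sketch, the path
varies by platform)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('/var/lib/tor/cached-extrainfo'):
    print('%s published an extra-info descriptor at %s' % (desc.nickname, desc.published))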
**Module Overview:**
::
ExtraInfoDescriptor - Tor extra-info descriptor.
|- RelayExtraInfoDescriptor - Extra-info descriptor for a relay.
|- BridgeExtraInfoDescriptor - Extra-info descriptor for a bridge.
|
+- digest - calculates the upper-case hex digest value for our content
.. data:: DirResponse (enum)
Enumeration for known statuses for ExtraInfoDescriptor's dir_*_responses.
=================== ===========
DirResponse Description
=================== ===========
**OK** network status requests that were answered
**NOT_ENOUGH_SIGS** network status wasn't signed by enough authorities
**UNAVAILABLE** requested network status was unavailable
**NOT_FOUND** requested network status was not found
**NOT_MODIFIED** network status unmodified since If-Modified-Since time
**BUSY** directory was busy
=================== ===========
.. data:: DirStat (enum)
Enumeration for known stats for ExtraInfoDescriptor's dir_*_direct_dl and
dir_*_tunneled_dl.
===================== ===========
DirStat Description
===================== ===========
**COMPLETE** requests that completed successfully
**TIMEOUT** requests that didn't complete within a ten minute timeout
**RUNNING** requests still in process when measurement's taken
**MIN** smallest rate at which a descriptor was downloaded in B/s
**MAX** largest rate at which a descriptor was downloaded in B/s
**D1-4** and **D6-9** rate of the slowest x/10 download rates in B/s
**Q1** and **Q3** rate of the slowest and fastest quarter download rates in B/s
**MD** median download rate in B/s
===================== ===========
"""
import functools
import hashlib
import re
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
create_signing_key,
_descriptor_content,
_read_until_keywords,
_descriptor_components,
_value,
_values,
_parse_simple_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_key_block,
_mappings_for,
_append_router_signature,
_random_nickname,
_random_fingerprint,
_random_date,
_random_crypto_blob,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# known statuses for dirreq-v2-resp and dirreq-v3-resp...
DirResponse = stem.util.enum.Enum(
('OK', 'ok'),
('NOT_ENOUGH_SIGS', 'not-enough-sigs'),
('UNAVAILABLE', 'unavailable'),
('NOT_FOUND', 'not-found'),
('NOT_MODIFIED', 'not-modified'),
('BUSY', 'busy'),
)
# known stats for dirreq-v2/3-direct-dl and dirreq-v2/3-tunneled-dl...
dir_stats = ['complete', 'timeout', 'running', 'min', 'max', 'q1', 'q3', 'md']
dir_stats += ['d%i' % i for i in range(1, 5)]
dir_stats += ['d%i' % i for i in range(6, 10)]
DirStat = stem.util.enum.Enum(*[(stat.upper(), stat) for stat in dir_stats])
# relay descriptors must have exactly one of the following
REQUIRED_FIELDS = (
'extra-info',
'published',
'router-signature',
)
# optional entries that can appear at most once
SINGLE_FIELDS = (
'read-history',
'write-history',
'geoip-db-digest',
'geoip6-db-digest',
'bridge-stats-end',
'bridge-ips',
'dirreq-stats-end',
'dirreq-v2-ips',
'dirreq-v3-ips',
'dirreq-v2-reqs',
'dirreq-v3-reqs',
'dirreq-v2-share',
'dirreq-v3-share',
'dirreq-v2-resp',
'dirreq-v3-resp',
'dirreq-v2-direct-dl',
'dirreq-v3-direct-dl',
'dirreq-v2-tunneled-dl',
'dirreq-v3-tunneled-dl',
'dirreq-read-history',
'dirreq-write-history',
'entry-stats-end',
'entry-ips',
'cell-stats-end',
'cell-processed-cells',
'cell-queued-cells',
'cell-time-in-queue',
'cell-circuits-per-decile',
'conn-bi-direct',
'exit-stats-end',
'exit-kibibytes-written',
'exit-kibibytes-read',
'exit-streams-opened',
)
_timestamp_re = re.compile(r'^(.*) \(([0-9]+) s\)( .*)?$')
_locale_re = re.compile(r'^[a-zA-Z0-9\?]{2}$')
def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
"""
Iterates over the extra-info descriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool is_bridge: parses the file as being a bridge descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.extrainfo_descriptor.ExtraInfoDescriptor`
instances in the file
:raises:
* **ValueError** if the contents are malformed and validate is **True**
* **IOError** if the file can't be read
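Normally reached via :func:`stem.descriptor.parse_file`, but direct use would
look like (a sketch)...

::

  with open('cached-extrainfo', 'rb') as descriptor_file:
    for desc in _parse_file(descriptor_file):
      print(desc.fingerprint)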
"""
while True:
if not is_bridge:
extrainfo_content = _read_until_keywords('router-signature', descriptor_file)
# we've reached the 'router-signature', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
extrainfo_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
else:
extrainfo_content = _read_until_keywords('router-digest', descriptor_file, True)
if extrainfo_content:
if extrainfo_content[0].startswith(b'@type'):
extrainfo_content = extrainfo_content[1:]
if is_bridge:
yield BridgeExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
else:
yield RelayExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
else:
break # done parsing file
def _parse_timestamp_and_interval(keyword, content):
"""
Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry.
:param str keyword: line's keyword
:param str content: line content to be parsed
:returns: **tuple** of the form (timestamp (**datetime**), interval
(**int**), remaining content (**str**))
:raises: **ValueError** if the content is malformed
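For example (illustrative input)...

::

  >>> _parse_timestamp_and_interval('read-history', '2018-08-01 12:00:00 (900 s) 20,30,40')
  (datetime.datetime(2018, 8, 1, 12, 0), 900, '20,30,40')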
"""
line = '%s %s' % (keyword, content)
content_match = _timestamp_re.match(content)
if not content_match:
raise ValueError('Malformed %s line: %s' % (keyword, line))
timestamp_str, interval, remainder = content_match.groups()
if remainder:
remainder = remainder[1:] # remove leading space
if not interval.isdigit():
raise ValueError("%s line's interval wasn't a number: %s" % (keyword, line))
try:
timestamp = stem.util.str_tools._parse_timestamp(timestamp_str)
return timestamp, int(interval), remainder
except ValueError:
raise ValueError("%s line's timestamp wasn't parsable: %s" % (keyword, line))
def _parse_extra_info_line(descriptor, entries):
# "extra-info" Nickname Fingerprint
value = _value('extra-info', entries)
extra_info_comp = value.split()
if len(extra_info_comp) < 2:
raise ValueError('Extra-info line must have two values: extra-info %s' % value)
elif not stem.util.tor_tools.is_valid_nickname(extra_info_comp[0]):
raise ValueError("Extra-info line entry isn't a valid nickname: %s" % extra_info_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(extra_info_comp[1]):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % extra_info_comp[1])
descriptor.nickname = extra_info_comp[0]
descriptor.fingerprint = extra_info_comp[1]
def _parse_transport_line(descriptor, entries):
# "transport" transportname address:port [arglist]
# Everything after the transportname is scrubbed in published bridge
# descriptors, so we'll never see it in practice.
#
# These entries really only make sense for bridges, but have been seen
# on non-bridges in the wild when the relay operator configured it this
# way.
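#
# For instance (hypothetical values)...
#
#   transport obfs4 198.51.100.1:1234 cert=abc,iat-mode=0
#   transport obfs4
#
# The first is an unscrubbed entry, the second its scrubbed form.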
transports = {}
for value in _values('transport', entries):
name, address, port, args = None, None, None, None
if ' ' not in value:
# scrubbed
name = value
else:
# not scrubbed
value_comp = value.split()
if len(value_comp) < 1:
raise ValueError('Transport line is missing its transport name: transport %s' % value)
elif len(value_comp) < 2:
raise ValueError('Transport line is missing its address:port value: transport %s' % value)
elif ':' not in value_comp[1]:
raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value)
name = value_comp[0]
address, port_str = value_comp[1].rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) and \
not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError('Transport line has a malformed address: transport %s' % value)
elif not stem.util.connection.is_valid_port(port_str):
raise ValueError('Transport line has a malformed port: transport %s' % value)
address = address.lstrip('[').rstrip(']')  # strip brackets from IPv6 addresses
port = int(port_str)
args = value_comp[2:] if len(value_comp) >= 3 else []
transports[name] = (address, port, args)
descriptor.transport = transports
def _parse_cell_circuits_per_decline_line(descriptor, entries):
# "cell-circuits-per-decile" num
value = _value('cell-circuits-per-decile', entries)
if not value.isdigit():
raise ValueError('Non-numeric cell-circuits-per-decile value: %s' % value)
elif int(value) < 0:
raise ValueError('Negative cell-circuits-per-decile value: %s' % value)
descriptor.cell_circuits_per_decile = int(value)
def _parse_padding_counts_line(descriptor, entries):
# "padding-counts" YYYY-MM-DD HH:MM:SS (NSEC s) key=val key=val...
value = _value('padding-counts', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('padding-counts', value)
counts = {}
for k, v in _mappings_for('padding-counts', remainder, require_value = True):
counts[k] = int(v) if v.isdigit() else v
setattr(descriptor, 'padding_counts_end', timestamp)
setattr(descriptor, 'padding_counts_interval', interval)
setattr(descriptor, 'padding_counts', counts)
def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries):
value = _value(keyword, entries)
recognized_counts = {}
unrecognized_counts = {}
is_response_stats = keyword in ('dirreq-v2-resp', 'dirreq-v3-resp')
key_set = DirResponse if is_response_stats else DirStat
key_type = 'STATUS' if is_response_stats else 'STAT'
for status, count in _mappings_for(keyword, value, divider = ','):
if not count.isdigit():
raise ValueError('%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value))
if status in key_set:
recognized_counts[status] = int(count)
else:
unrecognized_counts[status] = int(count)
setattr(descriptor, recognized_counts_attr, recognized_counts)
setattr(descriptor, unrecognized_counts_attr, unrecognized_counts)
def _parse_dirreq_share_line(keyword, attribute, descriptor, entries):
value = _value(keyword, entries)
if not value.endswith('%'):
raise ValueError('%s lines should be a percentage: %s %s' % (keyword, keyword, value))
elif float(value[:-1]) < 0:
raise ValueError('Negative percentage value: %s %s' % (keyword, value))
# bug means it might be above 100%: https://lists.torproject.org/pipermail/tor-dev/2012-June/003679.html
setattr(descriptor, attribute, float(value[:-1]) / 100)
def _parse_cell_line(keyword, attribute, descriptor, entries):
# "" num,...,num
value = _value(keyword, entries)
entries, exc = [], None
if value:
for entry in value.split(','):
try:
# Values should be positive but as discussed in ticket #5849
# there was a bug around this. It was fixed in tor 0.2.2.1.
entries.append(float(entry))
except ValueError:
exc = ValueError('Non-numeric entry in %s listing: %s %s' % (keyword, keyword, value))
setattr(descriptor, attribute, entries)
if exc:
raise exc
def _parse_timestamp_and_interval_line(keyword, end_attribute, interval_attribute, descriptor, entries):
# "" YYYY-MM-DD HH:MM:SS (NSEC s)
timestamp, interval, _ = _parse_timestamp_and_interval(keyword, _value(keyword, entries))
setattr(descriptor, end_attribute, timestamp)
setattr(descriptor, interval_attribute, interval)
def _parse_conn_bi_direct_line(descriptor, entries):
# "conn-bi-direct" YYYY-MM-DD HH:MM:SS (NSEC s) BELOW,READ,WRITE,BOTH
value = _value('conn-bi-direct', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('conn-bi-direct', value)
stats = remainder.split(',')
if len(stats) != 4 or not (stats[0].isdigit() and stats[1].isdigit() and stats[2].isdigit() and stats[3].isdigit()):
raise ValueError('conn-bi-direct line should end with four numeric values: conn-bi-direct %s' % value)
descriptor.conn_bi_direct_end = timestamp
descriptor.conn_bi_direct_interval = interval
descriptor.conn_bi_direct_below = int(stats[0])
descriptor.conn_bi_direct_read = int(stats[1])
descriptor.conn_bi_direct_write = int(stats[2])
descriptor.conn_bi_direct_both = int(stats[3])
def _parse_history_line(keyword, end_attribute, interval_attribute, values_attribute, descriptor, entries):
# "" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
value = _value(keyword, entries)
timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value)
history_values = []
if remainder:
try:
history_values = [int(entry) for entry in remainder.split(',')]
except ValueError:
raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))
setattr(descriptor, end_attribute, timestamp)
setattr(descriptor, interval_attribute, interval)
setattr(descriptor, values_attribute, history_values)
def _parse_port_count_line(keyword, attribute, descriptor, entries):
# "" port=N,port=N,...
value, port_mappings = _value(keyword, entries), {}
for port, stat in _mappings_for(keyword, value, divider = ','):
if (port != 'other' and not stem.util.connection.is_valid_port(port)) or not stat.isdigit():
raise ValueError('Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value))
port = int(port) if port.isdigit() else port
port_mappings[port] = int(stat)
setattr(descriptor, attribute, port_mappings)
def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries):
# "" CC=N,CC=N,...
#
# The maxmind geoip (https://www.maxmind.com/app/iso3166) has numeric
# locale codes for some special values, for instance...
# A1,"Anonymous Proxy"
# A2,"Satellite Provider"
# ??,"Unknown"
value, locale_usage = _value(keyword, entries), {}
for locale, count in _mappings_for(keyword, value, divider = ','):
if not _locale_re.match(locale) or not count.isdigit():
raise ValueError('Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value))
locale_usage[locale] = int(count)
setattr(descriptor, attribute, locale_usage)
def _parse_bridge_ip_versions_line(descriptor, entries):
value, ip_versions = _value('bridge-ip-versions', entries), {}
for protocol, count in _mappings_for('bridge-ip-versions', value, divider = ','):
if not count.isdigit():
raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value))
ip_versions[protocol] = int(count)
descriptor.ip_versions = ip_versions
def _parse_bridge_ip_transports_line(descriptor, entries):
value, ip_transports = _value('bridge-ip-transports', entries), {}
for protocol, count in _mappings_for('bridge-ip-transports', value, divider = ','):
if not count.isdigit():
raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value))
ip_transports[protocol] = int(count)
descriptor.ip_transports = ip_transports
def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entries):
# "" num key=val key=val...
value, stat, extra = _value(keyword, entries), None, {}
if value is None:
pass # not in the descriptor
elif value == '':
raise ValueError("'%s' line was blank" % keyword)
else:
if ' ' in value:
stat_value, remainder = value.split(' ', 1)
else:
stat_value, remainder = value, None
try:
stat = int(stat_value)
except ValueError:
raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, stat_value, keyword, value))
for key, val in _mappings_for(keyword, remainder):
extra[key] = val
setattr(descriptor, stat_attribute, stat)
setattr(descriptor, extra_attribute, extra)
_parse_identity_ed25519_line = _parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest')
_parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest')
_parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown')
_parse_dirreq_v3_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-resp', 'dir_v3_responses', 'dir_v3_responses_unknown')
_parse_dirreq_v2_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-direct-dl', 'dir_v2_direct_dl', 'dir_v2_direct_dl_unknown')
_parse_dirreq_v3_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-direct-dl', 'dir_v3_direct_dl', 'dir_v3_direct_dl_unknown')
_parse_dirreq_v2_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-tunneled-dl', 'dir_v2_tunneled_dl', 'dir_v2_tunneled_dl_unknown')
_parse_dirreq_v3_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-tunneled-dl', 'dir_v3_tunneled_dl', 'dir_v3_tunneled_dl_unknown')
_parse_dirreq_v2_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v2-share', 'dir_v2_share')
_parse_dirreq_v3_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v3-share', 'dir_v3_share')
_parse_cell_processed_cells_line = functools.partial(_parse_cell_line, 'cell-processed-cells', 'cell_processed_cells')
_parse_cell_queued_cells_line = functools.partial(_parse_cell_line, 'cell-queued-cells', 'cell_queued_cells')
_parse_cell_time_in_queue_line = functools.partial(_parse_cell_line, 'cell-time-in-queue', 'cell_time_in_queue')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_geoip_start_time_line = _parse_timestamp_line('geoip-start-time', 'geoip_start_time')
_parse_cell_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'cell-stats-end', 'cell_stats_end', 'cell_stats_interval')
_parse_entry_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'entry-stats-end', 'entry_stats_end', 'entry_stats_interval')
_parse_exit_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'exit-stats-end', 'exit_stats_end', 'exit_stats_interval')
_parse_bridge_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'bridge-stats-end', 'bridge_stats_end', 'bridge_stats_interval')
_parse_dirreq_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'dirreq-stats-end', 'dir_stats_end', 'dir_stats_interval')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_dirreq_read_history_line = functools.partial(_parse_history_line, 'dirreq-read-history', 'dir_read_history_end', 'dir_read_history_interval', 'dir_read_history_values')
_parse_dirreq_write_history_line = functools.partial(_parse_history_line, 'dirreq-write-history', 'dir_write_history_end', 'dir_write_history_interval', 'dir_write_history_values')
_parse_exit_kibibytes_written_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-written', 'exit_kibibytes_written')
_parse_exit_kibibytes_read_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-read', 'exit_kibibytes_read')
_parse_exit_streams_opened_line = functools.partial(_parse_port_count_line, 'exit-streams-opened', 'exit_streams_opened')
_parse_hidden_service_stats_end_line = _parse_timestamp_line('hidserv-stats-end', 'hs_stats_end')
_parse_hidden_service_rend_relayed_cells_line = functools.partial(_parse_hs_stats, 'hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr')
_parse_hidden_service_dir_onions_seen_line = functools.partial(_parse_hs_stats, 'hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr')
_parse_dirreq_v2_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-ips', 'dir_v2_ips')
_parse_dirreq_v3_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-ips', 'dir_v3_ips')
_parse_dirreq_v2_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-reqs', 'dir_v2_requests')
_parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-reqs', 'dir_v3_requests')
_parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins')
_parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips')
_parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
class ExtraInfoDescriptor(Descriptor):
"""
Extra-info descriptor document.
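Example attribute access (a sketch, assuming 'desc' was parsed from one of
the sources noted in the module docs)...

::

  print('%s (%s)' % (desc.nickname, desc.fingerprint))

  if desc.write_history_values:
    print('wrote %i bytes during its last interval' % desc.write_history_values[-1])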
:var str nickname: **\*** relay's nickname
:var str fingerprint: **\*** identity key fingerprint
:var datetime published: **\*** time in UTC when this descriptor was made
:var str geoip_db_digest: sha1 of the geoIP database file for IPv4 addresses
:var str geoip6_db_digest: sha1 of the geoIP database file for IPv6 addresses
:var dict transport: **\*** mapping of transport methods to their (address,
port, args) tuple, these usually appear on bridges in which case all of
those are **None**
**Bi-directional connection usage:**
:var datetime conn_bi_direct_end: end of the sampling interval
:var int conn_bi_direct_interval: seconds per interval
:var int conn_bi_direct_below: connections that read/wrote less than 20 KiB
:var int conn_bi_direct_read: connections that read at least 10x more than wrote
:var int conn_bi_direct_write: connections that wrote at least 10x more than read
:var int conn_bi_direct_both: remaining connections
**Bytes read/written for relayed traffic:**
:var datetime read_history_end: end of the sampling interval
:var int read_history_interval: seconds per interval
:var list read_history_values: bytes read during each interval
:var datetime write_history_end: end of the sampling interval
:var int write_history_interval: seconds per interval
:var list write_history_values: bytes written during each interval
**Cell relaying statistics:**
:var datetime cell_stats_end: end of the period when stats were gathered
:var int cell_stats_interval: length in seconds of the interval
:var list cell_processed_cells: measurement of processed cells per circuit
:var list cell_queued_cells: measurement of queued cells per circuit
:var list cell_time_in_queue: mean enqueued time in milliseconds for cells
:var int cell_circuits_per_decile: mean number of circuits in a decile
**Directory Mirror Attributes:**
:var datetime dir_stats_end: end of the period when stats were gathered
:var int dir_stats_interval: length in seconds of the interval
:var dict dir_v2_ips: mapping of locales to rounded count of requester ips
:var dict dir_v3_ips: mapping of locales to rounded count of requester ips
:var float dir_v2_share: percent of total directory traffic it expects to serve
:var float dir_v3_share: percent of total directory traffic it expects to serve
:var dict dir_v2_requests: mapping of locales to rounded count of requests
:var dict dir_v3_requests: mapping of locales to rounded count of requests
:var dict dir_v2_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
:var dict dir_v3_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
:var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count
:var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count
:var dict dir_v2_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
:var dict dir_v3_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
:var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v2_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
:var dict dir_v3_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
:var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
:var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
**Bytes read/written for directory mirroring:**
:var datetime dir_read_history_end: end of the sampling interval
:var int dir_read_history_interval: seconds per interval
:var list dir_read_history_values: bytes read during each interval
:var datetime dir_write_history_end: end of the sampling interval
:var int dir_write_history_interval: seconds per interval
:var list dir_write_history_values: bytes written during each interval
**Guard Attributes:**
:var datetime entry_stats_end: end of the period when stats were gathered
:var int entry_stats_interval: length in seconds of the interval
:var dict entry_ips: mapping of locales to rounded count of unique user ips
**Exit Attributes:**
:var datetime exit_stats_end: end of the period when stats were gathered
:var int exit_stats_interval: length in seconds of the interval
:var dict exit_kibibytes_written: traffic per port (keys are ints or 'other')
:var dict exit_kibibytes_read: traffic per port (keys are ints or 'other')
:var dict exit_streams_opened: streams per port (keys are ints or 'other')
**Hidden Service Attributes:**
:var datetime hs_stats_end: end of the sampling interval
:var int hs_rend_cells: rounded count of the RENDEZVOUS1 cells seen
:var dict hs_rend_cells_attr: **\*** attributes provided for the hs_rend_cells
:var int hs_dir_onions_seen: rounded count of the identities seen
:var dict hs_dir_onions_seen_attr: **\*** attributes provided for the hs_dir_onions_seen
**Padding Count Attributes:**
:var dict padding_counts: **\*** padding parameters
:var datetime padding_counts_end: end of the period when padding data is being collected
:var int padding_counts_interval: length in seconds of the interval
**Bridge Attributes:**
:var datetime bridge_stats_end: end of the period when stats were gathered
:var int bridge_stats_interval: length in seconds of the interval
:var dict bridge_ips: mapping of locales to rounded count of unique user ips
:var datetime geoip_start_time: replaced by bridge_stats_end (deprecated)
:var dict geoip_client_origins: replaced by bridge_ips (deprecated)
:var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
:var dict ip_transports: mapping of ip transports to a count for the number of users
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.4.0
Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr,
hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes.
.. versionchanged:: 1.6.0
Added the padding_counts, padding_counts_end, and padding_counts_interval
attributes.
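For example, to read directory statistics from the extra-info descriptors
tor has cached (a minimal sketch; the path is illustrative)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('/home/atagar/.tor/cached-extrainfo'):
    if desc.dir_v3_requests:
      print('%s: %s' % (desc.nickname, desc.dir_v3_requests))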
"""
ATTRIBUTES = {
'nickname': (None, _parse_extra_info_line),
'fingerprint': (None, _parse_extra_info_line),
'published': (None, _parse_published_line),
'geoip_db_digest': (None, _parse_geoip_db_digest_line),
'geoip6_db_digest': (None, _parse_geoip6_db_digest_line),
'transport': ({}, _parse_transport_line),
'conn_bi_direct_end': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_interval': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_below': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_read': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_write': (None, _parse_conn_bi_direct_line),
'conn_bi_direct_both': (None, _parse_conn_bi_direct_line),
'read_history_end': (None, _parse_read_history_line),
'read_history_interval': (None, _parse_read_history_line),
'read_history_values': (None, _parse_read_history_line),
'write_history_end': (None, _parse_write_history_line),
'write_history_interval': (None, _parse_write_history_line),
'write_history_values': (None, _parse_write_history_line),
'cell_stats_end': (None, _parse_cell_stats_end_line),
'cell_stats_interval': (None, _parse_cell_stats_end_line),
'cell_processed_cells': (None, _parse_cell_processed_cells_line),
'cell_queued_cells': (None, _parse_cell_queued_cells_line),
'cell_time_in_queue': (None, _parse_cell_time_in_queue_line),
'cell_circuits_per_decile': (None, _parse_cell_circuits_per_decline_line),
'dir_stats_end': (None, _parse_dirreq_stats_end_line),
'dir_stats_interval': (None, _parse_dirreq_stats_end_line),
'dir_v2_ips': (None, _parse_dirreq_v2_ips_line),
'dir_v3_ips': (None, _parse_dirreq_v3_ips_line),
'dir_v2_share': (None, _parse_dirreq_v2_share_line),
'dir_v3_share': (None, _parse_dirreq_v3_share_line),
'dir_v2_requests': (None, _parse_dirreq_v2_reqs_line),
'dir_v3_requests': (None, _parse_dirreq_v3_reqs_line),
'dir_v2_responses': (None, _parse_dirreq_v2_resp_line),
'dir_v3_responses': (None, _parse_dirreq_v3_resp_line),
'dir_v2_responses_unknown': (None, _parse_dirreq_v2_resp_line),
'dir_v3_responses_unknown': (None, _parse_dirreq_v3_resp_line),
'dir_v2_direct_dl': (None, _parse_dirreq_v2_direct_dl_line),
'dir_v3_direct_dl': (None, _parse_dirreq_v3_direct_dl_line),
'dir_v2_direct_dl_unknown': (None, _parse_dirreq_v2_direct_dl_line),
'dir_v3_direct_dl_unknown': (None, _parse_dirreq_v3_direct_dl_line),
'dir_v2_tunneled_dl': (None, _parse_dirreq_v2_tunneled_dl_line),
'dir_v3_tunneled_dl': (None, _parse_dirreq_v3_tunneled_dl_line),
'dir_v2_tunneled_dl_unknown': (None, _parse_dirreq_v2_tunneled_dl_line),
'dir_v3_tunneled_dl_unknown': (None, _parse_dirreq_v3_tunneled_dl_line),
'dir_read_history_end': (None, _parse_dirreq_read_history_line),
'dir_read_history_interval': (None, _parse_dirreq_read_history_line),
'dir_read_history_values': (None, _parse_dirreq_read_history_line),
'dir_write_history_end': (None, _parse_dirreq_write_history_line),
'dir_write_history_interval': (None, _parse_dirreq_write_history_line),
'dir_write_history_values': (None, _parse_dirreq_write_history_line),
'entry_stats_end': (None, _parse_entry_stats_end_line),
'entry_stats_interval': (None, _parse_entry_stats_end_line),
'entry_ips': (None, _parse_entry_ips_line),
'exit_stats_end': (None, _parse_exit_stats_end_line),
'exit_stats_interval': (None, _parse_exit_stats_end_line),
'exit_kibibytes_written': (None, _parse_exit_kibibytes_written_line),
'exit_kibibytes_read': (None, _parse_exit_kibibytes_read_line),
'exit_streams_opened': (None, _parse_exit_streams_opened_line),
'hs_stats_end': (None, _parse_hidden_service_stats_end_line),
'hs_rend_cells': (None, _parse_hidden_service_rend_relayed_cells_line),
'hs_rend_cells_attr': ({}, _parse_hidden_service_rend_relayed_cells_line),
'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line),
'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line),
'padding_counts': ({}, _parse_padding_counts_line),
'padding_counts_end': (None, _parse_padding_counts_line),
'padding_counts_interval': (None, _parse_padding_counts_line),
'bridge_stats_end': (None, _parse_bridge_stats_end_line),
'bridge_stats_interval': (None, _parse_bridge_stats_end_line),
'bridge_ips': (None, _parse_bridge_ips_line),
'geoip_start_time': (None, _parse_geoip_start_time_line),
'geoip_client_origins': (None, _parse_geoip_client_origins_line),
'ip_versions': (None, _parse_bridge_ip_versions_line),
'ip_transports': (None, _parse_bridge_ip_transports_line),
}
PARSER_FOR_LINE = {
'extra-info': _parse_extra_info_line,
'geoip-db-digest': _parse_geoip_db_digest_line,
'geoip6-db-digest': _parse_geoip6_db_digest_line,
'transport': _parse_transport_line,
'cell-circuits-per-decile': _parse_cell_circuits_per_decline_line,
'dirreq-v2-resp': _parse_dirreq_v2_resp_line,
'dirreq-v3-resp': _parse_dirreq_v3_resp_line,
'dirreq-v2-direct-dl': _parse_dirreq_v2_direct_dl_line,
'dirreq-v3-direct-dl': _parse_dirreq_v3_direct_dl_line,
'dirreq-v2-tunneled-dl': _parse_dirreq_v2_tunneled_dl_line,
'dirreq-v3-tunneled-dl': _parse_dirreq_v3_tunneled_dl_line,
'dirreq-v2-share': _parse_dirreq_v2_share_line,
'dirreq-v3-share': _parse_dirreq_v3_share_line,
'cell-processed-cells': _parse_cell_processed_cells_line,
'cell-queued-cells': _parse_cell_queued_cells_line,
'cell-time-in-queue': _parse_cell_time_in_queue_line,
'published': _parse_published_line,
'geoip-start-time': _parse_geoip_start_time_line,
'cell-stats-end': _parse_cell_stats_end_line,
'entry-stats-end': _parse_entry_stats_end_line,
'exit-stats-end': _parse_exit_stats_end_line,
'bridge-stats-end': _parse_bridge_stats_end_line,
'dirreq-stats-end': _parse_dirreq_stats_end_line,
'conn-bi-direct': _parse_conn_bi_direct_line,
'read-history': _parse_read_history_line,
'write-history': _parse_write_history_line,
'dirreq-read-history': _parse_dirreq_read_history_line,
'dirreq-write-history': _parse_dirreq_write_history_line,
'exit-kibibytes-written': _parse_exit_kibibytes_written_line,
'exit-kibibytes-read': _parse_exit_kibibytes_read_line,
'exit-streams-opened': _parse_exit_streams_opened_line,
'hidserv-stats-end': _parse_hidden_service_stats_end_line,
'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line,
'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line,
'padding-counts': _parse_padding_counts_line,
'dirreq-v2-ips': _parse_dirreq_v2_ips_line,
'dirreq-v3-ips': _parse_dirreq_v3_ips_line,
'dirreq-v2-reqs': _parse_dirreq_v2_reqs_line,
'dirreq-v3-reqs': _parse_dirreq_v3_reqs_line,
'geoip-client-origins': _parse_geoip_client_origins_line,
'entry-ips': _parse_entry_ips_line,
'bridge-ips': _parse_bridge_ips_line,
'bridge-ip-versions': _parse_bridge_ip_versions_line,
'bridge-ip-transports': _parse_bridge_ip_transports_line,
}
def __init__(self, raw_contents, validate = False):
"""
Extra-info descriptor constructor. By default this skips validation of the
descriptor's content as it's parsed. Validation can be enabled to reject
malformed data at the cost of performance.
:param str raw_contents: extra-info content provided by the relay
:param bool validate: checks the validity of the extra-info descriptor if
**True**, skips these checks otherwise
:raises: **ValueError** if the contents is malformed and validate is **True**
"""
super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate)
entries = _descriptor_components(raw_contents, validate)
if validate:
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword)
for keyword in self._required_fields() + SINGLE_FIELDS:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)
expected_first_keyword = self._first_keyword()
if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword)
expected_last_keyword = self._last_keyword()
if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
self._parse(entries, validate)
else:
self._entries = entries
def digest(self):
"""
Provides the upper-case hex encoded sha1 of our content. This value is part
of the server descriptor entry for this relay.
:returns: **str** with the upper-case hex digest value for this server
descriptor
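For instance, this can be checked against the **extra_info_digest** of the
relay's server descriptor (a sketch that assumes both descriptors are
already parsed)...

::

  if server_desc.extra_info_digest == extrainfo_desc.digest():
    print('extra-info descriptor matches the server descriptor')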
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the ExtraInfoDescriptor subclass')
def _required_fields(self):
return REQUIRED_FIELDS
def _first_keyword(self):
return 'extra-info'
def _last_keyword(self):
return 'router-signature'
class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Relay extra-info descriptor, constructed from data such as that provided by
'GETINFO extra-info/digest/\*', cached descriptors, and metrics
(`specification <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).
:var str ed25519_certificate: base64 encoded ed25519 certificate
:var str ed25519_signature: signature of this document using ed25519
:var str signature: **\*** signature for this extrainfo descriptor
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate and ed25519_signature attributes.
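For example, making an unsigned descriptor for testing (a sketch using the
create() helper below)...

::

  desc = RelayExtraInfoDescriptor.create()
  print(desc.digest())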
"""
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'router-signature': _parse_router_signature_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None):
base_header = (
('extra-info', '%s %s' % (_random_nickname(), _random_fingerprint())),
('published', _random_date()),
)
if signing_key:
sign = True
if sign:
if attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None):
return cls(cls.content(attr, exclude, sign, signing_key), validate = validate)
@lru_cache()
def digest(self):
# our digest is calculated from everything except our signature
raw_content, ending = str(self), '\nrouter-signature\n'
raw_content = raw_content[:raw_content.find(ending) + len(ending)]
return hashlib.sha1(stem.util.str_tools._to_bytes(raw_content)).hexdigest().upper()
class BridgeExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Bridge extra-info descriptor (`bridge descriptor specification
<https://collector.torproject.org/#bridge-descriptors>`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
"""
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('extra-info', 'ec2bridgereaac65a3 %s' % _random_fingerprint()),
('published', _random_date()),
), (
('router-digest', _random_fingerprint()),
))
def digest(self):
return self._digest
def _required_fields(self):
excluded_fields = [
'router-signature',
]
included_fields = [
'router-digest',
]
return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])
def _last_keyword(self):
return None
stem-1.7.1/stem/descriptor/hidden_service_descriptor.py 0000664 0001750 0001750 00000040550 13411002341 024114 0 ustar atagar atagar 0000000 0000000 # Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor hidden service descriptors as described in Tor's `rend-spec
<https://gitweb.torproject.org/torspec.git/tree/rend-spec.txt>`_.
Unlike other descriptor types these describe a hidden service rather than a
relay. They're created by the service, and can only be fetched via relays with
the HSDir flag.
These are only available through the Controller's
:func:`~stem.control.Controller.get_hidden_service_descriptor` method.
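For example (a sketch assuming a controller is listening on port 9051; the
onion address is just an illustration)...

::

  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()
    desc = controller.get_hidden_service_descriptor('3g2upl4pq6kufc4m')
    print(desc.published)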
**Module Overview:**
::
HiddenServiceDescriptor - Tor hidden service descriptor.
.. versionadded:: 1.4.0
"""
import base64
import binascii
import collections
import hashlib
import io
import stem.prereq
import stem.util.connection
import stem.util.str_tools
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_bytes_for_block,
_value,
_parse_simple_line,
_parse_timestamp_line,
_parse_key_block,
_random_date,
_random_crypto_blob,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
REQUIRED_FIELDS = (
'rendezvous-service-descriptor',
'version',
'permanent-key',
'secret-id-part',
'publication-time',
'protocol-versions',
'signature',
)
INTRODUCTION_POINTS_ATTR = {
'identifier': None,
'address': None,
'port': None,
'onion_key': None,
'service_key': None,
'intro_authentication': [],
}
# introduction-point fields that can only appear once
SINGLE_INTRODUCTION_POINT_FIELDS = [
'introduction-point',
'ip-address',
'onion-port',
'onion-key',
'service-key',
]
BASIC_AUTH = 1
STEALTH_AUTH = 2
class IntroductionPoints(collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys())):
"""
:var str identifier: hash of this introduction point's identity key
:var str address: address of this introduction point
:var int port: port where this introduction point is listening
:var str onion_key: public key for communicating with this introduction point
:var str service_key: public key for communicating with this hidden service
:var list intro_authentication: tuples of the form (auth_type, auth_data) for
establishing a connection
"""
class DecryptionFailure(Exception):
"""
Failure to decrypt the hidden service descriptor's introduction-points.
"""
def _parse_file(descriptor_file, validate = False, **kwargs):
"""
Iterates over the hidden service descriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor`
instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
while True:
descriptor_content = _read_until_keywords('signature', descriptor_file)
# we've reached the 'signature', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
if descriptor_content:
if descriptor_content[0].startswith(b'@type'):
descriptor_content = descriptor_content[1:]
yield HiddenServiceDescriptor(bytes.join(b'', descriptor_content), validate, **kwargs)
else:
break # done parsing file
def _parse_version_line(descriptor, entries):
value = _value('version', entries)
if value.isdigit():
descriptor.version = int(value)
else:
raise ValueError('version line must have a positive integer value: %s' % value)
def _parse_protocol_versions_line(descriptor, entries):
value = _value('protocol-versions', entries)
try:
versions = [int(entry) for entry in value.split(',')]
except ValueError:
raise ValueError('protocol-versions line has non-numeric versions: protocol-versions %s' % value)
for v in versions:
if v <= 0:
raise ValueError('protocol-versions must be positive integers: %s' % value)
descriptor.protocol_versions = versions
def _parse_introduction_points_line(descriptor, entries):
_, block_type, block_contents = entries['introduction-points'][0]
if not block_contents or block_type != 'MESSAGE':
raise ValueError("'introduction-points' should be followed by a MESSAGE block, but was a %s" % block_type)
descriptor.introduction_points_encoded = block_contents
descriptor.introduction_points_auth = [] # field was never implemented in tor (#15190)
try:
descriptor.introduction_points_content = _bytes_for_block(block_contents)
except TypeError:
raise ValueError("'introduction-points' isn't base64 encoded content:\n%s" % block_contents)
_parse_rendezvous_service_descriptor_line = _parse_simple_line('rendezvous-service-descriptor', 'descriptor_id')
_parse_permanent_key_line = _parse_key_block('permanent-key', 'permanent_key', 'RSA PUBLIC KEY')
_parse_secret_id_part_line = _parse_simple_line('secret-id-part', 'secret_id_part')
_parse_publication_time_line = _parse_timestamp_line('publication-time', 'published')
_parse_signature_line = _parse_key_block('signature', 'signature', 'SIGNATURE')
class HiddenServiceDescriptor(Descriptor):
"""
Hidden service descriptor.
:var str descriptor_id: **\*** identifier for this descriptor, this is a base32 hash of several fields
:var int version: **\*** hidden service descriptor version
:var str permanent_key: **\*** long term key of the hidden service
:var str secret_id_part: **\*** hash of the time period, cookie, and replica
values so our descriptor_id can be validated
:var datetime published: **\*** time in UTC when this descriptor was made
:var list protocol_versions: **\*** list of **int** versions that are supported when establishing a connection
:var str introduction_points_encoded: raw introduction points blob
:var list introduction_points_auth: **\*** tuples of the form
(auth_method, auth_data) for our introduction_points_content
(**deprecated**, always **[]**)
:var bytes introduction_points_content: decoded introduction-points content
without authentication data, if using cookie authentication this is
encrypted
:var str signature: signature of the descriptor content
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.6.0
Moved from the deprecated `pycrypto
<https://www.dlitz.net/software/pycrypto/>`_ module to `cryptography
<https://pypi.org/project/cryptography/>`_ for validating signatures.
.. versionchanged:: 1.6.0
Added the **skip_crypto_validation** constructor argument.
"""
ATTRIBUTES = {
'descriptor_id': (None, _parse_rendezvous_service_descriptor_line),
'version': (None, _parse_version_line),
'permanent_key': (None, _parse_permanent_key_line),
'secret_id_part': (None, _parse_secret_id_part_line),
'published': (None, _parse_publication_time_line),
'protocol_versions': ([], _parse_protocol_versions_line),
'introduction_points_encoded': (None, _parse_introduction_points_line),
'introduction_points_auth': ([], _parse_introduction_points_line),
'introduction_points_content': (None, _parse_introduction_points_line),
'signature': (None, _parse_signature_line),
}
PARSER_FOR_LINE = {
'rendezvous-service-descriptor': _parse_rendezvous_service_descriptor_line,
'version': _parse_version_line,
'permanent-key': _parse_permanent_key_line,
'secret-id-part': _parse_secret_id_part_line,
'publication-time': _parse_publication_time_line,
'protocol-versions': _parse_protocol_versions_line,
'introduction-points': _parse_introduction_points_line,
'signature': _parse_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('rendezvous-service-descriptor', 'y3olqqblqw2gbh6phimfuiroechjjafa'),
('version', '2'),
('permanent-key', _random_crypto_blob('RSA PUBLIC KEY')),
('secret-id-part', 'e24kgecavwsznj7gpbktqsiwgvngsf4e'),
('publication-time', _random_date()),
('protocol-versions', '2,3'),
('introduction-points', '\n-----BEGIN MESSAGE-----\n-----END MESSAGE-----'),
), (
('signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False):
return cls(cls.content(attr, exclude, sign), validate = validate, skip_crypto_validation = not sign)
def __init__(self, raw_contents, validate = False, skip_crypto_validation = False):
super(HiddenServiceDescriptor, self).__init__(raw_contents, lazy_load = not validate)
entries = _descriptor_components(raw_contents, validate, non_ascii_fields = ('introduction-points',))
if validate:
for keyword in REQUIRED_FIELDS:
if keyword not in entries:
raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
elif keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)
if 'rendezvous-service-descriptor' != list(entries.keys())[0]:
raise ValueError("Hidden service descriptor must start with a 'rendezvous-service-descriptor' entry")
elif 'signature' != list(entries.keys())[-1]:
raise ValueError("Hidden service descriptor must end with a 'signature' entry")
self._parse(entries, validate)
if not skip_crypto_validation and stem.prereq.is_crypto_available():
signed_digest = self._digest_for_signature(self.permanent_key, self.signature)
content_digest = self._digest_for_content(b'rendezvous-service-descriptor ', b'\nsignature\n')
if signed_digest != content_digest:
raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, content_digest))
else:
self._entries = entries
@lru_cache()
def introduction_points(self, authentication_cookie = None):
"""
Provides this service's introduction points.
:returns: **list** of :class:`~stem.descriptor.hidden_service_descriptor.IntroductionPoints`
:raises:
* **ValueError** if our introduction-points is malformed
* **DecryptionFailure** if unable to decrypt this field
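For instance (a sketch over an already parsed descriptor)...

::

  for intro_point in desc.introduction_points():
    print('%s:%s' % (intro_point.address, intro_point.port))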
"""
content = self.introduction_points_content
if not content:
return []
elif authentication_cookie:
if not stem.prereq.is_crypto_available():
raise DecryptionFailure('Decrypting introduction-points requires the cryptography module')
try:
missing_padding = len(authentication_cookie) % 4
authentication_cookie = base64.b64decode(stem.util.str_tools._to_bytes(authentication_cookie) + b'=' * missing_padding)
except TypeError as exc:
raise DecryptionFailure('authentication_cookie must be a base64 encoded string (%s)' % exc)
authentication_type = int(binascii.hexlify(content[0:1]), 16)
if authentication_type == BASIC_AUTH:
content = HiddenServiceDescriptor._decrypt_basic_auth(content, authentication_cookie)
elif authentication_type == STEALTH_AUTH:
content = HiddenServiceDescriptor._decrypt_stealth_auth(content, authentication_cookie)
else:
raise DecryptionFailure("Unrecognized authentication type '%s', currently we only support basic auth (%s) and stealth auth (%s)" % (authentication_type, BASIC_AUTH, STEALTH_AUTH))
if not content.startswith(b'introduction-point '):
raise DecryptionFailure('Unable to decrypt the introduction-points, maybe this is the wrong key?')
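# note: in the original indentation this next 'elif' pairs with the outer
# 'authentication_cookie' check above, firing when no cookie was supplied
# but the content is still encrypted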
elif not content.startswith(b'introduction-point '):
raise DecryptionFailure('introduction-points content is encrypted, you need to provide its authentication_cookie')
return HiddenServiceDescriptor._parse_introduction_points(content)
@staticmethod
def _decrypt_basic_auth(content, authentication_cookie):
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
try:
client_blocks = int(binascii.hexlify(content[1:2]), 16)
except ValueError:
raise DecryptionFailure("When using basic auth the content should start with a number of blocks but wasn't a hex digit: %s" % binascii.hexlify(content[1:2]))
# parse the client id and encrypted session keys
client_entries_length = client_blocks * 16 * 20
client_entries = content[2:2 + client_entries_length]
client_keys = [(client_entries[i:i + 4], client_entries[i + 4:i + 20]) for i in range(0, client_entries_length, 4 + 16)]
iv = content[2 + client_entries_length:2 + client_entries_length + 16]
encrypted = content[2 + client_entries_length + 16:]
client_id = hashlib.sha1(authentication_cookie + iv).digest()[:4]
for entry_id, encrypted_session_key in client_keys:
if entry_id != client_id:
continue # not the session key for this client
# try decrypting the session key
cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(b'\x00' * len(iv)), default_backend())
decryptor = cipher.decryptor()
session_key = decryptor.update(encrypted_session_key) + decryptor.finalize()
# attempt to decrypt the intro points with the session key
cipher = Cipher(algorithms.AES(session_key), modes.CTR(iv), default_backend())
decryptor = cipher.decryptor()
decrypted = decryptor.update(encrypted) + decryptor.finalize()
# check if the decryption looks correct
if decrypted.startswith(b'introduction-point '):
return decrypted
return content # nope, unable to decrypt the content
@staticmethod
def _decrypt_stealth_auth(content, authentication_cookie):
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
# byte 1 = authentication type, 2-17 = initialization vector, 18 on = encrypted content
iv, encrypted = content[1:17], content[17:]
cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(iv), default_backend())
decryptor = cipher.decryptor()
return decryptor.update(encrypted) + decryptor.finalize()
@staticmethod
def _parse_introduction_points(content):
"""
Provides the parsed list of IntroductionPoints for the unencrypted content.
"""
introduction_points = []
content_io = io.BytesIO(content)
while True:
content = b''.join(_read_until_keywords('introduction-point', content_io, ignore_first = True))
if not content:
break # reached the end
attr = dict(INTRODUCTION_POINTS_ATTR)
entries = _descriptor_components(content, False)
for keyword, values in list(entries.items()):
value, block_type, block_contents = values[0]
if keyword in SINGLE_INTRODUCTION_POINT_FIELDS and len(values) > 1:
raise ValueError("'%s' can only appear once in an introduction-point block, but appeared %i times" % (keyword, len(values)))
if keyword == 'introduction-point':
attr['identifier'] = value
elif keyword == 'ip-address':
if not stem.util.connection.is_valid_ipv4_address(value):
raise ValueError("'%s' is an invalid IPv4 address" % value)
attr['address'] = value
elif keyword == 'onion-port':
if not stem.util.connection.is_valid_port(value):
raise ValueError("'%s' is an invalid port" % value)
attr['port'] = int(value)
elif keyword == 'onion-key':
attr['onion_key'] = block_contents
elif keyword == 'service-key':
attr['service_key'] = block_contents
elif keyword == 'intro-authentication':
auth_entries = []
for auth_value, _, _ in values:
if ' ' not in auth_value:
raise ValueError("We expected 'intro-authentication [auth_type] [auth_data]', but had '%s'" % auth_value)
auth_type, auth_data = auth_value.split(' ')[:2]
auth_entries.append((auth_type, auth_data))
attr['intro_authentication'] = auth_entries
introduction_points.append(IntroductionPoints(**attr))
return introduction_points
stem-1.7.1/stem/descriptor/export.py 0000664 0001750 0001750 00000010104 13344455475 020245 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Toolkit for exporting descriptors to other formats.
**Module Overview:**
::
export_csv - Exports descriptors to a CSV
export_csv_file - Writes exported CSV output to a file
.. deprecated:: 1.7.0
This module will likely be removed in Stem 2.0 due to lack of usage. If you
use this module please `let me know <https://www.atagar.com/contact/>`_.
"""
import csv
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import stem.descriptor
import stem.prereq
class _ExportDialect(csv.excel):
lineterminator = '\n'
def export_csv(descriptors, included_fields = (), excluded_fields = (), header = True):
"""
Provides a newline separated CSV for one or more descriptors. If simply
provided with descriptors then the CSV contains all of its attributes,
labeled with a header row. Either 'included_fields' or 'excluded_fields' can
be used for more granular control over its attributes and the order.
:param Descriptor,list descriptors: either a
:class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
:param list included_fields: attributes to include in the csv
:param list excluded_fields: attributes to exclude from the csv
:param bool header: if **True** then the first line will be a comma separated
list of the attribute names (**only supported in python 2.7 and higher**)
:returns: **str** of the CSV for the descriptors, one per line
:raises: **ValueError** if descriptors contain more than one descriptor type
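For example (a sketch; the path is illustrative)...

::

  import stem.descriptor
  import stem.descriptor.export

  descriptors = list(stem.descriptor.parse_file('/home/atagar/.tor/cached-consensus'))
  print(stem.descriptor.export.export_csv(descriptors, included_fields = ('nickname', 'address')))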
"""
output_buffer = StringIO()
export_csv_file(output_buffer, descriptors, included_fields, excluded_fields, header)
return output_buffer.getvalue()
def export_csv_file(output_file, descriptors, included_fields = (), excluded_fields = (), header = True):
"""
Similar to :func:`stem.descriptor.export.export_csv`, except that the CSV is
written directly to a file.
:param file output_file: file to be written to
:param Descriptor,list descriptors: either a
:class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
:param list included_fields: attributes to include in the csv
:param list excluded_fields: attributes to exclude from the csv
:param bool header: if **True** then the first line will be a comma separated
list of the attribute names (**only supported in python 2.7 and higher**)
:raises: **ValueError** if descriptors contain more than one descriptor type
"""
if isinstance(descriptors, stem.descriptor.Descriptor):
descriptors = (descriptors,)
if not descriptors:
return
descriptor_type = type(descriptors[0])
descriptor_type_label = descriptor_type.__name__
included_fields = list(included_fields)
# If the user didn't specify the fields to include then export everything,
# ordered alphabetically. If they did specify fields then make sure that
# they exist.
desc_attr = sorted(vars(descriptors[0]).keys())
if included_fields:
for field in included_fields:
if field not in desc_attr:
raise ValueError("%s does not have a '%s' attribute, valid fields are: %s" % (descriptor_type_label, field, ', '.join(desc_attr)))
else:
included_fields = [attr for attr in desc_attr if not attr.startswith('_')]
for field in excluded_fields:
try:
included_fields.remove(field)
except ValueError:
pass
writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore')
if header and not stem.prereq._is_python_26():
writer.writeheader()
for desc in descriptors:
if not isinstance(desc, stem.descriptor.Descriptor):
raise ValueError('Unable to export a descriptor CSV since %s is not a descriptor.' % type(desc).__name__)
elif descriptor_type != type(desc):
raise ValueError('To export a descriptor CSV all of the descriptors must be of the same type. First descriptor was a %s but we later got a %s.' % (descriptor_type_label, type(desc)))
writer.writerow(vars(desc))
stem-1.7.1/stem/descriptor/router_status_entry.py 0000664 0001750 0001750 00000060442 13411002341 023051 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for router status entries, the information for individual routers
within a network status document. This information is provided from a few
sources...
* control port via 'GETINFO ns/\*' and 'GETINFO md/\*' queries
* router entries in a network status document, like the cached-consensus
**Module Overview:**
::
RouterStatusEntry - Common parent for router status entries
|- RouterStatusEntryV2 - Entry for a network status v2 document
|- RouterStatusEntryV3 - Entry for a network status v3 document
+- RouterStatusEntryMicroV3 - Entry for a microdescriptor flavored v3 document
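For example, reading the consensus entries tor has cached (a sketch; the
path is illustrative)...

::

  from stem.descriptor import parse_file

  for entry in parse_file('/home/atagar/.tor/cached-consensus'):
    print('%s (%s)' % (entry.nickname, entry.fingerprint))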
"""
import base64
import binascii
import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor import (
KEYWORD_LINE,
Descriptor,
_descriptor_content,
_value,
_values,
_descriptor_components,
_parse_protocol_line,
_read_until_keywords,
_random_nickname,
_random_ipv4_address,
_random_date,
)
_parse_pr_line = _parse_protocol_line('pr', 'protocols')
def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()):
"""
Reads a range of the document_file containing some number of entry_class
instances. We delimit the entry_class entries by the keyword on their
first line (entry_keyword). When finished the document is left at the
end_position.
Either an end_position or section_end_keywords must be provided.
:param file document_file: file with network status document content
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param class entry_class: class to construct instance for
:param str entry_keyword: first keyword for the entry instances
:param int start_position: start of the section, default is the current position
:param int end_position: end of the section
:param tuple section_end_keywords: keyword(s) that delimit the end of the
section if no end_position was provided
:param tuple extra_args: extra arguments for the entry_class (after the
content and validate flag)
:returns: iterator over entry_class instances
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
if start_position:
document_file.seek(start_position)
else:
start_position = document_file.tell()
# check if we're starting at the end of the section (ie, there's no entries to read)
if section_end_keywords:
first_keyword = None
line_match = KEYWORD_LINE.match(stem.util.str_tools._to_unicode(document_file.readline()))
if line_match:
first_keyword = line_match.groups()[0]
document_file.seek(start_position)
if first_keyword in section_end_keywords:
return
while end_position is None or document_file.tell() < end_position:
desc_lines, ending_keyword = _read_until_keywords(
(entry_keyword,) + section_end_keywords,
document_file,
ignore_first = True,
end_position = end_position,
include_ending_keyword = True
)
desc_content = bytes.join(b'', desc_lines)
if desc_content:
yield entry_class(desc_content, validate, *extra_args)
# check if we stopped at the end of the section
if ending_keyword in section_end_keywords:
break
else:
break
def _parse_r_line(descriptor, entries):
# Parses a RouterStatusEntry's 'r' line. They're very nearly identical for
# all current entry types (v2, v3, and microdescriptor v3) with one little
# wrinkle: only the microdescriptor flavor excludes a 'digest' field.
#
# For v2 and v3 router status entries:
# "r" nickname identity digest publication IP ORPort DirPort
# example: r mauer BD7xbfsCFku3+tgybEZsg8Yjhvw itcuKQ6PuPLJ7m/Oi928WjO2j8g 2012-06-22 13:19:32 80.101.105.103 9001 0
#
# For v3 microdescriptor router status entries:
# "r" nickname identity publication IP ORPort DirPort
# example: r Konata ARIJF2zbqirB9IwsW0mQznccWww 2012-09-24 13:40:40 69.64.48.168 9001 9030
value = _value('r', entries)
include_digest = not isinstance(descriptor, RouterStatusEntryMicroV3)
r_comp = value.split(' ')
# inject a None for the digest to normalize the field positioning
if not include_digest:
r_comp.insert(2, None)
if len(r_comp) < 8:
expected_field_count = 'eight' if include_digest else 'seven'
raise ValueError("%s 'r' line must have %s values: r %s" % (descriptor._name(), expected_field_count, value))
if not stem.util.tor_tools.is_valid_nickname(r_comp[0]):
raise ValueError("%s nickname isn't valid: %s" % (descriptor._name(), r_comp[0]))
elif not stem.util.connection.is_valid_ipv4_address(r_comp[5]):
raise ValueError("%s address isn't a valid IPv4 address: %s" % (descriptor._name(), r_comp[5]))
elif not stem.util.connection.is_valid_port(r_comp[6]):
raise ValueError('%s ORPort is invalid: %s' % (descriptor._name(), r_comp[6]))
elif not stem.util.connection.is_valid_port(r_comp[7], allow_zero = True):
raise ValueError('%s DirPort is invalid: %s' % (descriptor._name(), r_comp[7]))
descriptor.nickname = r_comp[0]
descriptor.fingerprint = _base64_to_hex(r_comp[1])
if include_digest:
descriptor.digest = _base64_to_hex(r_comp[2])
descriptor.address = r_comp[5]
descriptor.or_port = int(r_comp[6])
descriptor.dir_port = None if r_comp[7] == '0' else int(r_comp[7])
try:
published = '%s %s' % (r_comp[3], r_comp[4])
descriptor.published = stem.util.str_tools._parse_timestamp(published)
except ValueError:
raise ValueError("Publication time time wasn't parsable: r %s" % value)
def _parse_a_line(descriptor, entries):
# "a" SP address ":" portlist
# example: a [2001:888:2133:0:82:94:251:204]:9001
or_addresses = []
for value in _values('a', entries):
if ':' not in value:
raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value))
address, port = value.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError("%s 'a' line must start with an IPv6 address: a %s" % (descriptor._name(), value))
if stem.util.connection.is_valid_port(port):
or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
else:
raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value))
descriptor.or_addresses = or_addresses
def _parse_s_line(descriptor, entries):
# "s" Flags
# example: s Named Running Stable Valid
value = _value('s', entries)
flags = [] if value == '' else value.split(' ')
descriptor.flags = flags
for flag in flags:
if flags.count(flag) > 1:
raise ValueError('%s had duplicate flags: s %s' % (descriptor._name(), value))
elif flag == '':
raise ValueError("%s had extra whitespace on its 's' line: s %s" % (descriptor._name(), value))
def _parse_v_line(descriptor, entries):
# "v" version
# example: v Tor 0.2.2.35
#
# The spec says that if this starts with "Tor " then what follows is a
# tor version. If not then it has "upgraded to a more sophisticated
# protocol versioning system".
value = _value('v', entries)
descriptor.version_line = value
if value.startswith('Tor '):
try:
descriptor.version = stem.version._get_version(value[4:])
except ValueError as exc:
raise ValueError('%s has a malformed tor version (%s): v %s' % (descriptor._name(), exc, value))
def _parse_w_line(descriptor, entries):
# "w" "Bandwidth=" INT ["Measured=" INT] ["Unmeasured=1"]
# example: w Bandwidth=7980
value = _value('w', entries)
w_comp = value.split(' ')
if len(w_comp) < 1:
raise ValueError("%s 'w' line is blank: w %s" % (descriptor._name(), value))
elif not w_comp[0].startswith('Bandwidth='):
raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value))
bandwidth = None
measured = None
is_unmeasured = False
unrecognized_bandwidth_entries = []
for w_entry in w_comp:
if '=' in w_entry:
w_key, w_value = w_entry.split('=', 1)
else:
w_key, w_value = w_entry, None
if w_key == 'Bandwidth':
if not (w_value and w_value.isdigit()):
raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))
bandwidth = int(w_value)
elif w_key == 'Measured':
if not (w_value and w_value.isdigit()):
raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))
measured = int(w_value)
elif w_key == 'Unmeasured':
if w_value != '1':
raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value))
is_unmeasured = True
else:
unrecognized_bandwidth_entries.append(w_entry)
descriptor.bandwidth = bandwidth
descriptor.measured = measured
descriptor.is_unmeasured = is_unmeasured
descriptor.unrecognized_bandwidth_entries = unrecognized_bandwidth_entries
def _parse_p_line(descriptor, entries):
# "p" ("accept" / "reject") PortList
#
# examples:
#
# p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001
# p reject 1-65535
value = _value('p', entries)
try:
descriptor.exit_policy = stem.exit_policy.MicroExitPolicy(value)
except ValueError as exc:
raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value))
def _parse_id_line(descriptor, entries):
# "id" "ed25519" ed25519-identity
#
# examples:
#
# id ed25519 none
# id ed25519 8RH34kO07Pp+XYwzdoATVyCibIvmbslUjRkAm7J4IA8
value = _value('id', entries)
if value:
if descriptor.document and not descriptor.document.is_vote:
raise ValueError("%s 'id' line should only appear in votes: id %s" % (descriptor._name(), value))
value_comp = value.split()
if len(value_comp) >= 2:
descriptor.identifier_type = value_comp[0]
descriptor.identifier = value_comp[1]
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)
def _parse_m_line(descriptor, entries):
# "m" methods 1*(algorithm "=" digest)
# example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs
all_hashes = []
for value in _values('m', entries):
m_comp = value.split(' ')
if not (descriptor.document and descriptor.document.is_vote):
vote_status = 'consensus' if descriptor.document else 'undefined document'
raise ValueError("%s 'm' line should only appear in votes (appeared in a %s): m %s" % (descriptor._name(), vote_status, value))
elif len(m_comp) < 1:
raise ValueError("%s 'm' line needs to start with a series of methods: m %s" % (descriptor._name(), value))
try:
methods = [int(entry) for entry in m_comp[0].split(',')]
except ValueError:
raise ValueError('%s microdescriptor methods should be a series of comma separated integers: m %s' % (descriptor._name(), value))
hashes = {}
for entry in m_comp[1:]:
if '=' not in entry:
raise ValueError("%s can only have a series of 'algorithm=digest' mappings after the methods: m %s" % (descriptor._name(), value))
hash_name, digest = entry.split('=', 1)
hashes[hash_name] = digest
all_hashes.append((methods, hashes))
descriptor.microdescriptor_hashes = all_hashes
def _parse_microdescriptor_m_line(descriptor, entries):
# "m" digest
# example: m aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70
descriptor.digest = _base64_to_hex(_value('m', entries), check_if_fingerprint = False)
def _base64_to_hex(identity, check_if_fingerprint = True):
"""
Decodes a base64 value to hex. For example...
::
>>> _base64_to_hex('p1aag7VwarGxqctS7/fS0y5FU+s')
'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'
:param str identity: encoded fingerprint from the consensus
:param bool check_if_fingerprint: asserts that the result is a fingerprint if **True**
:returns: **str** with the uppercase hex encoding of the relay's fingerprint
:raises: **ValueError** if the result isn't a valid fingerprint
"""
# trailing equal signs were stripped from the identity
missing_padding = len(identity) % 4
identity += '=' * missing_padding
try:
identity_decoded = base64.b64decode(stem.util.str_tools._to_bytes(identity))
except (TypeError, binascii.Error):
raise ValueError("Unable to decode identity string '%s'" % identity)
fingerprint = binascii.hexlify(identity_decoded).upper()
if stem.prereq.is_python_3():
fingerprint = stem.util.str_tools._to_unicode(fingerprint)
if check_if_fingerprint:
if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
raise ValueError("Decoded '%s' to be '%s', which isn't a valid fingerprint" % (identity, fingerprint))
return fingerprint
class RouterStatusEntry(Descriptor):
"""
Information about an individual router stored within a network status
document. This is the common parent for concrete status entry types.
:var stem.descriptor.networkstatus.NetworkStatusDocument document: **\*** document that this descriptor came from
:var str nickname: **\*** router's nickname
:var str fingerprint: **\*** router's fingerprint
:var datetime published: **\*** router's publication
:var str address: **\*** router's IP address
:var int or_port: **\*** router's ORPort
:var int dir_port: **\*** router's DirPort
:var list flags: **\*** list of :data:`~stem.Flag` associated with the relay
:var stem.version.Version version: parsed version of tor, this is **None** if
the relay's using a new versioning scheme
:var str version_line: versioning information reported by the relay
"""
ATTRIBUTES = {
'nickname': (None, _parse_r_line),
'fingerprint': (None, _parse_r_line),
'published': (None, _parse_r_line),
'address': (None, _parse_r_line),
'or_port': (None, _parse_r_line),
'dir_port': (None, _parse_r_line),
'flags': (None, _parse_s_line),
'version_line': (None, _parse_v_line),
'version': (None, _parse_v_line),
}
PARSER_FOR_LINE = {
'r': _parse_r_line,
's': _parse_s_line,
'v': _parse_v_line,
}
def __init__(self, content, validate = False, document = None):
"""
Parse a router descriptor in a network status document.
:param str content: router descriptor content to be parsed
:param NetworkStatusDocument document: document this descriptor came from
:param bool validate: checks the validity of the content if **True**, skips
these checks otherwise
:raises: **ValueError** if the descriptor data is invalid
"""
super(RouterStatusEntry, self).__init__(content, lazy_load = not validate)
self.document = document
entries = _descriptor_components(content, validate)
if validate:
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self)))
for keyword in self._single_fields():
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self)))
if 'r' != list(entries.keys())[0]:
raise ValueError("%s are expected to start with a 'r' line:\n%s" % (self._name(True), str(self)))
self._parse(entries, validate)
else:
self._entries = entries
def _name(self, is_plural = False):
"""
Name for this descriptor type.
"""
return 'Router status entries' if is_plural else 'Router status entry'
def _required_fields(self):
"""
Provides lines that must appear in the descriptor.
"""
return ()
def _single_fields(self):
"""
Provides lines that can only appear in the descriptor once.
"""
return ()
def _compare(self, other, method):
if not isinstance(other, RouterStatusEntry):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class RouterStatusEntryV2(RouterStatusEntry):
"""
Information about an individual router stored within a version 2 network
status document.
:var str digest: **\*** router's upper-case hex digest
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'digest': (None, _parse_r_line),
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())),
))
def _name(self, is_plural = False):
return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)'
def _required_fields(self):
return ('r',)
def _single_fields(self):
return ('r', 's', 'v')
def _compare(self, other, method):
if not isinstance(other, RouterStatusEntryV2):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class RouterStatusEntryV3(RouterStatusEntry):
"""
Information about an individual router stored within a version 3 network
status document.
:var list or_addresses: **\*** relay's OR addresses, this is a tuple listing
of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
:var str identifier_type: identity digest key type
:var str identifier: base64 encoded identity digest
:var str digest: **\*** router's upper-case hex digest
:var int bandwidth: bandwidth measured to be available by the relay, this is a
unit-less heuristic generated by the Bandwidth authorities to weight relay
selection
:var int measured: *bandwidth* vote provided by a bandwidth authority
:var bool is_unmeasured: *bandwidth* measurement isn't based on three or more
measurements
:var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
information that isn't yet recognized
:var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy
:var dict protocols: mapping of protocols to their supported versions
:var list microdescriptor_hashes: **\*** tuples of two values, the list of
consensus methods for generating a set of digests and the 'algorithm =>
digest' mappings
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.5.0
Added the identifier and identifier_type attributes.
.. versionchanged:: 1.6.0
Added the protocols attribute.
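The exit_policy attribute, for instance, can be used to check what a relay
allows (a sketch over an already parsed entry)...

::

  if entry.exit_policy and entry.exit_policy.can_exit_to(port = 443):
    print('%s allows exiting to port 443' % entry.nickname)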
"""
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'digest': (None, _parse_r_line),
'or_addresses': ([], _parse_a_line),
'identifier_type': (None, _parse_id_line),
'identifier': (None, _parse_id_line),
'bandwidth': (None, _parse_w_line),
'measured': (None, _parse_w_line),
'is_unmeasured': (False, _parse_w_line),
'unrecognized_bandwidth_entries': ([], _parse_w_line),
'exit_policy': (None, _parse_p_line),
'protocols': ({}, _parse_pr_line),
'microdescriptor_hashes': ([], _parse_m_line),
})
PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
'a': _parse_a_line,
'w': _parse_w_line,
'p': _parse_p_line,
'pr': _parse_pr_line,
'id': _parse_id_line,
'm': _parse_m_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())),
('s', 'Fast Named Running Stable Valid'),
))
def _name(self, is_plural = False):
return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)'
def _required_fields(self):
return ('r', 's')
def _single_fields(self):
return ('r', 's', 'v', 'w', 'p', 'pr')
def _compare(self, other, method):
if not isinstance(other, RouterStatusEntryV3):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class RouterStatusEntryMicroV3(RouterStatusEntry):
"""
Information about an individual router stored within a microdescriptor
flavored network status document.
:var list or_addresses: **\*** relay's OR addresses, this is a tuple listing
of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
:var int bandwidth: bandwidth claimed by the relay (in kb/s)
:var int measured: bandwidth measured to be available by the relay
:var bool is_unmeasured: bandwidth measurement isn't based on three or more
measurements
:var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
information that isn't yet recognized
:var dict protocols: mapping of protocols to their supported versions
:var str digest: **\*** router's hex encoded digest of our corresponding microdescriptor
.. versionchanged:: 1.6.0
Added the protocols attribute.
.. versionchanged:: 1.7.0
Added the or_addresses attribute.
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'or_addresses': ([], _parse_a_line),
'bandwidth': (None, _parse_w_line),
'measured': (None, _parse_w_line),
'is_unmeasured': (False, _parse_w_line),
'unrecognized_bandwidth_entries': ([], _parse_w_line),
'protocols': ({}, _parse_pr_line),
'digest': (None, _parse_microdescriptor_m_line),
})
PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
'a': _parse_a_line,
'w': _parse_w_line,
'm': _parse_microdescriptor_m_line,
'pr': _parse_pr_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s ARIJF2zbqirB9IwsW0mQznccWww %s %s 9001 9030' % (_random_nickname(), _random_date(), _random_ipv4_address())),
('m', 'aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70'),
('s', 'Fast Guard HSDir Named Running Stable V2Dir Valid'),
))
def _name(self, is_plural = False):
return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)'
def _required_fields(self):
return ('r', 's', 'm')
def _single_fields(self):
return ('r', 's', 'v', 'w', 'm', 'pr')
def _compare(self, other, method):
if not isinstance(other, RouterStatusEntryMicroV3):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
stem-1.7.1/stem/descriptor/tordnsel.py 0000664 0001750 0001750 00000007546 13411002341 020545 0 ustar atagar atagar 0000000 0000000 # Copyright 2013-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `TorDNSEL <https://www.torproject.org/projects/tordnsel.html.en>`_
exit list files.
::
TorDNSEL - Exit list provided by TorDNSEL
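For example (a sketch; the path and its content are illustrative)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('/tmp/exit-list', 'tordnsel 1.0'):
    for address, date in desc.exit_addresses:
      print('%s was seen exiting from %s' % (date, address))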
"""
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.descriptor import (
Descriptor,
_read_until_keywords,
_descriptor_components,
)
def _parse_file(tordnsel_file, validate = False, **kwargs):
"""
Iterates over a tordnsel file.
:returns: iterator for :class:`~stem.descriptor.tordnsel.TorDNSEL`
instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
# skip content prior to the first ExitNode
_read_until_keywords('ExitNode', tordnsel_file, skip = True)
while True:
contents = _read_until_keywords('ExitAddress', tordnsel_file)
contents += _read_until_keywords('ExitNode', tordnsel_file)
if contents:
yield TorDNSEL(bytes.join(b'', contents), validate, **kwargs)
else:
break # done parsing file
class TorDNSEL(Descriptor):
"""
TorDNSEL descriptor (`exitlist specification
<https://gitweb.torproject.org/tordnsel.git/tree/doc/torel-design.txt>`_)
:var str fingerprint: **\*** relay's fingerprint
:var datetime published: **\*** time in UTC when this descriptor was made
:var datetime last_status: **\*** time in UTC when the relay was seen in a v2 network status
:var list exit_addresses: **\*** list of (str address, datetime date) tuples consisting of the found IPv4 exit address and the time
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
def __init__(self, raw_contents, validate):
super(TorDNSEL, self).__init__(raw_contents)
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = _descriptor_components(raw_contents, validate)
self.fingerprint = None
self.published = None
self.last_status = None
self.exit_addresses = []
self._parse(entries, validate)
def _parse(self, entries, validate):
for keyword, values in list(entries.items()):
value, block_type, block_content = values[0]
if validate and block_content:
raise ValueError('Unexpected block content: %s' % block_content)
if keyword == 'ExitNode':
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)
self.fingerprint = value
elif keyword == 'Published':
try:
self.published = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("Published time wasn't parsable: %s" % value)
elif keyword == 'LastStatus':
try:
self.last_status = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("LastStatus time wasn't parsable: %s" % value)
elif keyword == 'ExitAddress':
for value, block_type, block_content in values:
address, date = value.split(' ', 1)
if validate:
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("ExitAddress isn't a valid IPv4 address: %s" % address)
elif block_content:
raise ValueError('Unexpected block content: %s' % block_content)
try:
date = stem.util.str_tools._parse_timestamp(date)
self.exit_addresses.append((address, date))
except ValueError:
if validate:
raise ValueError("ExitAddress found time wasn't parsable: %s" % value)
elif validate:
raise ValueError('Unrecognized keyword: %s' % keyword)
stem-1.7.1/stem/descriptor/__init__.py 0000664 0001750 0001750 00000131551 13411002341 020444 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Package for parsing and processing descriptor data.
**Module Overview:**
::
parse_file - Parses the descriptors in a file.
create - Creates a new custom descriptor.
create_signing_key - Creates a signing key that can be used for creating descriptors.
Descriptor - Common parent for all descriptor file types.
|- get_path - location of the descriptor on disk if it came from a file
|- get_archive_path - location of the descriptor within the archive it came from
|- get_bytes - similar to str(), but provides our original bytes content
|- get_unrecognized_lines - unparsed descriptor content
+- __str__ - string that the descriptor was made from
.. data:: DocumentHandler (enum)
Ways in which we can parse a
:class:`~stem.descriptor.networkstatus.NetworkStatusDocument`.
Both **ENTRIES** and **BARE_DOCUMENT** have a 'thin' document, which doesn't
have a populated **routers** attribute. This allows for lower memory usage
and less upfront runtime. However, if read time and memory aren't a concern then
**DOCUMENT** can provide you with a fully populated document.
Handlers don't change the fact that most methods that provide
descriptors return an iterator. In the case of **DOCUMENT** and
**BARE_DOCUMENT** that iterator would have just a single item -
the document itself.
A simple way to handle this is to call **next()** to get the iterator's one and
only value...
::
import stem.descriptor.remote
from stem.descriptor import DocumentHandler
consensus = next(stem.descriptor.remote.get_consensus(
document_handler = DocumentHandler.BARE_DOCUMENT,
))
=================== ===========
DocumentHandler Description
=================== ===========
**ENTRIES** Iterates over the contained :class:`~stem.descriptor.router_status_entry.RouterStatusEntry`. Each has a reference to the bare document it came from (through its **document** attribute).
**DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` with the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` it contains (through its **routers** attribute).
**BARE_DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` **without** a reference to its contents (the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` are unread).
=================== ===========
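With the default **ENTRIES** handler, for instance, the router status entries
of the current consensus can be iterated over directly (a minimal sketch,
assuming a reachable directory mirror)...
::
  import stem.descriptor.remote
  for entry in stem.descriptor.remote.get_consensus():
    print('%s is a relay at %s' % (entry.fingerprint, entry.address))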
"""
import base64
import codecs
import collections
import copy
import hashlib
import os
import random
import re
import string
import tarfile
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.str_tools
import stem.util.system
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
__all__ = [
'export',
'reader',
'remote',
'extrainfo_descriptor',
'server_descriptor',
'microdescriptor',
'networkstatus',
'router_status_entry',
'tordnsel',
'parse_file',
'Descriptor',
]
UNSEEKABLE_MSG = """\
File object isn't seekable. Try wrapping it with a BytesIO instead...
content = my_file.read()
parsed_descriptors = stem.descriptor.parse_file(io.BytesIO(content))
"""
KEYWORD_CHAR = 'a-zA-Z0-9-'
WHITESPACE = ' \t'
KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE))
SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE
PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE))
PGP_BLOCK_END = '-----END %s-----'
EMPTY_COLLECTION = ([], {}, set())
DIGEST_TYPE_INFO = b'\x00\x01'
DIGEST_PADDING = b'\xFF'
DIGEST_SEPARATOR = b'\x00'
CRYPTO_BLOB = """
MIGJAoGBAJv5IIWQ+WDWYUdyA/0L8qbIkEVH/cwryZWoIaPAzINfrw1WfNZGtBmg
skFtXhOHHqTRN4GPPrZsAIUOQGzQtGb66IQgT4tO/pj+P6QmSCCdTfhvGfgTCsC+
WPi4Fl2qryzTb3QO5r5x7T8OsG2IBUET1bLQzmtbC560SYR49IvVAgMBAAE=
"""
DocumentHandler = stem.util.enum.UppercaseEnum(
'ENTRIES',
'DOCUMENT',
'BARE_DOCUMENT',
)
class SigningKey(collections.namedtuple('SigningKey', ['private', 'public', 'public_digest'])):
"""
Key used by relays to sign their server and extrainfo descriptors.
.. versionadded:: 1.6.0
:var cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private: private key
:var cryptography.hazmat.backends.openssl.rsa._RSAPublicKey public: public key
:var bytes public_digest: block that can be used for a server descriptor's 'signing-key' field
"""
def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, normalize_newlines = None, **kwargs):
"""
Simple function to read the descriptor contents from a file, providing an
iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents.
If you don't provide a **descriptor_type** argument then this automatically
tries to determine the descriptor type based on the following...
* The @type annotation on the first line. These are generally only found in
the `CollecTor archives <https://collector.torproject.org/>`_.
* The filename if it matches something from tor's data directory. For
instance, tor's 'cached-descriptors' contains server descriptors.
This is a handy function for simple usage, but if you're reading multiple
descriptor files you might want to consider the
:class:`~stem.descriptor.reader.DescriptorReader`.
Descriptor types include the following, including further minor versions (i.e.
if we support 1.1 then we also support everything from 1.0 and most things
from 1.2, but not 2.0)...
========================================= =====
Descriptor Type Class
========================================= =====
server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.RelayDescriptor`
extra-info 1.0 :class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor`
microdescriptor 1.0 :class:`~stem.descriptor.microdescriptor.Microdescriptor`
directory 1.0 **unsupported**
network-status-2 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV2`)
dir-key-certificate-3 1.0 :class:`~stem.descriptor.networkstatus.KeyCertificate`
network-status-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
network-status-vote-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
network-status-microdesc-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
bridge-network-status 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.BridgeNetworkStatusDocument`)
bridge-server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.BridgeDescriptor`
bridge-extra-info 1.1 or 1.2 :class:`~stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor`
torperf 1.0 **unsupported**
bridge-pool-assignment 1.0 **unsupported**
tordnsel 1.0 :class:`~stem.descriptor.tordnsel.TorDNSEL`
hidden-service-descriptor 1.0 :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor`
========================================= =====
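For example, to read tor's 'cached-descriptors' file (a short sketch, with the
path adjusted for wherever your data directory lives)...
::
  import stem.descriptor
  for desc in stem.descriptor.parse_file('/home/atagar/.tor/cached-descriptors'):
    print('found relay %s (%s)' % (desc.nickname, desc.fingerprint))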
If you're using **python 3** then beware that the open() function defaults to
using text mode. **Binary mode** is strongly suggested because it's both
faster (in my testing, by about 33x) and doesn't do universal newline
translation, which can cause us to misparse the document.
::
my_descriptor_file = open(descriptor_path, 'rb')
:param str,file,tarfile descriptor_file: path or opened file with the descriptor contents
:param str descriptor_type: `descriptor type <https://metrics.torproject.org/collector.html#data-formats>`_, this is guessed if not provided
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param bool normalize_newlines: converts windows newlines (CRLF) to unix
newlines (LF); this is the default when reading data directories on windows
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **TypeError** if we can't match the contents of the file to a descriptor type
* **IOError** if unable to read from the descriptor_file
"""
# Delegate to a helper if this is a path or tarfile.
handler = None
if stem.util._is_str(descriptor_file):
if stem.util.system.is_tarfile(descriptor_file):
handler = _parse_file_for_tar_path
else:
handler = _parse_file_for_path
elif isinstance(descriptor_file, tarfile.TarFile):
handler = _parse_file_for_tarfile
if handler:
for desc in handler(descriptor_file, descriptor_type, validate, document_handler, **kwargs):
yield desc
return
# Not all files are seekable. If unseekable then we advise the user.
#
# Python 3.x adds an io.seekable() method, but not an option with python 2.x
# so using an experimental call to tell() to determine this.
try:
descriptor_file.tell()
except IOError:
raise IOError(UNSEEKABLE_MSG)
# The tor descriptor specifications do not provide a reliable method for
# identifying a descriptor file's type and version so we need to guess
# based on its filename. Metrics descriptors, however, can be identified
# by an annotation on their first line...
# https://trac.torproject.org/5651
initial_position = descriptor_file.tell()
first_line = stem.util.str_tools._to_unicode(descriptor_file.readline().strip())
metrics_header_match = re.match('^@type (\S+) (\d+)\.(\d+)$', first_line)
if not metrics_header_match:
descriptor_file.seek(initial_position)
descriptor_path = getattr(descriptor_file, 'name', None)
filename = '' if descriptor_path is None else os.path.basename(descriptor_file.name)
def parse(descriptor_file):
if normalize_newlines:
descriptor_file = NewlineNormalizer(descriptor_file)
if descriptor_type is not None:
descriptor_type_match = re.match('^(\S+) (\d+)\.(\d+)$', descriptor_type)
if descriptor_type_match:
desc_type, major_version, minor_version = descriptor_type_match.groups()
return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs)
else:
raise ValueError("The descriptor_type must be of the form ' .'")
elif metrics_header_match:
# Metrics descriptor handling
desc_type, major_version, minor_version = metrics_header_match.groups()
return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs)
else:
# Cached descriptor handling. These contain multiple descriptors per file.
if normalize_newlines is None and stem.util.system.is_windows():
descriptor_file = NewlineNormalizer(descriptor_file)
if filename == 'cached-descriptors' or filename == 'cached-descriptors.new':
return stem.descriptor.server_descriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new':
return stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new':
return stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-consensus':
return stem.descriptor.networkstatus._parse_file(descriptor_file, validate = validate, document_handler = document_handler, **kwargs)
elif filename == 'cached-microdesc-consensus':
return stem.descriptor.networkstatus._parse_file(descriptor_file, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs)
else:
raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))
for desc in parse(descriptor_file):
if descriptor_path is not None:
desc._set_path(os.path.abspath(descriptor_path))
yield desc
def _parse_file_for_path(descriptor_file, *args, **kwargs):
with open(descriptor_file, 'rb') as desc_file:
for desc in parse_file(desc_file, *args, **kwargs):
yield desc
def _parse_file_for_tar_path(descriptor_file, *args, **kwargs):
# TODO: use 'with' for tarfile after dropping python 2.6 support
tar_file = tarfile.open(descriptor_file)
try:
for desc in parse_file(tar_file, *args, **kwargs):
desc._set_path(os.path.abspath(descriptor_file))
yield desc
finally:
if tar_file:
tar_file.close()
def _parse_file_for_tarfile(descriptor_file, *args, **kwargs):
for tar_entry in descriptor_file:
if tar_entry.isfile():
entry = descriptor_file.extractfile(tar_entry)
if tar_entry.size == 0:
entry.close()  # nothing to parse, close the extracted file rather than leak it
continue
try:
for desc in parse_file(entry, *args, **kwargs):
desc._set_archive_path(entry.name)
yield desc
finally:
entry.close()
def _parse_metrics_file(descriptor_type, major_version, minor_version, descriptor_file, validate, document_handler, **kwargs):
# Parses descriptor files from metrics, yielding individual descriptors. This
# throws a TypeError if the descriptor_type or version isn't recognized.
if descriptor_type == 'server-descriptor' and major_version == 1:
for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'bridge-server-descriptor' and major_version == 1:
for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'extra-info' and major_version == 1:
for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'microdescriptor' and major_version == 1:
for desc in stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'bridge-extra-info' and major_version == 1:
# version 1.1 introduced a 'transport' field...
# https://trac.torproject.org/6257
for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'network-status-2' and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV2
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'dir-key-certificate-3' and major_version == 1:
for desc in stem.descriptor.networkstatus._parse_file_key_certs(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type in ('network-status-consensus-3', 'network-status-vote-3') and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'network-status-microdesc-consensus-3' and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'bridge-network-status' and major_version == 1:
document_type = stem.descriptor.networkstatus.BridgeNetworkStatusDocument
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'tordnsel' and major_version == 1:
document_type = stem.descriptor.tordnsel.TorDNSEL
for desc in stem.descriptor.tordnsel._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'hidden-service-descriptor' and major_version == 1:
document_type = stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor
for desc in stem.descriptor.hidden_service_descriptor._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
else:
raise TypeError("Unrecognized metrics descriptor format. type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version))
def _descriptor_content(attr = None, exclude = (), header_template = (), footer_template = ()):
"""
Constructs a minimal descriptor with the given attributes. The content we
provide back is of the form...
* header_template (with matching attr filled in)
* unused attr entries
* footer_template (with matching attr filled in)
So for instance...
::
_descriptor_content(
attr = {'nickname': 'caerSidi', 'contact': 'atagar'},
header_template = (
('nickname', 'foobar'),
('fingerprint', '12345'),
),
)
... would result in...
::
nickname caerSidi
fingerprint 12345
contact atagar
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor
:param tuple header_template: key/value pairs for mandatory fields before unrecognized content
:param tuple footer_template: key/value pairs for mandatory fields after unrecognized content
:returns: bytes with the requested descriptor content
"""
header_content, footer_content = [], []
attr = {} if attr is None else OrderedDict(attr) # shallow copy since we're destructive
for content, template in ((header_content, header_template),
(footer_content, footer_template)):
for keyword, value in template:
if keyword in exclude:
continue
value = stem.util.str_tools._to_unicode(attr.pop(keyword, value))
if value is None:
continue
elif isinstance(value, (tuple, list)):
for v in value:
content.append('%s %s' % (keyword, v))
elif value == '':
content.append(keyword)
elif value.startswith('\n'):
# some values, like crypto blobs, follow the keyword on subsequent lines
content.append('%s%s' % (keyword, value))
else:
content.append('%s %s' % (keyword, value))
remainder = []
for k, v in attr.items():
if isinstance(v, (tuple, list)):
remainder += ['%s %s' % (k, entry) for entry in v]
else:
remainder.append('%s %s' % (k, v))
return stem.util.str_tools._to_bytes('\n'.join(header_content + remainder + footer_content))
def _value(line, entries):
return entries[line][0][0]
def _values(line, entries):
return [entry[0] for entry in entries[line]]
def _parse_simple_line(keyword, attribute, func = None):
def _parse(descriptor, entries):
value = _value(keyword, entries)
setattr(descriptor, attribute, func(value) if func else value)
return _parse
def _parse_if_present(keyword, attribute):
return lambda descriptor, entries: setattr(descriptor, attribute, keyword in entries)
def _parse_bytes_line(keyword, attribute):
def _parse(descriptor, entries):
line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE)
result = None
if line_match:
value = line_match.groups()[1]
result = b'' if value is None else value
setattr(descriptor, attribute, result)
return _parse
def _parse_timestamp_line(keyword, attribute):
# "" YYYY-MM-DD HH:MM:SS
def _parse(descriptor, entries):
value = _value(keyword, entries)
try:
setattr(descriptor, attribute, stem.util.str_tools._parse_timestamp(value))
except ValueError:
raise ValueError("Timestamp on %s line wasn't parsable: %s %s" % (keyword, keyword, value))
return _parse
def _parse_forty_character_hex(keyword, attribute):
# format of fingerprints, sha1 digests, etc
def _parse(descriptor, entries):
value = _value(keyword, entries)
if not stem.util.tor_tools.is_hex_digits(value, 40):
raise ValueError('%s line had an invalid value (should be 40 hex characters): %s %s' % (keyword, keyword, value))
setattr(descriptor, attribute, value)
return _parse
def _parse_protocol_line(keyword, attribute):
def _parse(descriptor, entries):
# parses 'protocol' entries like: Cons=1-2 Desc=1-2 DirCache=1 HSDir=1
value = _value(keyword, entries)
protocols = OrderedDict()
for k, v in _mappings_for(keyword, value):
versions = []
if not v:
continue
for entry in v.split(','):
if '-' in entry:
min_value, max_value = entry.split('-', 1)
else:
min_value = max_value = entry
if not min_value.isdigit() or not max_value.isdigit():
raise ValueError('Protocol values should be a number or number range, but was: %s %s' % (keyword, value))
versions += range(int(min_value), int(max_value) + 1)
protocols[k] = versions
setattr(descriptor, attribute, protocols)
return _parse
def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None):
def _parse(descriptor, entries):
value, block_type, block_contents = entries[keyword][0]
if not block_contents or block_type != expected_block_type:
raise ValueError("'%s' should be followed by a %s block, but was a %s" % (keyword, expected_block_type, block_type))
setattr(descriptor, attribute, block_contents)
if value_attribute:
setattr(descriptor, value_attribute, value)
return _parse
def _mappings_for(keyword, value, require_value = False, divider = ' '):
"""
Parses an attribute as a series of 'key=value' mappings. Unlike _parse_*
functions this is a helper, returning the attribute value rather than setting
a descriptor field. This way parsers can perform additional validations.
:param str keyword: descriptor field being parsed
:param str value: 'attribute => values' mappings to parse
:param str divider: separator between the key/value mappings
:param bool require_value: validates that values are not empty
:returns: **generator** with the key/value of the map attribute
:raises: **ValueError** if descriptor content is invalid
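For instance, a protocol style value would be parsed as (sketch)...
::
  >>> dict(_mappings_for('pr', 'Cons=1-2 Desc=1'))
  {'Cons': '1-2', 'Desc': '1'}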
"""
if value is None:
return # no descriptor value to process
elif value == '':
return # descriptor field was present, but blank
for entry in value.split(divider):
if '=' not in entry:
raise ValueError("'%s' should be a series of 'key=value' pairs but was: %s" % (keyword, value))
k, v = entry.split('=', 1)
if require_value and not v:
raise ValueError("'%s' line's %s mapping had a blank value: %s" % (keyword, k, value))
yield k, v
def _copy(default):
if default is None or isinstance(default, (bool, stem.exit_policy.ExitPolicy)):
return default # immutable
elif default in EMPTY_COLLECTION:
return type(default)() # collection construction tad faster than copy
else:
return copy.copy(default)
class Descriptor(object):
"""
Common parent for all types of descriptors.
"""
ATTRIBUTES = {} # mapping of 'attribute' => (default_value, parsing_function)
PARSER_FOR_LINE = {} # line keyword to its associated parsing function
def __init__(self, contents, lazy_load = False):
self._path = None
self._archive_path = None
self._raw_contents = contents
self._lazy_loading = lazy_load
self._entries = {}
self._unrecognized_lines = []
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
"""
Creates descriptor content with the given attributes. Mandatory fields are
filled with dummy information unless data is supplied. This doesn't yet
create a valid signature.
.. versionadded:: 1.6.0
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor, this
results in an invalid descriptor
:param bool sign: includes cryptographic signatures and digests if True
:returns: **str** with the content of a descriptor
:raises:
* **ImportError** if cryptography is unavailable and sign is True
* **NotImplementedError** if not implemented for this descriptor type
"""
raise NotImplementedError("The create and content methods haven't been implemented for %s" % cls.__name__)
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False):
"""
Creates a descriptor with the given attributes. Mandatory fields are filled
with dummy information unless data is supplied. This doesn't yet create a
valid signature.
.. versionadded:: 1.6.0
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor, this
results in an invalid descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param bool sign: includes cryptographic signatures and digests if True
:returns: :class:`~stem.descriptor.Descriptor` subclass
:raises:
* **ValueError** if the contents is malformed and validate is True
* **ImportError** if cryptography is unavailable and sign is True
* **NotImplementedError** if not implemented for this descriptor type
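A minimal sketch using one of the subclasses that implements this
(RelayDescriptor, filled entirely with dummy values)...
::
  from stem.descriptor.server_descriptor import RelayDescriptor
  print(RelayDescriptor.create({'contact': 'atagar'}))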
"""
return cls(cls.content(attr, exclude, sign), validate = validate)
def get_path(self):
"""
Provides the absolute path that we loaded this descriptor from.
:returns: **str** with the absolute path of the descriptor source
"""
return self._path
def get_archive_path(self):
"""
If this descriptor came from an archive then provides its path within the
archive. This is only set if the descriptor came from a
:class:`~stem.descriptor.reader.DescriptorReader`, and is **None** if this
descriptor didn't come from an archive.
:returns: **str** with the descriptor's path within the archive
"""
return self._archive_path
def get_bytes(self):
"""
Provides the ASCII **bytes** of the descriptor. This only differs from
**str()** if you're running python 3.x, in which case **str()** provides a
**unicode** string.
:returns: **bytes** for the descriptor's contents
"""
return self._raw_contents
def get_unrecognized_lines(self):
"""
Provides a list of lines that were either ignored or had data that we did
not know how to process. This is most common due to new descriptor fields
that this library does not yet know how to process. Patches welcome!
:returns: **list** of lines of unrecognized content
"""
if self._lazy_loading:
# we need to go ahead and parse the whole document to figure this out
self._parse(self._entries, False)
self._lazy_loading = False
return list(self._unrecognized_lines)
def _parse(self, entries, validate, parser_for_line = None):
"""
Parses a series of 'keyword => (value, pgp block)' mappings and applies
them as attributes.
:param dict entries: descriptor contents to be applied
:param bool validate: checks the validity of descriptor content if True
:param dict parser_for_line: mapping of line keywords to their parsing functions
:raises: **ValueError** if an error occurs in validation
"""
if parser_for_line is None:
parser_for_line = self.PARSER_FOR_LINE
for keyword, values in list(entries.items()):
try:
if keyword in parser_for_line:
parser_for_line[keyword](self, entries)
else:
for value, block_type, block_contents in values:
line = '%s %s' % (keyword, value)
if block_contents:
line += '\n%s' % block_contents
self._unrecognized_lines.append(line)
except ValueError:
if validate:
raise
def _set_path(self, path):
self._path = path
def _set_archive_path(self, path):
self._archive_path = path
def _name(self, is_plural = False):
return str(type(self))
def _digest_for_signature(self, signing_key, signature):
"""
Provides the signed digest we should have given this key and signature.
:param str signing_key: key block used to make this signature
:param str signature: signed digest for this descriptor content
:returns: the digest string encoded in uppercase hex
:raises: ValueError if unable to provide a validly signed digest
"""
if not stem.prereq.is_crypto_available():
raise ValueError('Generating the signed digest requires the cryptography module')
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.utils import int_to_bytes, int_from_bytes
key = load_der_public_key(_bytes_for_block(signing_key), default_backend())
modulus = key.public_numbers().n
public_exponent = key.public_numbers().e
sig_as_bytes = _bytes_for_block(signature)
sig_as_long = int_from_bytes(sig_as_bytes, byteorder='big') # convert signature to an int
blocksize = len(sig_as_bytes) # 256B for NetworkStatusDocuments, 128B for others
# use the public exponent[e] & the modulus[n] to decrypt the int
decrypted_int = pow(sig_as_long, public_exponent, modulus)
# convert the int to a byte array
decrypted_bytes = int_to_bytes(decrypted_int, blocksize)
############################################################################
# The decrypted bytes should have a structure exactly along these lines.
# 1 byte - [null '\x00']
# 1 byte - [block type identifier '\x01'] - Should always be 1
# N bytes - [padding '\xFF' ]
# 1 byte - [separator '\x00' ]
# M bytes - [message]
# Total - blocksize (128 or 256 bytes)
# More info here: http://www.ietf.org/rfc/rfc2313.txt
# especially the Notes in section 8.1
############################################################################
try:
if decrypted_bytes.index(DIGEST_TYPE_INFO) != 0:
raise ValueError('Verification failed, identifier missing')
except ValueError:
raise ValueError('Verification failed, malformed data')
try:
identifier_offset = 2
# find the separator
separator_index = decrypted_bytes.index(DIGEST_SEPARATOR, identifier_offset)
except ValueError:
raise ValueError('Verification failed, separator not found')
digest_hex = codecs.encode(decrypted_bytes[separator_index + 1:], 'hex_codec')
return stem.util.str_tools._to_unicode(digest_hex.upper())
def _digest_for_content(self, start, end):
"""
Provides the digest of our descriptor's content in a given range.
:param bytes start: start of the range to generate a digest for
:param bytes end: end of the range to generate a digest for
:returns: the digest string encoded in uppercase hex
:raises: ValueError if the digest cannot be calculated
"""
raw_descriptor = self.get_bytes()
start_index = raw_descriptor.find(start)
end_index = raw_descriptor.find(end, start_index)
if start_index == -1:
raise ValueError("Digest is for the range starting with '%s' but that isn't in our descriptor" % start)
elif end_index == -1:
raise ValueError("Digest is for the range ending with '%s' but that isn't in our descriptor" % end)
digest_content = raw_descriptor[start_index:end_index + len(end)]
digest_hash = hashlib.sha1(stem.util.str_tools._to_bytes(digest_content))
return stem.util.str_tools._to_unicode(digest_hash.hexdigest().upper())
def __getattr__(self, name):
# We can't use standard hasattr() since it calls this function, recursing.
# Doing so works since it stops recursing after several dozen iterations
# (not sure why), but it's horrible in terms of performance.
def has_attr(attr):
try:
super(Descriptor, self).__getattribute__(attr)
return True
except:
return False
# If an attribute we should have isn't present it means either...
#
# a. we still need to lazy load this
# b. we read the whole descriptor but it wasn't present, so needs the default
if name in self.ATTRIBUTES and not has_attr(name):
default, parsing_function = self.ATTRIBUTES[name]
if self._lazy_loading:
try:
parsing_function(self, self._entries)
except (ValueError, KeyError):
# Set defaults for anything the parsing function should've covered.
# Despite having a validation failure some attributes might be set in
# which case we keep them.
for attr_name, (attr_default, attr_parser) in self.ATTRIBUTES.items():
if parsing_function == attr_parser and not has_attr(attr_name):
setattr(self, attr_name, _copy(attr_default))
else:
setattr(self, name, _copy(default))
return super(Descriptor, self).__getattribute__(name)
def __str__(self):
if stem.prereq.is_python_3():
return stem.util.str_tools._to_unicode(self._raw_contents)
else:
return self._raw_contents
class NewlineNormalizer(object):
"""
File wrapper that normalizes CRLF line endings.
"""
def __init__(self, wrapped_file):
self._wrapped_file = wrapped_file
self.name = getattr(wrapped_file, 'name', None)
def read(self, *args):
return self._wrapped_file.read(*args).replace(b'\r\n', b'\n')
def readline(self, *args):
return self._wrapped_file.readline(*args).replace(b'\r\n', b'\n')
def readlines(self, *args):
return [line.rstrip(b'\r') for line in self._wrapped_file.readlines(*args)]
def seek(self, *args):
return self._wrapped_file.seek(*args)
def tell(self, *args):
return self._wrapped_file.tell(*args)
def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False):
"""
Reads from the descriptor file until we get to one of the given keywords or reach the
end of the file.
:param str,list keywords: keyword(s) we want to read until
:param file descriptor_file: file with the descriptor content
:param bool inclusive: includes the line with the keyword if True
:param bool ignore_first: doesn't check if the first line read has one of the
given keywords
:param bool skip: skips buffering content, returning None
:param int end_position: end if we reach this point in the file
:param bool include_ending_keyword: provides the keyword we broke on if **True**
:returns: **list** with the lines until we find one of the keywords; this is
a two-value tuple of (content, ending keyword) if include_ending_keyword is
**True**
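For instance, reading up to a descriptor's 'router-signature' line might look
like (sketch, with a hypothetical file object)...
::
  content = _read_until_keywords('router-signature', descriptor_file)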
"""
content = None if skip else []
ending_keyword = None
if stem.util._is_str(keywords):
keywords = (keywords,)
if ignore_first:
first_line = descriptor_file.readline()
if first_line and content is not None:
content.append(first_line)
keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords))
while True:
last_position = descriptor_file.tell()
if end_position and last_position >= end_position:
break
line = descriptor_file.readline()
if not line:
break # EOF
line_match = keyword_match.match(stem.util.str_tools._to_unicode(line))
if line_match:
ending_keyword = line_match.groups()[0]
if not inclusive:
descriptor_file.seek(last_position)
elif content is not None:
content.append(line)
break
elif content is not None:
content.append(line)
if include_ending_keyword:
return (content, ending_keyword)
else:
return content
def _bytes_for_block(content):
"""
Provides the base64 decoded content of a pgp-style block.
:param str content: block to be decoded
:returns: decoded block content
:raises: **TypeError** if this isn't base64 encoded content
"""
# strip the '-----BEGIN RSA PUBLIC KEY-----' header and footer
content = ''.join(content.split('\n')[1:-1])
return base64.b64decode(stem.util.str_tools._to_bytes(content))
def _get_pseudo_pgp_block(remaining_contents):
"""
Checks if given contents begins with a pseudo-Open-PGP-style block and, if
so, pops it off and provides it back to the caller.
:param list remaining_contents: lines to be checked for a public key block
:returns: **tuple** of the (block_type, content) or None if it doesn't exist
:raises: **ValueError** if the contents starts with a key block but it's
malformed (for instance, if it lacks an ending line)
"""
if not remaining_contents:
return None # nothing left
block_match = PGP_BLOCK_START.match(remaining_contents[0])
if block_match:
block_type = block_match.groups()[0]
block_lines = []
end_line = PGP_BLOCK_END % block_type
while True:
if not remaining_contents:
raise ValueError("Unterminated pgp style block (looking for '%s'):\n%s" % (end_line, '\n'.join(block_lines)))
line = remaining_contents.pop(0)
block_lines.append(line)
if line == end_line:
return (block_type, '\n'.join(block_lines))
else:
return None
def create_signing_key(private_key = None):
"""
Serializes a signing key if we have one. Otherwise this creates a new signing
key we can use to create descriptors.
.. versionadded:: 1.6.0
:param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key: private key
:returns: :class:`~stem.descriptor.__init__.SigningKey` that can be used to
create descriptors
:raises: **ImportError** if the cryptography module is unavailable
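A minimal sketch of generating a fresh key (alternatively, a private key can
be supplied to serialize an existing one)...
::
  import stem.descriptor
  signing_key = stem.descriptor.create_signing_key()
  print(signing_key.public_digest)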
"""
if not stem.prereq.is_crypto_available():
raise ImportError('Signing requires the cryptography module')
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
if private_key is None:
private_key = rsa.generate_private_key(
public_exponent = 65537,
key_size = 1024,
backend = default_backend(),
)
# When signing the cryptography module includes a constant indicating
# the hash algorithm used. Tor doesn't. This causes signature
# validation failures, and unfortunately cryptography has no nice way
# of excluding these, so we need to mock out part of its internals...
#
# https://github.com/pyca/cryptography/issues/3713
def no_op(*args, **kwargs):
return 1
private_key._backend._lib.EVP_PKEY_CTX_set_signature_md = no_op
private_key._backend.openssl_assert = no_op
public_key = private_key.public_key()
public_digest = b'\n' + public_key.public_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PublicFormat.PKCS1,
).strip()
return SigningKey(private_key, public_key, public_digest)
def _append_router_signature(content, private_key):
"""
Appends a router signature to a server or extrainfo descriptor.
:param bytes content: descriptor content up through 'router-signature\\n'
:param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key:
private relay signing key
:returns: **bytes** with the signed descriptor content
"""
if not stem.prereq.is_crypto_available():
raise ImportError('Signing requires the cryptography module')
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
signature = base64.b64encode(private_key.sign(content, padding.PKCS1v15(), hashes.SHA1()))
return content + b'\n'.join([b'-----BEGIN SIGNATURE-----'] + stem.util.str_tools._split_by_length(signature, 64) + [b'-----END SIGNATURE-----\n'])
def _random_nickname():
return ('Unnamed%i' % random.randint(0, 100000000000000))[:19]
def _random_fingerprint():
return ('%040x' % random.randrange(16 ** 40)).upper()
def _random_ipv4_address():
return '%i.%i.%i.%i' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def _random_date():
return '%i-%02i-%02i %02i:%02i:%02i' % (random.randint(2000, 2015), random.randint(1, 12), random.randint(1, 20), random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))
def _random_crypto_blob(block_type = None):
"""
Provides a random string that can be used for crypto blocks.
"""
random_base64 = stem.util.str_tools._to_unicode(base64.b64encode(os.urandom(140)))
crypto_blob = '\n'.join(stem.util.str_tools._split_by_length(random_base64, 64))
if block_type:
return '\n-----BEGIN %s-----\n%s\n-----END %s-----' % (block_type, crypto_blob, block_type)
else:
return crypto_blob
def _descriptor_components(raw_contents, validate, extra_keywords = (), non_ascii_fields = ()):
"""
Initial breakup of the server descriptor contents to make parsing easier.
A descriptor contains a series of 'keyword lines' which are simply a keyword
followed by an optional value. Lines can also be followed by a signature
block.
To get a sub-listing with just certain keywords use extra_keywords. This can
be useful if we care about their relative ordering with respect to each
other. For instance, we care about the ordering of 'accept' and 'reject'
entries because this influences the resulting exit policy, but for everything
else in server descriptors the order does not matter.
:param str raw_contents: descriptor content provided by the relay
:param bool validate: checks the validity of the descriptor's content if
True, skips these checks otherwise
:param list extra_keywords: entity keywords to put into a separate listing
with ordering intact
:param list non_ascii_fields: fields containing non-ascii content
:returns:
**collections.OrderedDict** with the 'keyword => (value, pgp key) entries'
mappings. If extra_keywords was provided then this instead provides a
two-value tuple, the second value being a list of those entries.
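For instance, a single keyword line maps as follows (sketch)...
::
  >>> _descriptor_components('nickname caerSidi', True)
  OrderedDict([('nickname', [('caerSidi', None, None)])])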
"""
if isinstance(raw_contents, bytes):
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = OrderedDict()
extra_entries = [] # entries with a keyword in extra_keywords
remaining_lines = raw_contents.split('\n')
while remaining_lines:
line = remaining_lines.pop(0)
# V2 network status documents explicitly can contain blank lines...
#
# "Implementations MAY insert blank lines for clarity between sections;
# these blank lines are ignored."
#
# ... and server descriptors end with an extra newline. But other documents
# don't say how blank lines should be handled, so we ignore them globally.
if not line:
continue
# Some lines have an 'opt ' for backward compatibility. They should be
# ignored. This prefix is being removed in...
# https://trac.torproject.org/projects/tor/ticket/5124
if line.startswith('opt '):
line = line[4:]
line_match = KEYWORD_LINE.match(line)
if not line_match:
if not validate:
continue
raise ValueError('Line contains invalid characters: %s' % line)
keyword, value = line_match.groups()
if value is None:
value = ''
try:
block_attr = _get_pseudo_pgp_block(remaining_lines)
if block_attr:
block_type, block_contents = block_attr
else:
block_type, block_contents = None, None
except ValueError:
if not validate:
continue
raise
if validate and keyword not in non_ascii_fields:
try:
value.encode('ascii')
except UnicodeError:
replaced = ''.join([(char if char in string.printable else '?') for char in value])
raise ValueError("'%s' line had non-ascii content: %s" % (keyword, replaced))
if keyword in extra_keywords:
extra_entries.append('%s %s' % (keyword, value))
else:
entries.setdefault(keyword, []).append((value, block_type, block_contents))
if extra_keywords:
return entries, extra_entries
else:
return entries
# importing at the end to avoid circular dependencies on our Descriptor class
import stem.descriptor.server_descriptor
import stem.descriptor.extrainfo_descriptor
import stem.descriptor.networkstatus
import stem.descriptor.microdescriptor
import stem.descriptor.tordnsel
import stem.descriptor.hidden_service_descriptor
stem-1.7.1/stem/descriptor/server_descriptor.py 0000664 0001750 0001750 00000125122 13411002341 022446 0 ustar atagar atagar 0000000 0000000 # Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor server descriptors, which contain the infrequently changing
information about a Tor relay (contact information, exit policy, public keys,
etc). This information is provided by a few sources...
* The control port via 'GETINFO desc/\*' queries.
* The 'cached-descriptors' file in Tor's data directory.
* Archived descriptors provided by `CollecTor <https://collector.torproject.org/>`_.
* Directory authorities and mirrors via their DirPort.
**Module Overview:**
::
ServerDescriptor - Tor server descriptor.
|- RelayDescriptor - Server descriptor for a relay.
| +- make_router_status_entry - Creates a router status entry for this descriptor.
|
|- BridgeDescriptor - Scrubbed server descriptor for a bridge.
| |- is_scrubbed - checks if our content has been properly scrubbed
| +- get_scrubbing_issues - description of issues with our scrubbing
|
|- digest - calculates the upper-case hex digest value for our content
|- get_annotations - dictionary of content prior to the descriptor entry
+- get_annotation_lines - lines that provided the annotations
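These are most commonly read from tor's data directory, for example (a short
sketch, with the path adjusted for your system)...
::
  import stem.descriptor
  for desc in stem.descriptor.parse_file('/home/atagar/.tor/cached-descriptors'):
    if desc.exit_policy.is_exiting_allowed():
      print('%s is an exit relay' % desc.nickname)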
.. data:: BridgeDistribution (enum)
Preferred method of distributing this relay if a bridge.
.. versionadded:: 1.6.0
===================== ===========
BridgeDistribution Description
===================== ===========
**ANY** No preference; BridgeDB will pick how the bridge is distributed.
**HTTPS** Provided via the `web interface <https://bridges.torproject.org/>`_.
**EMAIL** Provided in response to emails to bridges@torproject.org.
**MOAT** Provided in interactive menus within Tor Browser.
**HYPHAE** Provided via a cryptographic invitation-based system.
===================== ===========
"""
import base64
import binascii
import functools
import hashlib
import re
import stem.descriptor.certificate
import stem.descriptor.extrainfo_descriptor
import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem.descriptor.router_status_entry import RouterStatusEntryV3
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
create_signing_key,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_bytes_for_block,
_value,
_values,
_parse_simple_line,
_parse_if_present,
_parse_bytes_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_append_router_signature,
_random_nickname,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# relay descriptors must have exactly one of the following
REQUIRED_FIELDS = (
'router',
'bandwidth',
'published',
'onion-key',
'signing-key',
'router-signature',
)
# optional entries that can appear at most once
SINGLE_FIELDS = (
'identity-ed25519',
'master-key-ed25519',
'platform',
'fingerprint',
'hibernating',
'uptime',
'contact',
'read-history',
'write-history',
'eventdns',
'bridge-distribution-request',
'family',
'caches-extra-info',
'extra-info-digest',
'hidden-service-dir',
'protocols',
'allow-single-hop-exits',
'tunnelled-dir-server',
'proto',
'onion-key-crosscert',
'ntor-onion-key',
'ntor-onion-key-crosscert',
'router-sig-ed25519',
)
BridgeDistribution = stem.util.enum.Enum(
('ANY', 'any'),
('HTTPS', 'https'),
('EMAIL', 'email'),
('MOAT', 'moat'),
('HYPHAE', 'hyphae'),
)
DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535')
REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*')
DEFAULT_BRIDGE_DISTRIBUTION = 'any'
def _truncated_b64encode(content):
return stem.util.str_tools._to_unicode(base64.b64encode(content).rstrip(b'='))
def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
"""
Iterates over the server descriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool is_bridge: parses the file as being a bridge descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for ServerDescriptor instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **IOError** if the file can't be read
"""
# Handler for relay descriptors
#
# Cached descriptors consist of annotations followed by the descriptor
# itself. For instance...
#
# @downloaded-at 2012-03-14 16:31:05
# @source "145.53.65.130"
# router caerSidi 71.35.143.157 9001 0 0
# platform Tor 0.2.1.30 on Linux x86_64
#
# router-signature
# -----BEGIN SIGNATURE-----
#
# -----END SIGNATURE-----
#
# Metrics descriptor files are the same, but lack any annotations. The
# following simply does the following...
#
# - parse as annotations until we get to 'router'
# - parse as descriptor content until we get to 'router-signature' followed
# by the end of the signature block
# - construct a descriptor and provide it back to the caller
#
# Any annotations after the last server descriptor are ignored (never provided
# to the caller).
while True:
annotations = _read_until_keywords('router', descriptor_file)
if not is_bridge:
descriptor_content = _read_until_keywords('router-signature', descriptor_file)
# we've reached the 'router-signature', now include the pgp style block
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
else:
descriptor_content = _read_until_keywords('router-digest', descriptor_file, True)
if descriptor_content:
if descriptor_content[0].startswith(b'@type'):
descriptor_content = descriptor_content[1:]
# strip newlines from annotations
annotations = list(map(bytes.strip, annotations))
descriptor_text = bytes.join(b'', descriptor_content)
if is_bridge:
yield BridgeDescriptor(descriptor_text, validate, annotations, **kwargs)
else:
yield RelayDescriptor(descriptor_text, validate, annotations, **kwargs)
else:
if validate and annotations:
orphaned_annotations = stem.util.str_tools._to_unicode(b'\n'.join(annotations))
raise ValueError("Content doesn't conform to being a server descriptor:\n%s" % orphaned_annotations)
break # done parsing descriptors
def _parse_router_line(descriptor, entries):
# "router" nickname address ORPort SocksPort DirPort
value = _value('router', entries)
router_comp = value.split()
if len(router_comp) < 5:
raise ValueError('Router line must have five values: router %s' % value)
elif not stem.util.tor_tools.is_valid_nickname(router_comp[0]):
raise ValueError("Router line entry isn't a valid nickname: %s" % router_comp[0])
elif not stem.util.connection.is_valid_ipv4_address(router_comp[1]):
raise ValueError("Router line entry isn't a valid IPv4 address: %s" % router_comp[1])
elif not stem.util.connection.is_valid_port(router_comp[2], allow_zero = True):
raise ValueError("Router line's ORPort is invalid: %s" % router_comp[2])
elif not stem.util.connection.is_valid_port(router_comp[3], allow_zero = True):
raise ValueError("Router line's SocksPort is invalid: %s" % router_comp[3])
elif not stem.util.connection.is_valid_port(router_comp[4], allow_zero = True):
raise ValueError("Router line's DirPort is invalid: %s" % router_comp[4])
descriptor.nickname = router_comp[0]
descriptor.address = router_comp[1]
descriptor.or_port = int(router_comp[2])
descriptor.socks_port = None if router_comp[3] == '0' else int(router_comp[3])
descriptor.dir_port = None if router_comp[4] == '0' else int(router_comp[4])
def _parse_bandwidth_line(descriptor, entries):
# "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed
value = _value('bandwidth', entries)
bandwidth_comp = value.split()
if len(bandwidth_comp) < 3:
raise ValueError('Bandwidth line must have three values: bandwidth %s' % value)
elif not bandwidth_comp[0].isdigit():
raise ValueError("Bandwidth line's average rate isn't numeric: %s" % bandwidth_comp[0])
elif not bandwidth_comp[1].isdigit():
raise ValueError("Bandwidth line's burst rate isn't numeric: %s" % bandwidth_comp[1])
elif not bandwidth_comp[2].isdigit():
raise ValueError("Bandwidth line's observed rate isn't numeric: %s" % bandwidth_comp[2])
descriptor.average_bandwidth = int(bandwidth_comp[0])
descriptor.burst_bandwidth = int(bandwidth_comp[1])
descriptor.observed_bandwidth = int(bandwidth_comp[2])
def _parse_platform_line(descriptor, entries):
# "platform" string
_parse_bytes_line('platform', 'platform')(descriptor, entries)
# The platform attribute was set earlier. This line can contain any
# arbitrary data, but tor seems to report its version followed by the
# os like the following...
#
# platform Tor 0.2.2.35 (git-73ff13ab3cc9570d) on Linux x86_64
#
# There's no guarantee that we'll be able to pick out the version, but we
# might as well try to save our caller the effort.
value = _value('platform', entries)
platform_match = re.match('^(?:node-)?Tor (\S*).* on (.*)$', value)
if platform_match:
version_str, descriptor.operating_system = platform_match.groups()
try:
descriptor.tor_version = stem.version._get_version(version_str)
except ValueError:
pass
def _parse_fingerprint_line(descriptor, entries):
# This is forty hex digits split into space separated groups of four.
# Checking that we match this pattern.
value = _value('fingerprint', entries)
fingerprint = value.replace(' ', '')
for grouping in value.split(' '):
if len(grouping) != 4:
raise ValueError('Fingerprint line should have groupings of four hex digits: %s' % value)
if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)
descriptor.fingerprint = fingerprint
def _parse_extrainfo_digest_line(descriptor, entries):
value = _value('extra-info-digest', entries)
digest_comp = value.split(' ')
if not stem.util.tor_tools.is_hex_digits(digest_comp[0], 40):
raise ValueError('extra-info-digest should be 40 hex characters: %s' % digest_comp[0])
descriptor.extra_info_digest = digest_comp[0]
descriptor.extra_info_sha256_digest = digest_comp[1] if len(digest_comp) >= 2 else None
def _parse_hibernating_line(descriptor, entries):
# "hibernating" 0|1 (in practice only set if one)
value = _value('hibernating', entries)
if value not in ('0', '1'):
raise ValueError('Hibernating line had an invalid value, must be zero or one: %s' % value)
descriptor.hibernating = value == '1'
def _parse_uptime_line(descriptor, entries):
# We need to be tolerant of negative uptimes to accommodate a past tor
# bug...
#
# Changes in version 0.1.2.7-alpha - 2007-02-06
# - If our system clock jumps back in time, don't publish a negative
# uptime in the descriptor. Also, don't let the global rate limiting
# buckets go absurdly negative.
#
# After parsing all of the attributes we'll double check that negative
# uptimes only occurred prior to this fix.
value = _value('uptime', entries)
try:
descriptor.uptime = int(value)
except ValueError:
raise ValueError('Uptime line must have an integer value: %s' % value)
def _parse_protocols_line(descriptor, entries):
value = _value('protocols', entries)
protocols_match = re.match('^Link (.*) Circuit (.*)$', value)
if not protocols_match:
raise ValueError('Protocols line did not match the expected pattern: protocols %s' % value)
link_versions, circuit_versions = protocols_match.groups()
descriptor.link_protocols = link_versions.split(' ')
descriptor.circuit_protocols = circuit_versions.split(' ')
def _parse_or_address_line(descriptor, entries):
all_values = _values('or-address', entries)
or_addresses = []
for entry in all_values:
line = 'or-address %s' % entry
if ':' not in entry:
raise ValueError('or-address line missing a colon: %s' % line)
address, port = entry.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError('or-address line has a malformed address: %s' % line)
if not stem.util.connection.is_valid_port(port):
raise ValueError('or-address line has a malformed port: %s' % line)
or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
descriptor.or_addresses = or_addresses
def _parse_history_line(keyword, history_end_attribute, history_interval_attribute, history_values_attribute, descriptor, entries):
value = _value(keyword, entries)
timestamp, interval, remainder = stem.descriptor.extrainfo_descriptor._parse_timestamp_and_interval(keyword, value)
try:
if remainder:
history_values = [int(entry) for entry in remainder.split(',')]
else:
history_values = []
except ValueError:
raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))
setattr(descriptor, history_end_attribute, timestamp)
setattr(descriptor, history_interval_attribute, interval)
setattr(descriptor, history_values_attribute, history_values)
def _parse_exit_policy(descriptor, entries):
if hasattr(descriptor, '_unparsed_exit_policy'):
if descriptor._unparsed_exit_policy and stem.util.str_tools._to_unicode(descriptor._unparsed_exit_policy[0]) == 'reject *:*':
descriptor.exit_policy = REJECT_ALL_POLICY
else:
descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy)
del descriptor._unparsed_exit_policy
def _parse_identity_ed25519_line(descriptor, entries):
_parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')(descriptor, entries)
if descriptor.ed25519_certificate:
cert_lines = descriptor.ed25519_certificate.split('\n')
if cert_lines[0] == '-----BEGIN ED25519 CERT-----' and cert_lines[-1] == '-----END ED25519 CERT-----':
descriptor.certificate = stem.descriptor.certificate.Ed25519Certificate.parse(''.join(cert_lines[1:-1]))
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_master_key')
_parse_master_key_ed25519_for_hash_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_contact_line = _parse_bytes_line('contact', 'contact')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_ipv6_policy_line = _parse_simple_line('ipv6-policy', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v))
_parse_allow_single_hop_exits_line = _parse_if_present('allow-single-hop-exits', 'allow_single_hop_exits')
_parse_tunneled_dir_server_line = _parse_if_present('tunnelled-dir-server', 'allow_tunneled_dir_requests')
_parse_proto_line = _parse_protocol_line('proto', 'protocols')
_parse_hidden_service_dir_line = _parse_if_present('hidden-service-dir', 'is_hidden_service_dir')
_parse_caches_extra_info_line = _parse_if_present('caches-extra-info', 'extra_info_cache')
_parse_bridge_distribution_request_line = _parse_simple_line('bridge-distribution-request', 'bridge_distribution')
_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: set(v.split(' ')))
_parse_eventdns_line = _parse_simple_line('eventdns', 'eventdns', func = lambda v: v == '1')
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_onion_key_crosscert_line = _parse_key_block('onion-key-crosscert', 'onion_key_crosscert', 'CROSSCERT')
_parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_ntor_onion_key_crosscert_line = _parse_key_block('ntor-onion-key-crosscert', 'ntor_onion_key_crosscert', 'ED25519 CERT', 'ntor_onion_key_crosscert_sign')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
class ServerDescriptor(Descriptor):
"""
Common parent for server descriptors.
:var str nickname: **\*** relay's nickname
:var str fingerprint: identity key fingerprint
:var datetime published: **\*** time in UTC when this descriptor was made
:var str address: **\*** IPv4 address of the relay
:var int or_port: **\*** port used for relaying
:var int socks_port: **\*** port used as client (**deprecated**, always **None**)
:var int dir_port: **\*** port used for descriptor mirroring
:var bytes platform: line with operating system and tor version
:var stem.version.Version tor_version: version of tor
:var str operating_system: operating system
:var int uptime: uptime when published in seconds
:var bytes contact: contact information
:var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
:var BridgeDistribution bridge_distribution: **\*** preferred method of providing this relay's
address if a bridge
:var set family: **\*** nicknames or fingerprints of declared family
:var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s
:var int burst_bandwidth: **\*** burst rate it's willing to relay in bytes/s
:var int observed_bandwidth: **\*** estimated capacity based on usage in bytes/s
:var list link_protocols: link protocols supported by the relay
:var list circuit_protocols: circuit protocols supported by the relay
:var bool is_hidden_service_dir: **\*** indicates if the relay serves hidden
service descriptors
:var bool hibernating: **\*** hibernating when published
:var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed
:var bool allow_tunneled_dir_requests: **\*** flag if tunneled directory
requests are accepted
:var bool extra_info_cache: **\*** flag if a mirror for extra-info documents
:var str extra_info_digest: upper-case hex encoded digest of our extra-info document
:var str extra_info_sha256_digest: base64 encoded sha256 digest of our extra-info document
:var bool eventdns: flag for evdns backend (**deprecated**, always unset)
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
:var list or_addresses: **\*** alternative for our address/or_port
attributes, each entry is a tuple of the form (address (**str**), port
(**int**), is_ipv6 (**bool**))
:var dict protocols: mapping of protocols to their supported versions
**Deprecated**, moved to extra-info descriptor...
:var datetime read_history_end: end of the sampling interval
:var int read_history_interval: seconds per interval
:var list read_history_values: bytes read during each interval
:var datetime write_history_end: end of the sampling interval
:var int write_history_interval: seconds per interval
:var list write_history_values: bytes written during each interval
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
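
For example, to parse the server descriptors from a relay's cached
descriptors (the path here is illustrative)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('/home/atagar/.tor/cached-descriptors'):
    print('%s (%s)' % (desc.nickname, desc.fingerprint))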
.. versionchanged:: 1.5.0
Added the allow_tunneled_dir_requests attribute.
.. versionchanged:: 1.6.0
Added the extra_info_sha256_digest, protocols, and bridge_distribution
attributes.
.. versionchanged:: 1.7.0
Added the is_hidden_service_dir attribute.
.. versionchanged:: 1.7.0
Deprecated the hidden_service_dir field, it's never been populated
(:spec:`43c2f78`). This field will be removed in Stem 2.0.
"""
ATTRIBUTES = {
'nickname': (None, _parse_router_line),
'fingerprint': (None, _parse_fingerprint_line),
'contact': (None, _parse_contact_line),
'published': (None, _parse_published_line),
'exit_policy': (None, _parse_exit_policy),
'address': (None, _parse_router_line),
'or_port': (None, _parse_router_line),
'socks_port': (None, _parse_router_line),
'dir_port': (None, _parse_router_line),
'platform': (None, _parse_platform_line),
'tor_version': (None, _parse_platform_line),
'operating_system': (None, _parse_platform_line),
'uptime': (None, _parse_uptime_line),
'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line),
'bridge_distribution': (DEFAULT_BRIDGE_DISTRIBUTION, _parse_bridge_distribution_request_line),
'family': (set(), _parse_family_line),
'average_bandwidth': (None, _parse_bandwidth_line),
'burst_bandwidth': (None, _parse_bandwidth_line),
'observed_bandwidth': (None, _parse_bandwidth_line),
'link_protocols': (None, _parse_protocols_line),
'circuit_protocols': (None, _parse_protocols_line),
'is_hidden_service_dir': (False, _parse_hidden_service_dir_line),
'hibernating': (False, _parse_hibernating_line),
'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line),
'allow_tunneled_dir_requests': (False, _parse_tunneled_dir_server_line),
'protocols': ({}, _parse_proto_line),
'extra_info_cache': (False, _parse_caches_extra_info_line),
'extra_info_digest': (None, _parse_extrainfo_digest_line),
'extra_info_sha256_digest': (None, _parse_extrainfo_digest_line),
'eventdns': (None, _parse_eventdns_line),
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
'or_addresses': ([], _parse_or_address_line),
'read_history_end': (None, _parse_read_history_line),
'read_history_interval': (None, _parse_read_history_line),
'read_history_values': (None, _parse_read_history_line),
'write_history_end': (None, _parse_write_history_line),
'write_history_interval': (None, _parse_write_history_line),
'write_history_values': (None, _parse_write_history_line),
}
PARSER_FOR_LINE = {
'router': _parse_router_line,
'bandwidth': _parse_bandwidth_line,
'platform': _parse_platform_line,
'published': _parse_published_line,
'fingerprint': _parse_fingerprint_line,
'contact': _parse_contact_line,
'hibernating': _parse_hibernating_line,
'extra-info-digest': _parse_extrainfo_digest_line,
'hidden-service-dir': _parse_hidden_service_dir_line,
'uptime': _parse_uptime_line,
'protocols': _parse_protocols_line,
'ntor-onion-key': _parse_ntor_onion_key_line,
'or-address': _parse_or_address_line,
'read-history': _parse_read_history_line,
'write-history': _parse_write_history_line,
'ipv6-policy': _parse_ipv6_policy_line,
'allow-single-hop-exits': _parse_allow_single_hop_exits_line,
'tunnelled-dir-server': _parse_tunneled_dir_server_line,
'proto': _parse_proto_line,
'caches-extra-info': _parse_caches_extra_info_line,
'bridge-distribution-request': _parse_bridge_distribution_request_line,
'family': _parse_family_line,
'eventdns': _parse_eventdns_line,
}
def __init__(self, raw_contents, validate = False, annotations = None):
"""
Server descriptor constructor, created from an individual relay's
descriptor content (as provided by 'GETINFO desc/*', cached descriptors,
and metrics).
By default this validates the descriptor's content as it's parsed. This
validation can be disabled to either improve performance or to accept
malformed data.
:param str raw_contents: descriptor content provided by the relay
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param list annotations: lines that appeared prior to the descriptor
:raises: **ValueError** if the content is malformed and validate is **True**
"""
super(ServerDescriptor, self).__init__(raw_contents, lazy_load = not validate)
self._annotation_lines = annotations if annotations else []
# A descriptor contains a series of 'keyword lines' which are simply a
# keyword followed by an optional value. Lines can also be followed by a
# signature block.
#
# We care about the ordering of 'accept' and 'reject' entries because this
# influences the resulting exit policy, but for everything else the order
does not matter, so we break it into key / value pairs.
entries, self._unparsed_exit_policy = _descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, extra_keywords = ('accept', 'reject'), non_ascii_fields = ('contact', 'platform'))
# TODO: Remove the following field in Stem 2.0. It has never been populated...
#
# https://gitweb.torproject.org/torspec.git/commit/?id=43c2f78
self.hidden_service_dir = ['2']
if validate:
self._parse(entries, validate)
_parse_exit_policy(self, entries)
# if we have a negative uptime and a tor version that shouldn't exhibit
# this bug then fail validation
if validate and self.uptime and self.tor_version:
if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'):
raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime))
self._check_constraints(entries)
else:
self._entries = entries
def digest(self):
"""
Provides the hex encoded sha1 of our content. This value is part of the
network status entry for this relay.
:returns: **unicode** with the upper-case hex digest value for this server descriptor
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the ServerDescriptor subclass')
@lru_cache()
def get_annotations(self):
"""
Provides content that appeared prior to the descriptor. If this comes from
the cached-descriptors file then this commonly contains content like...
::
@downloaded-at 2012-03-18 21:18:29
@source "173.254.216.66"
:returns: **dict** with the key/value pairs in our annotations
"""
annotation_dict = {}
for line in self._annotation_lines:
if b' ' in line:
key, value = line.split(b' ', 1)
annotation_dict[key] = value
else:
annotation_dict[line] = None
return annotation_dict
def get_annotation_lines(self):
"""
Provides the lines of content that appeared prior to the descriptor. This
is the same as the
:func:`~stem.descriptor.server_descriptor.ServerDescriptor.get_annotations`
results, but with the unparsed lines and ordering retained.
:returns: **list** with the lines of annotation that came before this descriptor
"""
return self._annotation_lines
def _check_constraints(self, entries):
"""
Does a basic check that the entries conform to this descriptor type's
constraints.
:param dict entries: keyword => (value, pgp key) entries
:raises: **ValueError** if an issue arises in validation
"""
for keyword in self._required_fields():
if keyword not in entries:
raise ValueError("Descriptor must have a '%s' entry" % keyword)
for keyword in self._single_fields():
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a descriptor" % keyword)
expected_first_keyword = self._first_keyword()
if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
raise ValueError("Descriptor must start with a '%s' entry" % expected_first_keyword)
expected_last_keyword = self._last_keyword()
if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
if 'identity-ed25519' in entries.keys():
if 'router-sig-ed25519' not in entries.keys():
raise ValueError('Descriptor must have router-sig-ed25519 entry to accompany identity-ed25519')
elif 'router-sig-ed25519' not in list(entries.keys())[-2:]:
raise ValueError("Descriptor must have 'router-sig-ed25519' as the next-to-last entry")
if not self.exit_policy:
raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry")
# Constraints that the descriptor must meet to be valid. These can be None if
# not applicable.
def _required_fields(self):
return REQUIRED_FIELDS
def _single_fields(self):
return REQUIRED_FIELDS + SINGLE_FIELDS
def _first_keyword(self):
return 'router'
def _last_keyword(self):
return 'router-signature'
class RelayDescriptor(ServerDescriptor):
"""
Server descriptor (`descriptor specification
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)
:var stem.certificate.Ed25519Certificate certificate: ed25519 certificate
:var str ed25519_certificate: base64 encoded ed25519 certificate
:var str ed25519_master_key: base64 encoded master key for our ed25519 certificate
:var str ed25519_signature: signature of this document using ed25519
:var str onion_key: **\*** key used to encrypt EXTEND cells
:var str onion_key_crosscert: signature generated using the onion_key
:var str ntor_onion_key_crosscert: signature generated using the ntor-onion-key
:var str ntor_onion_key_crosscert_sign: sign of the corresponding ed25519 public key
:var str signing_key: **\*** relay's long-term identity key
:var str signature: **\*** signature for this descriptor
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate, ed25519_master_key, ed25519_signature,
onion_key_crosscert, ntor_onion_key_crosscert, and
ntor_onion_key_crosscert_sign attributes.
.. versionchanged:: 1.6.0
Moved from the deprecated `pycrypto
`_ module to `cryptography
`_ for validating signatures.
.. versionchanged:: 1.6.0
Added the certificate attribute.
.. deprecated:: 1.6.0
Our **ed25519_certificate** is deprecated in favor of our new
**certificate** attribute. The base64 encoded certificate is available via
the certificate's **encoded** attribute.
.. versionchanged:: 1.6.0
Added the **skip_crypto_validation** constructor argument.
"""
ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
'certificate': (None, _parse_identity_ed25519_line),
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_master_key': (None, _parse_master_key_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'onion_key': (None, _parse_onion_key_line),
'onion_key_crosscert': (None, _parse_onion_key_crosscert_line),
'ntor_onion_key_crosscert': (None, _parse_ntor_onion_key_crosscert_line),
'ntor_onion_key_crosscert_sign': (None, _parse_ntor_onion_key_crosscert_line),
'signing_key': (None, _parse_signing_key_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'onion-key': _parse_onion_key_line,
'onion-key-crosscert': _parse_onion_key_crosscert_line,
'ntor-onion-key-crosscert': _parse_ntor_onion_key_crosscert_line,
'signing-key': _parse_signing_key_line,
'router-signature': _parse_router_signature_line,
})
def __init__(self, raw_contents, validate = False, annotations = None, skip_crypto_validation = False):
super(RelayDescriptor, self).__init__(raw_contents, validate, annotations)
if validate:
if self.fingerprint:
key_hash = hashlib.sha1(_bytes_for_block(self.signing_key)).hexdigest()
if key_hash != self.fingerprint.lower():
raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash))
if not skip_crypto_validation and stem.prereq.is_crypto_available():
signed_digest = self._digest_for_signature(self.signing_key, self.signature)
if signed_digest != self.digest():
raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest()))
if self.onion_key_crosscert and stem.prereq.is_crypto_available():
onion_key_crosscert_digest = self._digest_for_signature(self.onion_key, self.onion_key_crosscert)
if onion_key_crosscert_digest != self._onion_key_crosscert_digest():
raise ValueError('Decrypted onion-key-crosscert digest does not match local digest (calculated: %s, local: %s)' % (onion_key_crosscert_digest, self._onion_key_crosscert_digest()))
if stem.prereq._is_pynacl_available() and self.certificate:
self.certificate.validate(self)
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None):
if signing_key:
sign = True
if attr is None:
attr = {}
base_header = (
('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())),
('published', _random_date()),
('bandwidth', '153600 256000 104590'),
('reject', '*:*'),
('onion-key', _random_crypto_blob('RSA PUBLIC KEY')),
('signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
)
if sign:
if attr and 'signing-key' in attr:
raise ValueError('Cannot sign the descriptor if a signing-key has been provided')
elif attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
if 'fingerprint' not in attr:
fingerprint = hashlib.sha1(_bytes_for_block(stem.util.str_tools._to_unicode(signing_key.public_digest.strip()))).hexdigest().upper()
attr['fingerprint'] = ' '.join(stem.util.str_tools._split_by_length(fingerprint, 4))
attr['signing-key'] = signing_key.public_digest
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-sig-ed25519', None),
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None):
return cls(cls.content(attr, exclude, sign, signing_key), validate = validate, skip_crypto_validation = not sign)
@lru_cache()
def digest(self):
"""
Provides the digest of our descriptor's content.
:returns: the digest string encoded in uppercase hex
:raises: ValueError if the digest cannot be calculated
"""
return self._digest_for_content(b'router ', b'\nrouter-signature\n')
def make_router_status_entry(self):
"""
Provides a RouterStatusEntryV3 for this descriptor content.
.. versionadded:: 1.6.0
:returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
that would be in the consensus
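
For example (a sketch, where 'desc' is assumed to be a parsed and validated
:class:`~stem.descriptor.server_descriptor.RelayDescriptor`)...

::

  entry = desc.make_router_status_entry()
  print('%s (%s)' % (entry.nickname, entry.fingerprint))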
"""
if not self.fingerprint:
raise ValueError('Server descriptor lacks a fingerprint. This is an optional field, but required to make a router status entry.')
attr = {
'r': ' '.join([
self.nickname,
_truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.fingerprint))),
_truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.digest()))),
self.published.strftime('%Y-%m-%d %H:%M:%S'),
self.address,
str(self.or_port),
str(self.dir_port) if self.dir_port else '0',
]),
'w': 'Bandwidth=%i' % self.average_bandwidth,
'p': self.exit_policy.summary().replace(', ', ','),
}
if self.tor_version:
attr['v'] = 'Tor %s' % self.tor_version
if self.or_addresses:
attr['a'] = ['%s:%s' % (addr, port) for addr, port, _ in self.or_addresses]
if self.certificate:
attr['id'] = 'ed25519 %s' % _truncated_b64encode(self.certificate.key)
return RouterStatusEntryV3.create(attr)
@lru_cache()
def _onion_key_crosscert_digest(self):
"""
Provides the digest of the onion-key-crosscert data. This consists of the
RSA identity key sha1 and ed25519 identity key.
:returns: **unicode** digest encoded in uppercase hex
:raises: ValueError if the digest cannot be calculated
"""
signing_key_digest = hashlib.sha1(_bytes_for_block(self.signing_key)).digest()
data = signing_key_digest + base64.b64decode(stem.util.str_tools._to_bytes(self.ed25519_master_key) + b'=')
return stem.util.str_tools._to_unicode(binascii.hexlify(data).upper())
def _compare(self, other, method):
if not isinstance(other, RelayDescriptor):
return False
return method(str(self).strip(), str(other).strip())
def _check_constraints(self, entries):
super(RelayDescriptor, self)._check_constraints(entries)
if self.ed25519_certificate:
if not self.onion_key_crosscert:
raise ValueError("Descriptor must have a 'onion-key-crosscert' when identity-ed25519 is present")
elif not self.ed25519_signature:
raise ValueError("Descriptor must have a 'router-sig-ed25519' when identity-ed25519 is present")
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
class BridgeDescriptor(ServerDescriptor):
"""
Bridge descriptor (`bridge descriptor specification
<https://collector.torproject.org/formats.html#bridge-descriptors>`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
Also added ntor_onion_key (previously this only belonged to unsanitized
descriptors).
"""
ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_for_hash_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_for_hash_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())),
('router-digest', '006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4'),
('published', _random_date()),
('bandwidth', '409600 819200 5120'),
('reject', '*:*'),
))
def digest(self):
return self._digest
def is_scrubbed(self):
"""
Checks if we've been properly scrubbed in accordance with the `bridge
descriptor specification
<https://collector.torproject.org/formats.html#bridge-descriptors>`_.
Validation is a moving target so this may not be fully up to date.
:returns: **True** if we're scrubbed, **False** otherwise
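
For example (the file path and '@type' annotation here are illustrative)...

::

  from stem.descriptor import parse_file

  for desc in parse_file('bridge-descriptors', 'bridge-server-descriptor 1.0'):
    if not desc.is_scrubbed():
      print(desc.get_scrubbing_issues())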
"""
return self.get_scrubbing_issues() == []
@lru_cache()
def get_scrubbing_issues(self):
"""
Provides issues with our scrubbing.
:returns: **list** of strings which describe issues we have with our
scrubbing, this list is empty if we're properly scrubbed
"""
issues = []
if not self.address.startswith('10.'):
issues.append("Router line's address should be scrubbed to be '10.x.x.x': %s" % self.address)
if self.contact and self.contact != 'somebody':
issues.append("Contact line should be scrubbed to be 'somebody', but instead had '%s'" % self.contact)
for address, _, is_ipv6 in self.or_addresses:
if not is_ipv6 and not address.startswith('10.'):
issues.append("or-address line's address should be scrubbed to be '10.x.x.x': %s" % address)
elif is_ipv6 and not address.startswith('fd9f:2e19:3bcf::'):
# TODO: this check isn't quite right because we aren't checking that
# the next grouping of hex digits contains 1-2 digits
issues.append("or-address line's address should be scrubbed to be 'fd9f:2e19:3bcf::xx:xxxx': %s" % address)
for line in self.get_unrecognized_lines():
if line.startswith('onion-key '):
issues.append('Bridge descriptors should have their onion-key scrubbed: %s' % line)
elif line.startswith('signing-key '):
issues.append('Bridge descriptors should have their signing-key scrubbed: %s' % line)
elif line.startswith('router-signature '):
issues.append('Bridge descriptors should have their signature scrubbed: %s' % line)
return issues
def _required_fields(self):
# bridge required fields are the same as a relay descriptor, minus items
# excluded according to the format page
excluded_fields = [
'onion-key',
'signing-key',
'router-signature',
]
included_fields = [
'router-digest',
]
return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])
def _single_fields(self):
return self._required_fields() + SINGLE_FIELDS
def _last_keyword(self):
return None
def _compare(self, other, method):
if not isinstance(other, BridgeDescriptor):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
stem-1.7.1/stem/descriptor/remote.py 0000664 0001750 0001750 00000103151 13411002341 020173 0 ustar atagar atagar 0000000 0000000 # Copyright 2013-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Module for remotely retrieving descriptors from directory authorities and
mirrors. This is the simplest method for getting current tor descriptor
information...
::
import stem.descriptor.remote
for desc in stem.descriptor.remote.get_server_descriptors():
if desc.exit_policy.is_exiting_allowed():
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
More custom downloading behavior can be done through the
:class:`~stem.descriptor.remote.DescriptorDownloader` class, which issues
:class:`~stem.descriptor.remote.Query` instances to get you descriptor
content. For example...
::
from stem.descriptor.remote import DescriptorDownloader
downloader = DescriptorDownloader(
use_mirrors = True,
timeout = 10,
)
query = downloader.get_server_descriptors()
print('Exit Relays:')
try:
for desc in query.run():
if desc.exit_policy.is_exiting_allowed():
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
print
print('Query took %0.2f seconds' % query.runtime)
except Exception as exc:
print('Unable to retrieve the server descriptors: %s' % exc)
::
get_instance - Provides a singleton DescriptorDownloader used for...
|- their_server_descriptor - provides the server descriptor of the relay we download from
|- get_server_descriptors - provides present server descriptors
|- get_extrainfo_descriptors - provides present extrainfo descriptors
+- get_consensus - provides the present consensus or router status entries
Query - Asynchronous request to download tor descriptors
|- start - issues the query if it isn't already running
+- run - blocks until the request is finished and provides the results
DescriptorDownloader - Configurable class for issuing queries
|- use_directory_mirrors - use directory mirrors to download future descriptors
|- their_server_descriptor - provides the server descriptor of the relay we download from
|- get_server_descriptors - provides present server descriptors
|- get_extrainfo_descriptors - provides present extrainfo descriptors
|- get_consensus - provides the present consensus or router status entries
|- get_key_certificates - provides present authority key certificates
+- query - request an arbitrary descriptor resource
.. versionadded:: 1.1.0
.. data:: MAX_FINGERPRINTS
Maximum number of descriptors that can be requested at a time by their
fingerprints.
.. data:: MAX_MICRODESCRIPTOR_HASHES
Maximum number of microdescriptors that can be requested at a time by their
hashes.
.. data:: Compression (enum)
Compression when downloading descriptors.
.. versionadded:: 1.7.0
=============== ===========
Compression Description
=============== ===========
**PLAINTEXT** Uncompressed data.
**GZIP** `GZip compression <https://en.wikipedia.org/wiki/Gzip>`_.
**ZSTD** `Zstandard compression <https://en.wikipedia.org/wiki/Zstandard>`_, this requires the `zstandard module <https://pypi.python.org/pypi/zstandard>`_.
**LZMA** `LZMA compression <https://en.wikipedia.org/wiki/LZMA>`_, this requires the `lzma module <https://docs.python.org/3/library/lzma.html>`_.
=============== ===========
"""
import io
import random
import sys
import threading
import time
import zlib
import stem
import stem.client
import stem.descriptor
import stem.directory
import stem.prereq
import stem.util.enum
from stem.client.datatype import RelayCommand
from stem.util import log, str_tools
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
Compression = stem.util.enum.Enum(
('PLAINTEXT', 'identity'),
('GZIP', 'gzip'), # can also be 'deflate'
('ZSTD', 'x-zstd'),
('LZMA', 'x-tor-lzma'),
)
# Tor has a limited number of descriptors we can fetch explicitly by their
# fingerprint or hashes due to a limit on the url length by squid proxies.
MAX_FINGERPRINTS = 96
MAX_MICRODESCRIPTOR_HASHES = 90
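# For instance, a caller fetching many descriptors needs to chunk its requests
# to stay within these limits (a sketch, where 'fingerprints' is assumed to be
# a list of relay fingerprints)...
#
#   for i in range(0, len(fingerprints), MAX_FINGERPRINTS):
#     batch = fingerprints[i:i + MAX_FINGERPRINTS]
#     descriptors = get_server_descriptors(batch).run()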
SINGLETON_DOWNLOADER = None
def get_instance():
"""
Provides the singleton :class:`~stem.descriptor.remote.DescriptorDownloader`
used for the following functions...
* :func:`stem.descriptor.remote.get_server_descriptors`
* :func:`stem.descriptor.remote.get_extrainfo_descriptors`
* :func:`stem.descriptor.remote.get_consensus`
.. versionadded:: 1.5.0
:returns: singleton :class:`~stem.descriptor.remote.DescriptorDownloader` instance
"""
global SINGLETON_DOWNLOADER
if SINGLETON_DOWNLOADER is None:
SINGLETON_DOWNLOADER = DescriptorDownloader()
return SINGLETON_DOWNLOADER
def their_server_descriptor(**query_args):
"""
Provides the server descriptor of the relay we're downloading from.
.. versionadded:: 1.7.0
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the server descriptors
"""
return get_instance().their_server_descriptor(**query_args)
def get_server_descriptors(fingerprints = None, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_server_descriptors`
on our singleton instance.
.. versionadded:: 1.5.0
"""
return get_instance().get_server_descriptors(fingerprints, **query_args)
def get_extrainfo_descriptors(fingerprints = None, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_extrainfo_descriptors`
on our singleton instance.
.. versionadded:: 1.5.0
"""
return get_instance().get_extrainfo_descriptors(fingerprints, **query_args)
def get_consensus(authority_v3ident = None, microdescriptor = False, **query_args):
"""
Shorthand for
:func:`~stem.descriptor.remote.DescriptorDownloader.get_consensus`
on our singleton instance.
.. versionadded:: 1.5.0
"""
return get_instance().get_consensus(authority_v3ident, microdescriptor, **query_args)
class Query(object):
"""
Asynchronous request for descriptor content from a directory authority or
mirror. These can either be made through the
:class:`~stem.descriptor.remote.DescriptorDownloader` or directly for more
advanced usage.
To block on the response and get results either call
:func:`~stem.descriptor.remote.Query.run` or iterate over the Query. The
:func:`~stem.descriptor.remote.Query.run` method passes along any errors that
arise...
::
from stem.descriptor.remote import Query
query = Query(
'/tor/server/all',
block = True,
timeout = 30,
)
print('Current relays:')
if not query.error:
for desc in query:
print(desc.fingerprint)
else:
print('Unable to retrieve the server descriptors: %s' % query.error)
... while iterating fails silently...
::
print('Current relays:')
for desc in Query('/tor/server/all', 'server-descriptor 1.0'):
print(desc.fingerprint)
In either case exceptions are available via our 'error' attribute.
Tor provides quite a few different descriptor resources via its directory
protocol (see section 4.2 and later of the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).
Commonly useful ones include...
=============================================== ===========
Resource Description
=============================================== ===========
/tor/server/all all present server descriptors
/tor/server/fp/<fp1>+<fp2>+<fp3> server descriptors with the given fingerprints
/tor/extra/all all present extrainfo descriptors
/tor/extra/fp/<fp1>+<fp2>+<fp3> extrainfo descriptors with the given fingerprints
/tor/micro/d/<hash1>-<hash2> microdescriptors with the given hashes
/tor/status-vote/current/consensus present consensus
/tor/status-vote/current/consensus-microdesc present microdescriptor consensus
/tor/keys/all key certificates for the authorities
/tor/keys/fp/<v3ident1>+<v3ident2> key certificates for specific authorities
=============================================== ===========
**ZSTD** compression requires the `zstandard module
<https://pypi.python.org/pypi/zstandard>`_, and **LZMA** requires the `lzma
module <https://docs.python.org/3/library/lzma.html>`_.
For legacy reasons if our resource has a '.z' suffix then our **compression**
argument is overwritten with Compression.GZIP.
.. versionchanged:: 1.7.0
Added support for downloading from ORPorts.
.. versionchanged:: 1.7.0
Added the compression argument.
.. versionchanged:: 1.7.0
Added the reply_headers attribute.
The class this provides changed between Python versions. In python2
this was called httplib.HTTPMessage, whereas in python3 the class was
renamed to http.client.HTTPMessage.
.. versionchanged:: 1.7.0
Endpoints are now expected to be :class:`~stem.DirPort` or
:class:`~stem.ORPort` instances. Usage of tuples for this
argument is deprecated and will be removed in the future.
.. versionchanged:: 1.7.0
Avoid downloading from tor26. This directory authority throttles its
DirPort to such an extent that requests either time out or take on the
order of minutes.
.. versionchanged:: 1.7.0
Avoid downloading from Bifroest. This is the bridge authority so it
doesn't vote in the consensus, and apparently times out frequently.
:var str resource: resource being fetched, such as '/tor/server/all'
:var str descriptor_type: type of descriptors being fetched (for options see
:func:`~stem.descriptor.__init__.parse_file`), this is guessed from the
resource if **None**
:var list endpoints: :class:`~stem.DirPort` or :class:`~stem.ORPort` of the
authority or mirror we're querying, this uses authorities if undefined
:var list compression: list of :data:`stem.descriptor.remote.Compression`
we're willing to accept, when none are mutually supported downloads fall
back to Compression.PLAINTEXT
:var int retries: number of times to attempt the request if downloading it
fails
:var bool fall_back_to_authority: when retrying request issues the last
request to a directory authority if **True**
:var str content: downloaded descriptor content
:var Exception error: exception if a problem occurred
:var bool is_done: flag that indicates if our request has finished
:var float start_time: unix timestamp when we first started running
:var http.client.HTTPMessage reply_headers: headers provided in the response,
**None** if we haven't yet made our request
:var float runtime: time our query took, this is **None** if it's not yet
finished
:var bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:var stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:var dict kwargs: additional arguments for the descriptor constructor
Following are only applicable when downloading from a
:class:`~stem.DirPort`...
:var float timeout: duration before we'll time out our request
:var str download_url: last url used to download the descriptor, this is
unset until we've actually made a download attempt
:param bool start: start making the request when constructed (default is **True**)
:param bool block: only return after the request has been completed, this is
the same as running **query.run(True)** (default is **False**)
"""
def __init__(self, resource, descriptor_type = None, endpoints = None, compression = None, retries = 2, fall_back_to_authority = False, timeout = None, start = True, block = False, validate = False, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
if not resource.startswith('/'):
raise ValueError("Resources should start with a '/': %s" % resource)
if resource.endswith('.z'):
compression = [Compression.GZIP]
resource = resource[:-2]
elif compression is None:
compression = [Compression.PLAINTEXT]
else:
if isinstance(compression, str):
compression = [compression] # caller provided only a single option
if Compression.ZSTD in compression and not stem.prereq.is_zstd_available():
compression.remove(Compression.ZSTD)
if Compression.LZMA in compression and not stem.prereq.is_lzma_available():
compression.remove(Compression.LZMA)
if not compression:
compression = [Compression.PLAINTEXT]
if descriptor_type:
self.descriptor_type = descriptor_type
else:
self.descriptor_type = _guess_descriptor_type(resource)
self.endpoints = []
if endpoints:
for endpoint in endpoints:
if isinstance(endpoint, tuple) and len(endpoint) == 2:
self.endpoints.append(stem.DirPort(endpoint[0], endpoint[1])) # TODO: remove this in stem 2.0
elif isinstance(endpoint, (stem.ORPort, stem.DirPort)):
self.endpoints.append(endpoint)
else:
raise ValueError("Endpoints must be an stem.ORPort, stem.DirPort, or two value tuple. '%s' is a %s." % (endpoint, type(endpoint).__name__))
self.resource = resource
self.compression = compression
self.retries = retries
self.fall_back_to_authority = fall_back_to_authority
self.content = None
self.error = None
self.is_done = False
self.download_url = None
self.start_time = None
self.timeout = timeout
self.runtime = None
self.validate = validate
self.document_handler = document_handler
self.reply_headers = None
self.kwargs = kwargs
self._downloader_thread = None
self._downloader_thread_lock = threading.RLock()
if start:
self.start()
if block:
self.run(True)
def start(self):
"""
Starts downloading the descriptors if we haven't started already.
"""
with self._downloader_thread_lock:
if self._downloader_thread is None:
self._downloader_thread = threading.Thread(
name = 'Descriptor query',
target = self._download_descriptors,
args = (self.retries, self.timeout)
)
self._downloader_thread.setDaemon(True)
self._downloader_thread.start()
def run(self, suppress = False):
"""
Blocks until our request is complete then provides the descriptors. If we
haven't yet started our request then this does so.
:param bool suppress: avoids raising exceptions if **True**
:returns: list for the requested :class:`~stem.descriptor.__init__.Descriptor` instances
:raises:
Using the iterator can fail with the following if **suppress** is
**False**...
* **ValueError** if the descriptor contents is malformed
* **socket.timeout** if our request timed out
* **urllib2.URLError** for most request failures
Note that the urllib2 module may fail with other exception types, in
which case we'll pass those along.
"""
return list(self._run(suppress))
def _run(self, suppress):
with self._downloader_thread_lock:
self.start()
self._downloader_thread.join()
if self.error:
if suppress:
return
raise self.error
else:
if self.content is None:
if suppress:
return
raise ValueError('BUG: _download_descriptors() finished without either results or an error')
try:
results = stem.descriptor.parse_file(
io.BytesIO(self.content),
self.descriptor_type,
validate = self.validate,
document_handler = self.document_handler,
**self.kwargs
)
for desc in results:
yield desc
except ValueError as exc:
self.error = exc # encountered a parsing error
if suppress:
return
raise self.error
def __iter__(self):
for desc in self._run(True):
yield desc
def _pick_endpoint(self, use_authority = False):
"""
Provides an endpoint to query. If we have multiple endpoints then one
is picked at random.
:param bool use_authority: ignores our endpoints and uses a directory
authority instead
:returns: **str** for the url being queried by this request
"""
if use_authority or not self.endpoints:
picked = random.choice([auth for auth in stem.directory.Authority.from_cache().values() if auth.nickname not in ('tor26', 'Bifroest')])
return stem.DirPort(picked.address, picked.dir_port)
else:
return random.choice(self.endpoints)
def _download_descriptors(self, retries, timeout):
try:
self.start_time = time.time()
endpoint = self._pick_endpoint(use_authority = retries == 0 and self.fall_back_to_authority)
if isinstance(endpoint, stem.ORPort):
self.content, self.reply_headers = _download_from_orport(endpoint, self.compression, self.resource)
elif isinstance(endpoint, stem.DirPort):
self.download_url = 'http://%s:%i/%s' % (endpoint.address, endpoint.port, self.resource.lstrip('/'))
self.content, self.reply_headers = _download_from_dirport(self.download_url, self.compression, timeout)
else:
raise ValueError("BUG: endpoints can only be ORPorts or DirPorts, '%s' was a %s" % (endpoint, type(endpoint).__name__))
self.runtime = time.time() - self.start_time
log.trace("Descriptors retrieved from '%s' in %0.2fs" % (self.download_url, self.runtime))
except:
exc = sys.exc_info()[1]
if timeout is not None:
timeout -= time.time() - self.start_time
if retries > 0 and (timeout is None or timeout > 0):
log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc))
return self._download_descriptors(retries - 1, timeout)
else:
log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc))
self.error = exc
finally:
self.is_done = True
class DescriptorDownloader(object):
"""
Configurable class that issues :class:`~stem.descriptor.remote.Query`
instances on your behalf.
:param bool use_mirrors: downloads the present consensus and uses the directory
mirrors to fetch future requests, this fails silently if the consensus
cannot be downloaded
:param default_args: default arguments for the
:class:`~stem.descriptor.remote.Query` constructor
"""
def __init__(self, use_mirrors = False, **default_args):
self._default_args = default_args
directories = list(stem.directory.Authority.from_cache().values())
self._endpoints = [(directory.address, directory.dir_port) for directory in directories]
if use_mirrors:
try:
start_time = time.time()
self.use_directory_mirrors()
log.debug('Retrieved directory mirrors (took %0.2fs)' % (time.time() - start_time))
except Exception as exc:
log.debug('Unable to retrieve directory mirrors: %s' % exc)
def use_directory_mirrors(self):
"""
Downloads the present consensus and configures ourselves to use directory
mirrors, in addition to authorities.
:returns: :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`
from which we got the directory mirrors
:raises: **Exception** if unable to determine the directory mirrors
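
For example...

::

  downloader = DescriptorDownloader()
  downloader.use_directory_mirrors()  # subsequent queries can now use mirrors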
"""
directories = stem.directory.Authority.from_cache().values()
new_endpoints = set([(directory.address, directory.dir_port) for directory in directories])
consensus = list(self.get_consensus(document_handler = stem.descriptor.DocumentHandler.DOCUMENT).run())[0]
for desc in consensus.routers.values():
if stem.Flag.V2DIR in desc.flags:
new_endpoints.add((desc.address, desc.dir_port))
# we need our endpoints to be a list rather than set for random.choice()
self._endpoints = list(new_endpoints)
return consensus
def their_server_descriptor(self, **query_args):
"""
Provides the server descriptor of the relay we're downloading from.
.. versionadded:: 1.7.0
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the server descriptors
"""
return self.query('/tor/server/authority', **query_args)
def get_server_descriptors(self, fingerprints = None, **query_args):
"""
Provides the server descriptors with the given fingerprints. If no
fingerprints are provided then this returns all descriptors known
by the relay.
:param str,list fingerprints: fingerprint or list of fingerprints to be
retrieved, gets all descriptors if **None**
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the server descriptors
:raises: **ValueError** if we request more than 96 descriptors by their
fingerprints (this is due to a limit on the url length by squid proxies).
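
For example, fetching a single relay's descriptor (the fingerprint here is
illustrative)...

::

  downloader = DescriptorDownloader()
  query = downloader.get_server_descriptors('9695DFC35FFEB861329B9F1AB04C46397020CE31')
  print(query.run()[0].nickname)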
"""
resource = '/tor/server/all'
if isinstance(fingerprints, str):
fingerprints = [fingerprints]
if fingerprints:
if len(fingerprints) > MAX_FINGERPRINTS:
raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS)
resource = '/tor/server/fp/%s' % '+'.join(fingerprints)
return self.query(resource, **query_args)
def get_extrainfo_descriptors(self, fingerprints = None, **query_args):
"""
Provides the extrainfo descriptors with the given fingerprints. If no
fingerprints are provided then this returns all descriptors in the present
consensus.
:param str,list fingerprints: fingerprint or list of fingerprints to be
retrieved, gets all descriptors if **None**
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the extrainfo descriptors
:raises: **ValueError** if we request more than 96 descriptors by their
fingerprints (this is due to a limit on the url length by squid proxies).
"""
resource = '/tor/extra/all'
if isinstance(fingerprints, str):
fingerprints = [fingerprints]
if fingerprints:
if len(fingerprints) > MAX_FINGERPRINTS:
raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS)
resource = '/tor/extra/fp/%s' % '+'.join(fingerprints)
return self.query(resource, **query_args)
# TODO: drop in stem 2.x
def get_microdescriptors(self, hashes, **query_args):
"""
Provides the microdescriptors with the given hashes. To get these see the
'microdescriptor_hashes' attribute of
:class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`. Note
that these are only provided via a microdescriptor consensus (such as
'cached-microdesc-consensus' in your data directory).
.. deprecated:: 1.5.0
This function has never worked, as it was never implemented in tor
(:trac:`9271`).
:param str,list hashes: microdescriptor hash or list of hashes to be
retrieved
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the microdescriptors
:raises: **ValueError** if we request more than 90 microdescriptors by their
hashes (this is due to a limit on the url length by squid proxies).
"""
if isinstance(hashes, str):
hashes = [hashes]
if len(hashes) > MAX_MICRODESCRIPTOR_HASHES:
raise ValueError('Unable to request more than %i microdescriptors at a time by their hashes' % MAX_MICRODESCRIPTOR_HASHES)
return self.query('/tor/micro/d/%s' % '-'.join(hashes), **query_args)
def get_consensus(self, authority_v3ident = None, microdescriptor = False, **query_args):
"""
Provides the present router status entries.
.. versionchanged:: 1.5.0
Added the microdescriptor argument.
:param str authority_v3ident: fingerprint of the authority key for which
to get the consensus, see `'v3ident' in tor's config.c
`_
for the values.
:param bool microdescriptor: provides the microdescriptor consensus if
**True**, standard consensus otherwise
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the router status
entries
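
For example...

::

  downloader = DescriptorDownloader()

  for entry in downloader.get_consensus().run():
    print(entry.fingerprint)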
"""
if microdescriptor:
resource = '/tor/status-vote/current/consensus-microdesc'
else:
resource = '/tor/status-vote/current/consensus'
if authority_v3ident:
resource += '/%s' % authority_v3ident
consensus_query = self.query(resource, **query_args)
# if we're performing validation then check that it's signed by the
# authority key certificates
if consensus_query.validate and consensus_query.document_handler == stem.descriptor.DocumentHandler.DOCUMENT and stem.prereq.is_crypto_available():
consensus = list(consensus_query.run())[0]
key_certs = self.get_key_certificates(**query_args).run()
consensus.validate_signatures(key_certs)
return consensus_query
def get_vote(self, authority, **query_args):
"""
Provides the present vote for a given directory authority.
:param stem.directory.Authority authority: authority for which to retrieve a vote for
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the router status
entries
"""
resource = '/tor/status-vote/current/authority'
if 'endpoints' not in query_args:
query_args['endpoints'] = [(authority.address, authority.dir_port)]
return self.query(resource, **query_args)
def get_key_certificates(self, authority_v3idents = None, **query_args):
"""
Provides the key certificates for authorities with the given fingerprints.
If no fingerprints are provided then this returns all present key
certificates.
:param str authority_v3idents: fingerprint or list of fingerprints of the
authority keys, see `'v3ident' in tor's config.c
`_
for the values.
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the key certificates
:raises: **ValueError** if we request more than 96 key certificates by
their identity fingerprints (this is due to a limit on the url length by
squid proxies).
"""
resource = '/tor/keys/all'
if isinstance(authority_v3idents, str):
authority_v3idents = [authority_v3idents]
if authority_v3idents:
if len(authority_v3idents) > MAX_FINGERPRINTS:
raise ValueError('Unable to request more than %i key certificates at a time by their identity fingerprints' % MAX_FINGERPRINTS)
resource = '/tor/keys/fp/%s' % '+'.join(authority_v3idents)
return self.query(resource, **query_args)
def query(self, resource, **query_args):
"""
Issues a request for the given resource.
.. versionchanged:: 1.7.0
The **fall_back_to_authority** default when using this method is now
**False**, like the :class:`~stem.descriptor.Query` class.
:param str resource: resource being fetched, such as '/tor/server/all'
:param query_args: additional arguments for the
:class:`~stem.descriptor.remote.Query` constructor
:returns: :class:`~stem.descriptor.remote.Query` for the descriptors
:raises: **ValueError** if resource is clearly invalid or the descriptor
type can't be determined when 'descriptor_type' is **None**
"""
args = dict(self._default_args)
args.update(query_args)
if 'endpoints' not in args:
args['endpoints'] = self._endpoints
return Query(resource, **args)
def _download_from_orport(endpoint, compression, resource):
"""
Downloads descriptors from the given orport. Payload is just like an http
response (headers and all)...
::
HTTP/1.0 200 OK
Date: Mon, 23 Apr 2018 18:43:47 GMT
Content-Type: text/plain
X-Your-Address-Is: 216.161.254.25
Content-Encoding: identity
Expires: Wed, 25 Apr 2018 18:43:47 GMT
router dannenberg 193.23.244.244 443 0 80
identity-ed25519
... rest of the descriptor content...
:param stem.ORPort endpoint: endpoint to download from
:param list compression: compression methods for the request
:param str resource: descriptor resource to download
:returns: two value tuple of the form (data, reply_headers)
:raises:
* :class:`stem.ProtocolError` if not a valid descriptor response
* :class:`stem.SocketError` if unable to establish a connection
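
For example (a sketch, with an illustrative ORPort)...

::

  data, headers = _download_from_orport(
    stem.ORPort('128.31.0.34', 9101),
    [Compression.PLAINTEXT],
    '/tor/server/all',
  )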
"""
link_protocols = endpoint.link_protocols if endpoint.link_protocols else [3]
with stem.client.Relay.connect(endpoint.address, endpoint.port, link_protocols) as relay:
with relay.create_circuit() as circ:
request = '\r\n'.join((
'GET %s HTTP/1.0' % resource,
'Accept-Encoding: %s' % ', '.join(compression),
'User-Agent: %s' % stem.USER_AGENT,
)) + '\r\n\r\n'
circ.send(RelayCommand.BEGIN_DIR, stream_id = 1)
response = b''.join([cell.data for cell in circ.send(RelayCommand.DATA, request, stream_id = 1)])
first_line, data = response.split(b'\r\n', 1)
header_data, body_data = data.split(b'\r\n\r\n', 1)
if not first_line.startswith(b'HTTP/1.0 2'):
raise stem.ProtocolError("Response should begin with HTTP success, but was '%s'" % str_tools._to_unicode(first_line))
headers = {}
for line in str_tools._to_unicode(header_data).splitlines():
if ': ' not in line:
raise stem.ProtocolError("'%s' is not a HTTP header:\n\n%s" % line)
key, value = line.split(': ', 1)
headers[key] = value
return _decompress(body_data, headers.get('Content-Encoding')), headers
def _download_from_dirport(url, compression, timeout):
"""
Downloads descriptors from the given url.
:param str url: dirport url from which to download from
:param list compression: compression methods for the request
:param float timeout: duration before we'll time out our request
:returns: two value tuple of the form (data, reply_headers)
:raises:
* **socket.timeout** if our request timed out
* **urllib2.URLError** for most request failures
"""
response = urllib.urlopen(
urllib.Request(
url,
headers = {
'Accept-Encoding': ', '.join(compression),
'User-Agent': stem.USER_AGENT,
}
),
timeout = timeout,
)
return _decompress(response.read(), response.headers.get('Content-Encoding')), response.headers
def _decompress(data, encoding):
"""
Decompresses descriptor data.
Tor doesn't include compression headers. As such when using gzip we
need to include '32' for automatic header detection...
https://stackoverflow.com/questions/3122145/zlib-error-error-3-while-decompressing-incorrect-header-check/22310760#22310760
... and with zstd we need to use the streaming API.
:param bytes data: data we received
:param str encoding: 'Content-Encoding' header of the response
:raises:
* **ValueError** if encoding is unrecognized
* **ImportError** if missing the decompression module
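
For example, the gzip case below boils down to...

::

  import zlib

  body = zlib.decompress(data, zlib.MAX_WBITS | 32).strip()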
"""
if encoding == Compression.PLAINTEXT:
return data.strip()
elif encoding in (Compression.GZIP, 'deflate'):
return zlib.decompress(data, zlib.MAX_WBITS | 32).strip()
elif encoding == Compression.ZSTD:
if not stem.prereq.is_zstd_available():
raise ImportError('Decompressing zstd data requires https://pypi.python.org/pypi/zstandard')
import zstd
output_buffer = io.BytesIO()
with zstd.ZstdDecompressor().write_to(output_buffer) as decompressor:
decompressor.write(data)
return output_buffer.getvalue().strip()
elif encoding == Compression.LZMA:
if not stem.prereq.is_lzma_available():
raise ImportError('Decompressing lzma data requires https://docs.python.org/3/library/lzma.html')
import lzma
return lzma.decompress(data).strip()
else:
raise ValueError("'%s' isn't a recognized type of encoding" % encoding)
def _guess_descriptor_type(resource):
# Attempts to determine the descriptor type based on the resource url. This
# raises a ValueError if the resource isn't recognized.
if resource.startswith('/tor/server/'):
return 'server-descriptor 1.0'
elif resource.startswith('/tor/extra/'):
return 'extra-info 1.0'
elif resource.startswith('/tor/micro/'):
return 'microdescriptor 1.0'
elif resource.startswith('/tor/status-vote/current/consensus-microdesc'):
return 'network-status-microdesc-consensus-3 1.0'
elif resource.startswith('/tor/status-vote/'):
return 'network-status-consensus-3 1.0'
elif resource.startswith('/tor/keys/'):
return 'dir-key-certificate-3 1.0'
else:
raise ValueError("Unable to determine the descriptor type for '%s'" % resource)
def get_authorities():
"""
Provides cached Tor directory authority information. The directory
information is hardcoded into Tor and occasionally changes, so the
information this provides might not match your version of tor.
.. deprecated:: 1.7.0
Use stem.directory.Authority.from_cache() instead.
:returns: **dict** of **str** nicknames to :class:`~stem.directory.Authority` instances
"""
return DirectoryAuthority.from_cache()
# TODO: drop aliases in stem 2.0
Directory = stem.directory.Directory
DirectoryAuthority = stem.directory.Authority
FallbackDirectory = stem.directory.Fallback
stem-1.7.1/stem/descriptor/microdescriptor.py 0000664 0001750 0001750 00000026060 13411002341 022113 0 ustar atagar atagar 0000000 0000000 # Copyright 2013-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor microdescriptors, which contain a distilled version of a
relay's server descriptor. As of Tor version 0.2.3.3-alpha, Tor no longer
downloads server descriptors by default, opting for microdescriptors instead.
Unlike most descriptor documents these aren't available on the metrics site
(since they don't contain any information that the server descriptors don't).
The limited information in microdescriptors makes them rather clunky to use
compared with server descriptors. For instance, microdescriptors lack the
relay's fingerprint, making it difficult to use them to look up the relay's
other descriptors.
To do so you need to match the microdescriptor's digest against its
corresponding router status entry. For added fun, as of this writing the
controller doesn't even surface those router status entries
(:trac:`7953`).
For instance, here's an example that prints the nickname and fingerprints of
the exit relays.
::
import os
from stem.control import Controller
from stem.descriptor import parse_file
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
exit_digests = set()
data_dir = controller.get_conf('DataDirectory')
for desc in controller.get_microdescriptors():
if desc.exit_policy.is_exiting_allowed():
exit_digests.add(desc.digest)
print('Exit Relays:')
for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')):
if desc.digest in exit_digests:
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
Doing the same is trivial with server descriptors...
::
from stem.descriptor import parse_file
print('Exit Relays:')
for desc in parse_file('/home/atagar/.tor/cached-descriptors'):
if desc.exit_policy.is_exiting_allowed():
print(' %s (%s)' % (desc.nickname, desc.fingerprint))
**Module Overview:**
::
Microdescriptor - Tor microdescriptor.
"""
import hashlib
import stem.exit_policy
import stem.prereq
from stem.descriptor import (
Descriptor,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_values,
_parse_simple_line,
_parse_protocol_line,
_parse_key_block,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
_parse_a_line,
_parse_p_line,
)
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
REQUIRED_FIELDS = (
'onion-key',
)
SINGLE_FIELDS = (
'onion-key',
'ntor-onion-key',
'family',
'p',
'p6',
'pr',
)
def _parse_file(descriptor_file, validate = False, **kwargs):
"""
Iterates over the microdescriptors in a file.
:param file descriptor_file: file with descriptor content
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for Microdescriptor instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **IOError** if the file can't be read
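A minimal sketch (the cache path here is hypothetical)...

::

  with open('/home/atagar/.tor/cached-microdescs', 'rb') as descriptor_file:
    for desc in _parse_file(descriptor_file):
      print(desc.digest)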
"""
while True:
annotations = _read_until_keywords('onion-key', descriptor_file)
# read until we reach an annotation or onion-key line
descriptor_lines = []
# read the onion-key line, done if we're at the end of the document
onion_key_line = descriptor_file.readline()
if onion_key_line:
descriptor_lines.append(onion_key_line)
else:
break
while True:
last_position = descriptor_file.tell()
line = descriptor_file.readline()
if not line:
break # EOF
elif line.startswith(b'@') or line.startswith(b'onion-key'):
descriptor_file.seek(last_position)
break
else:
descriptor_lines.append(line)
if descriptor_lines:
if descriptor_lines[0].startswith(b'@type'):
descriptor_lines = descriptor_lines[1:]
# strip newlines from annotations
annotations = list(map(bytes.strip, annotations))
descriptor_text = bytes.join(b'', descriptor_lines)
yield Microdescriptor(descriptor_text, validate, annotations, **kwargs)
else:
break # done parsing descriptors
def _parse_id_line(descriptor, entries):
identities = {}
for entry in _values('id', entries):
entry_comp = entry.split()
if len(entry_comp) >= 2:
key_type, key_value = entry_comp[0], entry_comp[1]
if key_type in identities:
raise ValueError("There can only be one 'id' line per a key type, but '%s' appeared multiple times" % key_type)
descriptor.identifier_type = key_type
descriptor.identifier = key_value
identities[key_type] = key_value
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % entry)
descriptor.identifiers = identities
def _parse_digest(descriptor, entries):
setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper())
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: v.split(' '))
_parse_p6_line = _parse_simple_line('p6', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v))
_parse_pr_line = _parse_protocol_line('pr', 'protocols')
class Microdescriptor(Descriptor):
"""
Microdescriptor (`descriptor specification
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)
:var str digest: **\*** hex digest for this microdescriptor, this can be used
to match against the corresponding digest attribute of a
:class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`
:var str onion_key: **\*** key used to encrypt EXTEND cells
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
:var list or_addresses: **\*** alternative for our address/or_port attributes, each
entry is a tuple of the form (address (**str**), port (**int**), is_ipv6
(**bool**))
:var list family: **\*** nicknames or fingerprints of declared family
:var stem.exit_policy.MicroExitPolicy exit_policy: **\*** relay's exit policy
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
:var hash identifiers: mapping of key types (like rsa1024 or ed25519) to
their base64 encoded identity, this is only used for collision prevention
(:trac:`11743`)
:var dict protocols: mapping of protocols to their supported versions
:var str identifier: base64 encoded identity digest (**deprecated**, use
identifiers instead)
:var str identifier_type: identity digest key type (**deprecated**, use
identifiers instead)
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.1.0
Added the identifier and identifier_type attributes.
.. versionchanged:: 1.5.0
Added the identifiers attribute, and deprecated identifier and
identifier_type since the field can now appear multiple times.
.. versionchanged:: 1.6.0
Added the protocols attribute.
"""
ATTRIBUTES = {
'onion_key': (None, _parse_onion_key_line),
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
'or_addresses': ([], _parse_a_line),
'family': ([], _parse_family_line),
'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line),
'exit_policy_v6': (None, _parse_p6_line),
'identifier_type': (None, _parse_id_line), # deprecated in favor of identifiers
'identifier': (None, _parse_id_line), # deprecated in favor of identifiers
'identifiers': ({}, _parse_id_line),
'protocols': ({}, _parse_pr_line),
'digest': (None, _parse_digest),
}
PARSER_FOR_LINE = {
'onion-key': _parse_onion_key_line,
'ntor-onion-key': _parse_ntor_onion_key_line,
'a': _parse_a_line,
'family': _parse_family_line,
'p': _parse_p_line,
'p6': _parse_p6_line,
'pr': _parse_pr_line,
'id': _parse_id_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('onion-key', _random_crypto_blob('RSA PUBLIC KEY')),
))
def __init__(self, raw_contents, validate = False, annotations = None):
super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
self._annotation_lines = annotations if annotations else []
entries = _descriptor_components(raw_contents, validate)
if validate:
self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()
self._parse(entries, validate)
self._check_constraints(entries)
else:
self._entries = entries
@lru_cache()
def get_annotations(self):
"""
Provides content that appeared prior to the descriptor. If this comes from
the cached-microdescs then this commonly contains content like...
::
@last-listed 2013-02-24 00:18:30
:returns: **dict** with the key/value pairs in our annotations
"""
annotation_dict = {}
for line in self._annotation_lines:
if b' ' in line:
key, value = line.split(b' ', 1)
annotation_dict[key] = value
else:
annotation_dict[line] = None
return annotation_dict
def get_annotation_lines(self):
"""
Provides the lines of content that appeared prior to the descriptor. This
is the same as the
:func:`~stem.descriptor.microdescriptor.Microdescriptor.get_annotations`
results, but with the unparsed lines and ordering retained.
:returns: **list** with the lines of annotation that came before this descriptor
"""
return self._annotation_lines
def _check_constraints(self, entries):
"""
Does a basic check that the entries conform to this descriptor type's
constraints.
:param dict entries: keyword => (value, pgp key) entries
:raises: **ValueError** if an issue arises in validation
"""
for keyword in REQUIRED_FIELDS:
if keyword not in entries:
raise ValueError("Microdescriptor must have a '%s' entry" % keyword)
for keyword in SINGLE_FIELDS:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("The '%s' entry can only appear once in a microdescriptor" % keyword)
if 'onion-key' != list(entries.keys())[0]:
raise ValueError("Microdescriptor must start with a 'onion-key' entry")
def _name(self, is_plural = False):
return 'microdescriptors' if is_plural else 'microdescriptor'
def _compare(self, other, method):
if not isinstance(other, Microdescriptor):
return False
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
stem-1.7.1/stem/version.py 0000664 0001750 0001750 00000034264 13411002341 016217 0 ustar atagar atagar 0000000 0000000 # Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Tor versioning information and requirements for its features. These can be
easily parsed and compared, for instance...
::
>>> from stem.version import get_system_tor_version, Requirement
>>> my_version = get_system_tor_version()
>>> print(my_version)
0.2.1.30
>>> my_version >= Requirement.TORRC_CONTROL_SOCKET
True
**Module Overview:**
::
get_system_tor_version - gets the version of our system's tor installation
Version - Tor versioning information
.. data:: Requirement (enum)
Enumerations for the version requirements of features.
.. deprecated:: 1.6.0
Requirement entries belonging to tor versions which have been obsolete for
at least six months will be removed when we break backward compatibility
in the 2.x stem release.
===================================== ===========
Requirement Description
===================================== ===========
**AUTH_SAFECOOKIE** SAFECOOKIE authentication method
**DESCRIPTOR_COMPRESSION** Expanded compression support for ZSTD and LZMA
**DROPGUARDS** DROPGUARDS requests
**EVENT_AUTHDIR_NEWDESCS** AUTHDIR_NEWDESCS events
**EVENT_BUILDTIMEOUT_SET** BUILDTIMEOUT_SET events
**EVENT_CIRC_MINOR** CIRC_MINOR events
**EVENT_CLIENTS_SEEN** CLIENTS_SEEN events
**EVENT_CONF_CHANGED** CONF_CHANGED events
**EVENT_DESCCHANGED** DESCCHANGED events
**EVENT_GUARD** GUARD events
**EVENT_HS_DESC_CONTENT** HS_DESC_CONTENT events
**EVENT_NETWORK_LIVENESS** NETWORK_LIVENESS events
**EVENT_NEWCONSENSUS** NEWCONSENSUS events
**EVENT_NS** NS events
**EVENT_SIGNAL** SIGNAL events
**EVENT_STATUS** STATUS_GENERAL, STATUS_CLIENT, and STATUS_SERVER events
**EVENT_STREAM_BW** STREAM_BW events
**EVENT_TRANSPORT_LAUNCHED** TRANSPORT_LAUNCHED events
**EVENT_CONN_BW** CONN_BW events
**EVENT_CIRC_BW** CIRC_BW events
**EVENT_CELL_STATS** CELL_STATS events
**EVENT_TB_EMPTY** TB_EMPTY events
**EVENT_HS_DESC** HS_DESC events
**EXTENDCIRCUIT_PATH_OPTIONAL** EXTENDCIRCUIT queries can omit the path if the circuit is zero
**FEATURE_EXTENDED_EVENTS** 'EXTENDED_EVENTS' optional feature
**FEATURE_VERBOSE_NAMES** 'VERBOSE_NAMES' optional feature
**GETINFO_CONFIG_TEXT** 'GETINFO config-text' query
**GETINFO_GEOIP_AVAILABLE** 'GETINFO ip-to-country/ipv4-available' query and its ipv6 counterpart
**GETINFO_MICRODESCRIPTORS** 'GETINFO md/all' query
**HIDDEN_SERVICE_V3** Support for v3 hidden services
**HSFETCH** HSFETCH requests
**HSPOST** HSPOST requests
**ADD_ONION** ADD_ONION and DEL_ONION requests
**ADD_ONION_BASIC_AUTH** ADD_ONION supports basic authentication
**ADD_ONION_NON_ANONYMOUS** ADD_ONION supports non-anonymous mode
**ADD_ONION_MAX_STREAMS** ADD_ONION support for MaxStreamsCloseCircuit
**LOADCONF** LOADCONF requests
**MICRODESCRIPTOR_IS_DEFAULT** Tor gets microdescriptors by default rather than server descriptors
**SAVECONF_FORCE** Added the 'FORCE' flag to SAVECONF
**TAKEOWNERSHIP** TAKEOWNERSHIP requests
**TORRC_CONTROL_SOCKET** 'ControlSocket <path>' config option
**TORRC_PORT_FORWARDING** 'PortForwarding' config option
**TORRC_DISABLE_DEBUGGER_ATTACHMENT** 'DisableDebuggerAttachment' config option
**TORRC_VIA_STDIN** Allow torrc options via 'tor -f -' (:trac:`13865`)
===================================== ===========
"""
import os
import re
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.system
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# cache for the get_system_tor_version function
VERSION_CACHE = {}
VERSION_PATTERN = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?(( \(\S*\))*)$')
def get_system_tor_version(tor_cmd = 'tor'):
"""
Queries tor for its version. This is OS dependent, only working on Linux,
OSX, and BSD.
:param str tor_cmd: command used to run tor
:returns: :class:`~stem.version.Version` provided by the tor command
:raises: **IOError** if unable to query or parse the version
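For example...

::

  import stem.version

  print(stem.version.get_system_tor_version())  # for instance, '0.2.1.30'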
"""
if tor_cmd not in VERSION_CACHE:
version_cmd = '%s --version' % tor_cmd
try:
version_output = stem.util.system.call(version_cmd)
except OSError as exc:
# make the error message nicer if this is due to tor being unavailable
if 'No such file or directory' in str(exc):
if os.path.isabs(tor_cmd):
exc = "Unable to check tor's version. '%s' doesn't exist." % tor_cmd
else:
exc = "Unable to run '%s'. Maybe tor isn't in your PATH?" % version_cmd
raise IOError(exc)
if version_output:
# output example:
# Oct 21 07:19:27.438 [notice] Tor v0.2.1.30. This is experimental software. Do not rely on it for strong anonymity. (Running on Linux i686)
# Tor version 0.2.1.30.
last_line = version_output[-1]
if last_line.startswith('Tor version ') and last_line.endswith('.'):
try:
version_str = last_line[12:-1]
VERSION_CACHE[tor_cmd] = Version(version_str)
except ValueError as exc:
raise IOError(exc)
else:
raise IOError("Unexpected response from '%s': %s" % (version_cmd, last_line))
else:
raise IOError("'%s' didn't have any output" % version_cmd)
return VERSION_CACHE[tor_cmd]
@lru_cache()
def _get_version(version_str):
return Version(version_str)
class Version(object):
"""
Comparable tor version. These are constructed from strings that conform to
the 'new' style in the `tor version-spec
<https://gitweb.torproject.org/torspec.git/tree/version-spec.txt>`_,
such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)".
.. versionchanged:: 1.6.0
Added all_extra parameter.
:var int major: major version
:var int minor: minor version
:var int micro: micro version
:var int patch: patch level (**None** if undefined)
:var str status: status tag such as 'alpha' or 'beta-dev' (**None** if undefined)
:var str extra: first extra information without its parentheses such as
'git-8be6058d8f31e578' (**None** if undefined)
:var list all_extra: all extra information entries, without their parentheses
:var str git_commit: git commit id (**None** if it wasn't provided)
:param str version_str: version to be parsed
:raises: **ValueError** if input isn't a valid tor version
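For instance...

::

  >>> Version('0.2.3.1') > Version('0.2.2.23-alpha')
  True
  >>> Version('0.2.2.23-alpha').status
  'alpha'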
"""
def __init__(self, version_str):
self.version_str = version_str
version_parts = VERSION_PATTERN.match(version_str)
if version_parts:
major, minor, micro, patch, status, extra_str, _ = version_parts.groups()
# The patch and status matches are optional (may be None) and have an extra
# preceding period or dash if they exist. Stripping those off.
if patch:
patch = int(patch[1:])
if status:
status = status[1:]
self.major = int(major)
self.minor = int(minor)
self.micro = int(micro)
self.patch = patch
self.status = status
self.all_extra = [entry[1:-1] for entry in extra_str.strip().split()] if extra_str else []
self.extra = self.all_extra[0] if self.all_extra else None
self.git_commit = None
for extra in self.all_extra:
if extra and re.match('^git-[0-9a-f]{16}$', extra):
self.git_commit = extra[4:]
break
else:
raise ValueError("'%s' isn't a properly formatted tor version" % version_str)
def __str__(self):
"""
Provides the string used to construct the version.
"""
return self.version_str
def _compare(self, other, method):
"""
Compares version ordering according to the spec.
"""
if not isinstance(other, Version):
return False
for attr in ('major', 'minor', 'micro', 'patch'):
my_version = getattr(self, attr)
other_version = getattr(other, attr)
if my_version is None:
my_version = 0
if other_version is None:
other_version = 0
if my_version != other_version:
return method(my_version, other_version)
# According to the version spec...
#
# If we *do* encounter two versions that differ only by status tag, we
# compare them lexically as ASCII byte strings.
my_status = self.status if self.status else ''
other_status = other.status if other.status else ''
return method(my_status, other_status)
def __hash__(self):
return stem.util._hash_attr(self, 'major', 'minor', 'micro', 'patch', 'status', cache = True)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
"""
Checks if this version meets the requirements for a given feature. We can
be compared to either a :class:`~stem.version.Version` or
:class:`~stem.version._VersionRequirements`.
"""
if isinstance(other, _VersionRequirements):
for rule in other.rules:
if rule(self):
return True
return False
return self._compare(other, lambda s, o: s > o)
def __ge__(self, other):
if isinstance(other, _VersionRequirements):
for rule in other.rules:
if rule(self):
return True
return False
return self._compare(other, lambda s, o: s >= o)
class _VersionRequirements(object):
"""
Series of version constraints that can be compared to. For instance, this
allows for comparisons like 'if I'm greater than version X in the 0.2.2
series, or greater than version Y in the 0.2.3 series'.
This is a logical 'or' of the series of rules.
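For instance, mirroring the SAFECOOKIE requirement below...

::

  req = _VersionRequirements()
  req.in_range(Version('0.2.2.36'), Version('0.2.3.0'))
  req.greater_than(Version('0.2.3.13'))

  Version('0.2.2.39') >= req  # True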
"""
def __init__(self):
self.rules = []
def greater_than(self, version, inclusive = True):
"""
Adds a constraint that we're greater than the given version.
:param stem.version.Version version: version we're checking against
:param bool inclusive: if comparison is inclusive or not
"""
if inclusive:
self.rules.append(lambda v: version <= v)
else:
self.rules.append(lambda v: version < v)
def less_than(self, version, inclusive = True):
"""
Adds a constraint that we're less than the given version.
:param stem.version.Version version: version we're checking against
:param bool inclusive: if comparison is inclusive or not
"""
if inclusive:
self.rules.append(lambda v: version >= v)
else:
self.rules.append(lambda v: version > v)
def in_range(self, from_version, to_version, from_inclusive = True, to_inclusive = False):
"""
Adds constraint that we're within the range from one version to another.
:param stem.version.Version from_version: beginning of the comparison range
:param stem.version.Version to_version: end of the comparison range
:param bool from_inclusive: if comparison is inclusive with the starting version
:param bool to_inclusive: if comparison is inclusive with the ending version
"""
def new_rule(v):
if from_inclusive and to_inclusive:
return from_version <= v <= to_version
elif from_inclusive:
return from_version <= v < to_version
else:
return from_version < v < to_version
self.rules.append(new_rule)
safecookie_req = _VersionRequirements()
safecookie_req.in_range(Version('0.2.2.36'), Version('0.2.3.0'))
safecookie_req.greater_than(Version('0.2.3.13'))
Requirement = stem.util.enum.Enum(
('AUTH_SAFECOOKIE', safecookie_req),
('DESCRIPTOR_COMPRESSION', Version('0.3.1.1-alpha')),
('DROPGUARDS', Version('0.2.5.1-alpha')),
('EVENT_AUTHDIR_NEWDESCS', Version('0.1.1.10-alpha')),
('EVENT_BUILDTIMEOUT_SET', Version('0.2.2.7-alpha')),
('EVENT_CIRC_MINOR', Version('0.2.3.11-alpha')),
('EVENT_CLIENTS_SEEN', Version('0.2.1.10-alpha')),
('EVENT_CONF_CHANGED', Version('0.2.3.3-alpha')),
('EVENT_DESCCHANGED', Version('0.1.2.2-alpha')),
('EVENT_GUARD', Version('0.1.2.5-alpha')),
('EVENT_HS_DESC_CONTENT', Version('0.2.7.1-alpha')),
('EVENT_NS', Version('0.1.2.3-alpha')),
('EVENT_NETWORK_LIVENESS', Version('0.2.7.2-alpha')),
('EVENT_NEWCONSENSUS', Version('0.2.1.13-alpha')),
('EVENT_SIGNAL', Version('0.2.3.1-alpha')),
('EVENT_STATUS', Version('0.1.2.3-alpha')),
('EVENT_STREAM_BW', Version('0.1.2.8-beta')),
('EVENT_TRANSPORT_LAUNCHED', Version('0.2.5.0-alpha')),
('EVENT_CONN_BW', Version('0.2.5.2-alpha')),
('EVENT_CIRC_BW', Version('0.2.5.2-alpha')),
('EVENT_CELL_STATS', Version('0.2.5.2-alpha')),
('EVENT_TB_EMPTY', Version('0.2.5.2-alpha')),
('EVENT_HS_DESC', Version('0.2.5.2-alpha')),
('EXTENDCIRCUIT_PATH_OPTIONAL', Version('0.2.2.9')),
('FEATURE_EXTENDED_EVENTS', Version('0.2.2.1-alpha')),
('FEATURE_VERBOSE_NAMES', Version('0.2.2.1-alpha')),
('GETINFO_CONFIG_TEXT', Version('0.2.2.7-alpha')),
('GETINFO_GEOIP_AVAILABLE', Version('0.3.2.1-alpha')),
('GETINFO_MICRODESCRIPTORS', Version('0.3.5.1-alpha')),
('HIDDEN_SERVICE_V3', Version('0.3.3.1-alpha')),
('HSFETCH', Version('0.2.7.1-alpha')),
('HSPOST', Version('0.2.7.1-alpha')),
('ADD_ONION', Version('0.2.7.1-alpha')),
('ADD_ONION_BASIC_AUTH', Version('0.2.9.1-alpha')),
('ADD_ONION_NON_ANONYMOUS', Version('0.2.9.3-alpha')),
('ADD_ONION_MAX_STREAMS', Version('0.2.7.2-alpha')),
('LOADCONF', Version('0.2.1.1')),
('MICRODESCRIPTOR_IS_DEFAULT', Version('0.2.3.3')),
('SAVECONF_FORCE', Version('0.3.1.1-alpha')),
('TAKEOWNERSHIP', Version('0.2.2.28-beta')),
('TORRC_CONTROL_SOCKET', Version('0.2.0.30')),
('TORRC_PORT_FORWARDING', Version('0.2.3.1-alpha')),
('TORRC_DISABLE_DEBUGGER_ATTACHMENT', Version('0.2.3.9')),
('TORRC_VIA_STDIN', Version('0.2.6.3-alpha')),
)
stem-1.7.1/stem/client/ 0000775 0001750 0001750 00000000000 13411004021 015422 5 ustar atagar atagar 0000000 0000000 stem-1.7.1/stem/client/datatype.py 0000664 0001750 0001750 00000042774 13356507530 017652 0 ustar atagar atagar 0000000 0000000 # Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Support for `Tor's ORPort protocol
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.
**This module only consists of low level components, and is not intended for
users.** See our :class:`~stem.client.Relay` for the API you probably want.
.. versionadded:: 1.7.0
::
split - splits bytes into substrings
LinkProtocol - ORPort protocol version.
Field - Packable and unpackable datatype.
|- Size - Field of a static size.
|- Address - Relay address.
|- Certificate - Relay certificate.
|
|- pack - encodes content
|- unpack - decodes content
+- pop - decodes content with remainder
KDF - KDF-TOR derived attributes
+- from_value - parses key material
.. data:: AddrType (enum)
Form an address takes.
===================== ===========
AddrType Description
===================== ===========
**HOSTNAME** relay hostname
**IPv4** IPv4 address
**IPv6** IPv6 address
**ERROR_TRANSIENT** temporary error retrieving address
**ERROR_PERMANENT** permanent error retrieving address
**UNKNOWN** unrecognized address type
===================== ===========
.. data:: RelayCommand (enum)
Command concerning streams and circuits we've established with a relay.
Commands have two characteristics...
* **forward/backward**: **forward** commands are issued from the origin,
whereas **backward** come from the relay
* **stream/circuit**: **stream** commands concern an individual stream, whereas
**circuit** commands concern the entire circuit we've established with a relay
===================== ===========
RelayCommand Description
===================== ===========
**BEGIN** begin a stream (**forward**, **stream**)
**DATA** transmit data (**forward/backward**, **stream**)
**END** end a stream (**forward/backward**, **stream**)
**CONNECTED** BEGIN reply (**backward**, **stream**)
**SENDME** ready to accept more cells (**forward/backward**, **stream/circuit**)
**EXTEND** extend the circuit through another relay (**forward**, **circuit**)
**EXTENDED** EXTEND reply (**backward**, **circuit**)
**TRUNCATE** remove last circuit hop (**forward**, **circuit**)
**TRUNCATED** TRUNCATE reply (**backward**, **circuit**)
**DROP** ignorable no-op (**forward/backward**, **circuit**)
**RESOLVE** request DNS resolution (**forward**, **stream**)
**RESOLVED** RESOLVE reply (**backward**, **stream**)
**BEGIN_DIR** request descriptor (**forward**, **stream**)
**EXTEND2** ntor EXTEND request (**forward**, **circuit**)
**EXTENDED2** EXTEND2 reply (**backward**, **circuit**)
**UNKNOWN** unrecognized command
===================== ===========
.. data:: CertType (enum)
Relay certificate type.
===================== ===========
CertType Description
===================== ===========
**LINK** link key certificate certified by RSA1024 identity
**IDENTITY** RSA1024 Identity certificate
**AUTHENTICATE** RSA1024 AUTHENTICATE cell link certificate
**UNKNOWN** unrecognized certificate type
===================== ===========
.. data:: CloseReason (enum)
Reason a relay is closed.
===================== ===========
CloseReason Description
===================== ===========
**NONE** no reason given
**PROTOCOL** tor protocol violation
**INTERNAL** internal error
**REQUESTED** client sent a TRUNCATE command
**HIBERNATING** relay suspended, trying to save bandwidth
**RESOURCELIMIT** out of memory, sockets, or circuit IDs
**CONNECTFAILED** unable to reach relay
**OR_IDENTITY** connected, but its OR identity was not as expected
**OR_CONN_CLOSED** connection that was carrying this circuit died
**FINISHED** circuit has expired for being dirty or old
**TIMEOUT** circuit construction took too long
**DESTROYED** circuit was destroyed without a client TRUNCATE
**NOSUCHSERVICE** request was for an unknown hidden service
**UNKNOWN** unrecognized reason
===================== ===========
"""
import collections
import hashlib
import struct
import stem.client.cell
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.enum
ZERO = b'\x00'
HASH_LEN = 20
KEY_LEN = 16
class _IntegerEnum(stem.util.enum.Enum):
"""
Integer backed enumeration. Enumerations of this type always have an implicit
**UNKNOWN** value for integer values that lack a mapping.
"""
def __init__(self, *args):
self._enum_to_int = {}
self._int_to_enum = {}
parent_args = []
for entry in args:
if len(entry) == 2:
enum, int_val = entry
str_val = enum
elif len(entry) == 3:
enum, str_val, int_val = entry
else:
raise ValueError('IntegerEnums can only be constructed with two or three value tuples: %s' % repr(entry))
self._enum_to_int[str_val] = int_val
self._int_to_enum[int_val] = str_val
parent_args.append((enum, str_val))
parent_args.append(('UNKNOWN', 'UNKNOWN'))
super(_IntegerEnum, self).__init__(*parent_args)
def get(self, val):
"""
Provides the (enum, int_value) tuple for a given value.
"""
if stem.util._is_int(val):
return self._int_to_enum.get(val, self.UNKNOWN), val
elif val in self:
return val, self._enum_to_int.get(val, val)
else:
raise ValueError("Invalid enumeration '%s', options are %s" % (val, ', '.join(self)))
AddrType = _IntegerEnum(
('HOSTNAME', 0),
('IPv4', 4),
('IPv6', 6),
('ERROR_TRANSIENT', 16),
('ERROR_PERMANENT', 17),
)
RelayCommand = _IntegerEnum(
('BEGIN', 'RELAY_BEGIN', 1),
('DATA', 'RELAY_DATA', 2),
('END', 'RELAY_END', 3),
('CONNECTED', 'RELAY_CONNECTED', 4),
('SENDME', 'RELAY_SENDME', 5),
('EXTEND', 'RELAY_EXTEND', 6),
('EXTENDED', 'RELAY_EXTENDED', 7),
('TRUNCATE', 'RELAY_TRUNCATE', 8),
('TRUNCATED', 'RELAY_TRUNCATED', 9),
('DROP', 'RELAY_DROP', 10),
('RESOLVE', 'RELAY_RESOLVE', 11),
('RESOLVED', 'RELAY_RESOLVED', 12),
('BEGIN_DIR', 'RELAY_BEGIN_DIR', 13),
('EXTEND2', 'RELAY_EXTEND2', 14),
('EXTENDED2', 'RELAY_EXTENDED2', 15),
)
CertType = _IntegerEnum(
('LINK', 1),
('IDENTITY', 2),
('AUTHENTICATE', 3),
)
CloseReason = _IntegerEnum(
('NONE', 0),
('PROTOCOL', 1),
('INTERNAL', 2),
('REQUESTED', 3),
('HIBERNATING', 4),
('RESOURCELIMIT', 5),
('CONNECTFAILED', 6),
('OR_IDENTITY', 7),
('OR_CONN_CLOSED', 8),
('FINISHED', 9),
('TIMEOUT', 10),
('DESTROYED', 11),
('NOSUCHSERVICE', 12),
)
def split(content, size):
"""
Simple split of bytes into two substrings.
:param bytes content: string to split
:param int size: index to split the string on
:returns: two value tuple with the split bytes
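For example...

::

  >>> split(b'hello world', 5)
  (b'hello', b' world')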
"""
return content[:size], content[size:]
class LinkProtocol(int):
"""
Constants that vary by our link protocol version.
:var int version: link protocol version
:var stem.client.datatype.Size circ_id_size: circuit identifier field size
:var int fixed_cell_length: size of cells with a fixed length
:var int first_circ_id: When creating circuits we pick an unused identifier
from a range that's determined by our link protocol.
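For instance...

::

  >>> LinkProtocol(3).circ_id_size.size
  2
  >>> LinkProtocol(4).circ_id_size.size
  4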
"""
def __new__(cls, version):
if isinstance(version, LinkProtocol):
return version # already a LinkProtocol
protocol = int.__new__(cls, version)
protocol.version = version
protocol.circ_id_size = Size.LONG if version > 3 else Size.SHORT
protocol.first_circ_id = 0x80000000 if version > 3 else 0x01
cell_header_size = protocol.circ_id_size.size + 1 # circuit id (2 or 4 bytes) + command (1 byte)
protocol.fixed_cell_length = cell_header_size + stem.client.cell.FIXED_PAYLOAD_LEN
return protocol
def __hash__(self):
# All LinkProtocol attributes can be derived from our version, so that's
# all we need in our hash. Offsetting by our type so we don't hash conflict
# with ints.
return self.version * hash(str(type(self)))
def __eq__(self, other):
if isinstance(other, int):
return self.version == other
elif isinstance(other, LinkProtocol):
return hash(self) == hash(other)
else:
return False
def __ne__(self, other):
return not self == other
def __int__(self):
return self.version
class Field(object):
"""
Packable and unpackable datatype.
"""
def pack(self):
"""
Encodes field into bytes.
:returns: **bytes** that can be communicated over Tor's ORPort
:raises: **ValueError** if incorrect type or size
"""
raise NotImplementedError('Not yet available')
@classmethod
def unpack(cls, packed):
"""
Decodes bytes into a field of this type.
:param bytes packed: content to decode
:returns: instance of this class
:raises: **ValueError** if packed data is malformed
"""
unpacked, remainder = cls.pop(packed)
if remainder:
raise ValueError('%s is the wrong size for a %s field' % (repr(packed), cls.__name__))
return unpacked
@staticmethod
def pop(packed):
"""
Decodes bytes as this field type, providing it and the remainder.
:param bytes packed: content to decode
:returns: tuple of the form (unpacked, remainder)
:raises: **ValueError** if packed data is malformed
"""
raise NotImplementedError('Not yet available')
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, Field) else False
def __ne__(self, other):
return not self == other
class Size(Field):
"""
Unsigned `struct.pack format
<https://docs.python.org/3/library/struct.html#format-characters>`_ for
network-order fields.
==================== ===========
Pack Description
==================== ===========
CHAR Unsigned char (1 byte)
SHORT Unsigned short (2 bytes)
LONG Unsigned long (4 bytes)
LONG_LONG Unsigned long long (8 bytes)
==================== ===========
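For example...

::

  >>> Size.SHORT.pack(42)
  b'\x00*'
  >>> Size.SHORT.pop(b'\x00*remainder')
  (42, b'remainder')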
"""
def __init__(self, name, size, pack_format):
self.name = name
self.size = size
self.format = pack_format
@staticmethod
def pop(packed):
raise NotImplementedError("Use our constant's unpack() and pop() instead")
def pack(self, content):
# TODO: Python 2.6's struct module behaves a little differently in a couple
# respects...
#
# * Invalid types raise a TypeError rather than a struct.error.
#
# * Negative values are happily packed despite being unsigned fields with
# a message printed to stdout (!) that says...
#
# stem/client/datatype.py:362: DeprecationWarning: struct integer overflow masking is deprecated
# packed = struct.pack(self.format, content)
# stem/client/datatype.py:362: DeprecationWarning: 'B' format requires 0 <= number <= 255
# packed = struct.pack(self.format, content)
#
# Rather than adjust this method to account for these differences doing
# duplicate upfront checks just for python 2.6. When we drop 2.6 support
# this can obviously be dropped.
if stem.prereq._is_python_26():
if not stem.util._is_int(content):
raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__)
elif content < 0:
raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name))
try:
packed = struct.pack(self.format, content)
except struct.error:
if not stem.util._is_int(content):
raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__)
elif content < 0:
raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name))
else:
raise # some other struct exception
if self.size != len(packed):
raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name))
return packed
def unpack(self, packed):
if self.size != len(packed):
raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name))
return struct.unpack(self.format, packed)[0]
def pop(self, packed):
to_unpack, remainder = split(packed, self.size)
return self.unpack(to_unpack), remainder
def __hash__(self):
return stem.util._hash_attr(self, 'name', 'size', 'format', cache = True)
class Address(Field):
"""
Relay address.
:var stem.client.AddrType type: address type
:var int type_int: integer value of the address type
:var unicode value: address value
:var bytes value_bin: encoded address value
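For instance...

::

  >>> addr = Address('127.0.0.1')
  >>> addr.type
  'IPv4'
  >>> addr.value_bin
  b'\x7f\x00\x00\x01'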
"""
def __init__(self, value, addr_type = None):
if addr_type is None:
if stem.util.connection.is_valid_ipv4_address(value):
addr_type = AddrType.IPv4
elif stem.util.connection.is_valid_ipv6_address(value):
addr_type = AddrType.IPv6
else:
raise ValueError("'%s' isn't an IPv4 or IPv6 address" % value)
self.type, self.type_int = AddrType.get(addr_type)
if self.type == AddrType.IPv4:
if stem.util.connection.is_valid_ipv4_address(value):
self.value = value
self.value_bin = b''.join([Size.CHAR.pack(int(v)) for v in value.split('.')])
else:
if len(value) != 4:
raise ValueError('Packed IPv4 addresses should be four bytes, but was: %s' % repr(value))
self.value = '.'.join([str(Size.CHAR.unpack(value[i:i + 1])) for i in range(4)])
self.value_bin = value
elif self.type == AddrType.IPv6:
if stem.util.connection.is_valid_ipv6_address(value):
self.value = stem.util.connection.expand_ipv6_address(value).lower()
self.value_bin = b''.join([Size.SHORT.pack(int(v, 16)) for v in self.value.split(':')])
else:
if len(value) != 16:
raise ValueError('Packed IPv6 addresses should be sixteen bytes, but was: %s' % repr(value))
self.value = ':'.join(['%04x' % Size.SHORT.unpack(value[i * 2:(i + 1) * 2]) for i in range(8)])
self.value_bin = value
else:
# The spec doesn't really tell us what form errors take. For now just
# leaving the value unset so we can fill it in later when we know what
# would be most useful.
self.value = None
self.value_bin = value
def pack(self):
cell = bytearray()
cell += Size.CHAR.pack(self.type_int)
cell += Size.CHAR.pack(len(self.value_bin))
cell += self.value_bin
return bytes(cell)
@staticmethod
def pop(content):
addr_type, content = Size.CHAR.pop(content)
addr_length, content = Size.CHAR.pop(content)
if len(content) < addr_length:
raise ValueError('Address specified a payload of %i bytes, but only had %i' % (addr_length, len(content)))
addr_value, content = split(content, addr_length)
return Address(addr_value, addr_type), content
def __hash__(self):
return stem.util._hash_attr(self, 'type_int', 'value_bin', cache = True)
class Certificate(Field):
"""
Relay certificate as defined in tor-spec section 4.2.
:var stem.client.CertType type: certificate type
:var int type_int: integer value of the certificate type
:var bytes value: certificate value
"""
def __init__(self, cert_type, value):
self.type, self.type_int = CertType.get(cert_type)
self.value = value
def pack(self):
cell = bytearray()
cell += Size.CHAR.pack(self.type_int)
cell += Size.SHORT.pack(len(self.value))
cell += self.value
return bytes(cell)
@staticmethod
def pop(content):
cert_type, content = Size.CHAR.pop(content)
cert_size, content = Size.SHORT.pop(content)
if cert_size > len(content):
raise ValueError('CERTS cell should have a certificate with %i bytes, but only had %i remaining' % (cert_size, len(content)))
cert_bytes, content = split(content, cert_size)
return Certificate(cert_type, cert_bytes), content
def __hash__(self):
return stem.util._hash_attr(self, 'type_int', 'value')
class KDF(collections.namedtuple('KDF', ['key_hash', 'forward_digest', 'backward_digest', 'forward_key', 'backward_key'])):
"""
Computed KDF-TOR derived values for TAP, CREATE_FAST handshakes, and hidden
service protocols as defined tor-spec section 5.2.1.
:var bytes key_hash: hash that proves knowledge of our shared key
:var bytes forward_digest: forward digest hash seed
:var bytes backward_digest: backward digest hash seed
:var bytes forward_key: forward encryption key
:var bytes backward_key: backward encryption key
"""
@staticmethod
def from_value(key_material):
# Derived key material, as per...
#
# K = H(K0 | [00]) | H(K0 | [01]) | H(K0 | [02]) | ...
derived_key = b''
counter = 0
while len(derived_key) < KEY_LEN * 2 + HASH_LEN * 3:
derived_key += hashlib.sha1(key_material + Size.CHAR.pack(counter)).digest()
counter += 1
key_hash, derived_key = split(derived_key, HASH_LEN)
forward_digest, derived_key = split(derived_key, HASH_LEN)
backward_digest, derived_key = split(derived_key, HASH_LEN)
forward_key, derived_key = split(derived_key, KEY_LEN)
backward_key, derived_key = split(derived_key, KEY_LEN)
return KDF(key_hash, forward_digest, backward_digest, forward_key, backward_key)
setattr(Size, 'CHAR', Size('CHAR', 1, '!B'))
setattr(Size, 'SHORT', Size('SHORT', 2, '!H'))
setattr(Size, 'LONG', Size('LONG', 4, '!L'))
setattr(Size, 'LONG_LONG', Size('LONG_LONG', 8, '!Q'))
stem-1.7.1/stem/client/__init__.py 0000664 0001750 0001750 00000023661 13356173024 017565 0 ustar atagar atagar 0000000 0000000 # Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Interaction with a Tor relay's ORPort. :class:`~stem.client.Relay` is
a wrapper for :class:`~stem.socket.RelaySocket`, much the same way as
:class:`~stem.control.Controller` provides higher level functions for
:class:`~stem.socket.ControlSocket`.
.. versionadded:: 1.7.0
::
Relay - Connection with a tor relay's ORPort.
| +- connect - Establishes a connection with a relay.
|
|- is_alive - reports if our connection is open or closed
|- connection_time - time when we last connected or disconnected
|- close - shuts down our connection
|
+- create_circuit - establishes a new circuit
Circuit - Circuit we've established through a relay.
|- send - sends a message through this circuit
+- close - closes this circuit
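A minimal sketch (the relay address and ORPort here are hypothetical, and
circuit creation requires the cryptography module)...

::

  from stem.client import Relay
  from stem.client.datatype import RelayCommand

  with Relay.connect('127.0.0.1', 9001) as relay:
    with relay.create_circuit() as circ:
      circ.send(RelayCommand.BEGIN_DIR, stream_id = 1)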
"""
import hashlib
import threading
import stem
import stem.client.cell
import stem.socket
import stem.util.connection
from stem.client.datatype import ZERO, LinkProtocol, Address, KDF, split
__all__ = [
'cell',
'datatype',
]
DEFAULT_LINK_PROTOCOLS = (3, 4, 5)
class Relay(object):
"""
Connection with a Tor relay's ORPort.
:var int link_protocol: link protocol version we established
"""
def __init__(self, orport, link_protocol):
self.link_protocol = LinkProtocol(link_protocol)
self._orport = orport
self._orport_lock = threading.RLock()
self._circuits = {}
@staticmethod
def connect(address, port, link_protocols = DEFAULT_LINK_PROTOCOLS):
"""
Establishes a connection with the given ORPort.
:param str address: ip address of the relay
:param int port: ORPort of the relay
:param tuple link_protocols: acceptable link protocol versions
:raises:
* **ValueError** if address or port are invalid
* :class:`stem.SocketError` if we're unable to establish a connection
"""
relay_addr = Address(address)
if not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
elif not link_protocols:
raise ValueError("Connection can't be established without a link protocol.")
try:
conn = stem.socket.RelaySocket(address, port)
except stem.SocketError as exc:
if 'Connection refused' in str(exc):
raise stem.SocketError("Failed to connect to %s:%i. Maybe it isn't an ORPort?" % (address, port))
# If not an ORPort (for instance, mistakenly connecting to a ControlPort
# instead) we'll likely fail during SSL negotiation. This can result
# in a variety of responses so normalizing what we can...
#
# Debian 9.5: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:661)
# Ubuntu 16.04: [SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:590)
# Ubuntu 12.04: [Errno 1] _ssl.c:504: error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
if 'unknown protocol' in str(exc) or 'wrong version number' in str(exc):
raise stem.SocketError("Failed to SSL authenticate to %s:%i. Maybe it isn't an ORPort?" % (address, port))
raise
# To negotiate our link protocol the first VERSIONS cell is expected to use
# a circuit ID field size from protocol version 1-3 for backward
# compatibility...
#
# The first VERSIONS cell, and any cells sent before the
# first VERSIONS cell, always have CIRCID_LEN == 2 for backward
# compatibility.
conn.send(stem.client.cell.VersionsCell(link_protocols).pack(2))
response = conn.recv()
# Link negotiation ends right away if we lack a common protocol
# version. (#25139)
if not response:
conn.close()
raise stem.SocketError('Unable to establish a common link protocol with %s:%i' % (address, port))
versions_reply = stem.client.cell.Cell.pop(response, 2)[0]
common_protocols = set(link_protocols).intersection(versions_reply.versions)
if not common_protocols:
conn.close()
raise stem.SocketError('Unable to find a common link protocol. We support %s but %s:%i supports %s.' % (', '.join(map(str, link_protocols)), address, port, ', '.join(map(str, versions_reply.versions))))
# Establishing connections requires sending a NETINFO, but including our
# address is optional. We can revisit including it when we have a usecase
# where it would help.
link_protocol = max(common_protocols)
conn.send(stem.client.cell.NetinfoCell(relay_addr, []).pack(link_protocol))
return Relay(conn, link_protocol)
def is_alive(self):
"""
Checks if our socket is currently connected. This is a pass-through for our
socket's :func:`~stem.socket.BaseSocket.is_alive` method.
:returns: **bool** that's **True** if our socket is connected and **False** otherwise
"""
return self._orport.is_alive()
def connection_time(self):
"""
Provides the unix timestamp for when our socket was either connected or
disconnected. That is to say, the time we connected if we're currently
connected and the time we disconnected if we're not connected.
:returns: **float** for when we last connected or disconnected, zero if
we've never connected
"""
return self._orport.connection_time()
def close(self):
"""
Closes our socket connection. This is a pass-through for our socket's
:func:`~stem.socket.BaseSocket.close` method.
"""
with self._orport_lock:
return self._orport.close()
def create_circuit(self):
"""
Establishes a new circuit.
"""
with self._orport_lock:
circ_id = max(self._circuits) + 1 if self._circuits else self.link_protocol.first_circ_id
create_fast_cell = stem.client.cell.CreateFastCell(circ_id)
self._orport.send(create_fast_cell.pack(self.link_protocol))
response = stem.client.cell.Cell.unpack(self._orport.recv(), self.link_protocol)
# realize the filter as a list since on python 3 a filter object is always truthy
created_fast_cells = list(filter(lambda cell: isinstance(cell, stem.client.cell.CreatedFastCell), response))
if not created_fast_cells:
raise ValueError('We should get a CREATED_FAST response from a CREATE_FAST request')
created_fast_cell = created_fast_cells[0]
kdf = KDF.from_value(create_fast_cell.key_material + created_fast_cell.key_material)
if created_fast_cell.derivative_key != kdf.key_hash:
raise ValueError('Remote failed to prove that it knows our shared key')
circ = Circuit(self, circ_id, kdf)
self._circuits[circ.id] = circ
return circ
def __iter__(self):
with self._orport_lock:
for circ in self._circuits.values():
yield circ
def __enter__(self):
return self
def __exit__(self, exit_type, value, traceback):
self.close()
class Circuit(object):
"""
Circuit through which requests can be made of a `Tor relay's ORPort
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.
:var stem.client.Relay relay: relay through which this circuit has been established
:var int id: circuit id
:var hashlib.sha1 forward_digest: digest for forward integrity check
:var hashlib.sha1 backward_digest: digest for backward integrity check
:var bytes forward_key: forward encryption key
:var bytes backward_key: backward encryption key
"""
def __init__(self, relay, circ_id, kdf):
if not stem.prereq.is_crypto_available():
raise ImportError('Circuit construction requires the cryptography module')
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
ctr = modes.CTR(ZERO * (algorithms.AES.block_size // 8))
self.relay = relay
self.id = circ_id
self.forward_digest = hashlib.sha1(kdf.forward_digest)
self.backward_digest = hashlib.sha1(kdf.backward_digest)
self.forward_key = Cipher(algorithms.AES(kdf.forward_key), ctr, default_backend()).encryptor()
self.backward_key = Cipher(algorithms.AES(kdf.backward_key), ctr, default_backend()).decryptor()
def send(self, command, data = '', stream_id = 0):
"""
Sends a message over the circuit.
:param stem.client.datatype.RelayCommand command: command to be issued
:param bytes data: message payload
:param int stream_id: specific stream this concerns
:returns: **list** of :class:`~stem.client.cell.RelayCell` responses
"""
with self.relay._orport_lock:
# Encrypt and send the cell. Our digest/key only updates if the cell is
# successfully sent.
cell = stem.client.cell.RelayCell(self.id, command, data, stream_id = stream_id)
payload, forward_key, forward_digest = cell.encrypt(self.relay.link_protocol, self.forward_key, self.forward_digest)
self.relay._orport.send(payload)
self.forward_digest = forward_digest
self.forward_key = forward_key
# Decrypt relay cells received in response. Again, our digest/key only
# updates when handled successfully.
reply = self.relay._orport.recv()
reply_cells = []
if len(reply) % self.relay.link_protocol.fixed_cell_length != 0:
raise stem.ProtocolError('Circuit response should be a series of RELAY cells, but received an unexpected size for a response: %i' % len(reply))
while reply:
encrypted_cell, reply = split(reply, self.relay.link_protocol.fixed_cell_length)
decrypted_cell, backward_key, backward_digest = stem.client.cell.RelayCell.decrypt(self.relay.link_protocol, encrypted_cell, self.backward_key, self.backward_digest)
if self.id != decrypted_cell.circ_id:
raise stem.ProtocolError('Response should be for circuit id %i, not %i' % (self.id, decrypted_cell.circ_id))
self.backward_digest = backward_digest
self.backward_key = backward_key
reply_cells.append(decrypted_cell)
return reply_cells
def close(self):
with self.relay._orport_lock:
self.relay._orport.send(stem.client.cell.DestroyCell(self.id).pack(self.relay.link_protocol))
del self.relay._circuits[self.id]
def __enter__(self):
return self
def __exit__(self, exit_type, value, traceback):
self.close()
stem-1.7.1/stem/client/cell.py 0000664 0001750 0001750 00000065110 13341474573 016747 0 ustar atagar atagar 0000000 0000000 # Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Messages communicated over a Tor relay's ORPort.
.. versionadded:: 1.7.0
**Module Overview:**
::
Cell - Base class for ORPort messages.
|- CircuitCell - Circuit management.
| |- CreateCell - Create a circuit. (section 5.1)
| |- CreatedCell - Acknowledge create. (section 5.1)
| |- RelayCell - End-to-end data. (section 6.1)
| |- DestroyCell - Stop using a circuit. (section 5.4)
| |- CreateFastCell - Create a circuit, no PK. (section 5.1)
| |- CreatedFastCell - Circuit created, no PK. (section 5.1)
| |- RelayEarlyCell - End-to-end data; limited. (section 5.6)
| |- Create2Cell - Extended CREATE cell. (section 5.1)
| +- Created2Cell - Extended CREATED cell. (section 5.1)
|
|- PaddingCell - Padding negotiation. (section 7.2)
|- VersionsCell - Negotiate proto version. (section 4)
|- NetinfoCell - Time and address info. (section 4.5)
|- PaddingNegotiateCell - Padding negotiation. (section 7.2)
|- VPaddingCell - Variable-length padding. (section 7.2)
|- CertsCell - Relay certificates. (section 4.2)
|- AuthChallengeCell - Challenge value. (section 4.3)
|- AuthenticateCell - Client authentication. (section 4.5)
|- AuthorizeCell - Client authorization. (not yet used)
|
|- pack - encodes cell into bytes
|- unpack - decodes series of cells
+- pop - decodes cell with remainder
"""
import copy
import datetime
import inspect
import os
import sys
import stem.util
from stem import UNDEFINED
from stem.client.datatype import HASH_LEN, ZERO, LinkProtocol, Address, Certificate, CloseReason, RelayCommand, Size, split
from stem.util import datetime_to_unix, str_tools
FIXED_PAYLOAD_LEN = 509 # PAYLOAD_LEN, per tor-spec section 0.2
AUTH_CHALLENGE_SIZE = 32
RELAY_DIGEST_SIZE = Size.LONG
STREAM_ID_REQUIRED = (
RelayCommand.BEGIN,
RelayCommand.DATA,
RelayCommand.END,
RelayCommand.CONNECTED,
RelayCommand.RESOLVE,
RelayCommand.RESOLVED,
RelayCommand.BEGIN_DIR,
)
STREAM_ID_DISALLOWED = (
RelayCommand.EXTEND,
RelayCommand.EXTENDED,
RelayCommand.TRUNCATE,
RelayCommand.TRUNCATED,
RelayCommand.DROP,
RelayCommand.EXTEND2,
RelayCommand.EXTENDED2,
)
class Cell(object):
"""
Metadata for ORPort cells.
Unused padding is **not** used in equality checks or hashing. If two cells
differ only in their *unused* attribute they are functionally equal.
The following cell types explicitly don't have *unused* content:
* PaddingCell (we consider all content part of payload)
* VersionsCell (all content is unpacked and treated as a version specification)
* VPaddingCell (we consider all content part of payload)
:var bytes unused: unused filler that padded the cell to the expected size
"""
NAME = 'UNKNOWN'
VALUE = -1
IS_FIXED_SIZE = False
def __init__(self, unused = b''):
super(Cell, self).__init__()
self.unused = unused
@staticmethod
def by_name(name):
"""
Provides cell attributes by its name.
:param str name: cell command to fetch
:raises: **ValueError** if cell type is invalid
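For example...

::

  >>> Cell.by_name('NETINFO').VALUE
  8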
"""
for _, cls in inspect.getmembers(sys.modules[__name__]):
if name == getattr(cls, 'NAME', UNDEFINED):
return cls
raise ValueError("'%s' isn't a valid cell type" % name)
@staticmethod
def by_value(value):
"""
Provides cell attributes by its value.
:param int value: cell value to fetch
:raises: **ValueError** if cell type is invalid
"""
for _, cls in inspect.getmembers(sys.modules[__name__]):
if value == getattr(cls, 'VALUE', UNDEFINED):
return cls
raise ValueError("'%s' isn't a valid cell value" % value)
def pack(self, link_protocol):
raise NotImplementedError('Packing not yet implemented for %s cells' % type(self).NAME)
@staticmethod
def unpack(content, link_protocol):
"""
Unpacks all cells from a response.
:param bytes content: payload to decode
:param int link_protocol: link protocol version
:returns: :class:`~stem.client.cell.Cell` generator
:raises:
* ValueError if content is malformed
* NotImplementedError if unable to unpack any of the cell types
"""
while content:
cell, content = Cell.pop(content, link_protocol)
yield cell
@staticmethod
def pop(content, link_protocol):
"""
Unpacks the first cell.
:param bytes content: payload to decode
:param int link_protocol: link protocol version
:returns: (:class:`~stem.client.cell.Cell`, remainder) tuple
:raises:
* ValueError if content is malformed
* NotImplementedError if unable to unpack this cell type
"""
link_protocol = LinkProtocol(link_protocol)
circ_id, content = link_protocol.circ_id_size.pop(content)
command, content = Size.CHAR.pop(content)
cls = Cell.by_value(command)
if cls.IS_FIXED_SIZE:
payload_len = FIXED_PAYLOAD_LEN
else:
payload_len, content = Size.SHORT.pop(content)
if len(content) < payload_len:
raise ValueError('%s cell should have a payload of %i bytes, but only had %i' % (cls.NAME, payload_len, len(content)))
payload, content = split(content, payload_len)
return cls._unpack(payload, circ_id, link_protocol), content
@classmethod
def _pack(cls, link_protocol, payload, unused = b'', circ_id = None):
"""
Provides bytes that can be used on the wire for these cell attributes.
Format of a properly packed cell depends on if it's fixed or variable
sized...
::
Fixed: [ CircuitID ][ Command ][ Payload ][ Padding ]
Variable: [ CircuitID ][ Command ][ Size ][ Payload ]
:param int link_protocol: link protocol version
:param bytes payload: cell payload
:param bytes unused: unused filler that pads the cell
:param int circ_id: circuit id, if a CircuitCell
:returns: **bytes** with the encoded payload
:raises: **ValueError** if cell type invalid or payload makes cell too large
"""
if issubclass(cls, CircuitCell):
if circ_id is None:
raise ValueError('%s cells require a circuit identifier' % cls.NAME)
elif circ_id < 1:
raise ValueError('Circuit identifiers must be a positive integer, not %s' % circ_id)
else:
if circ_id is not None:
raise ValueError('%s cells should not specify a circuit identifier' % cls.NAME)
circ_id = 0 # cell doesn't concern a circuit, default field to zero
link_protocol = LinkProtocol(link_protocol)
cell = bytearray()
cell += link_protocol.circ_id_size.pack(circ_id)
cell += Size.CHAR.pack(cls.VALUE)
cell += b'' if cls.IS_FIXED_SIZE else Size.SHORT.pack(len(payload) + len(unused))
cell += payload
# include the unused portion (typically from unpacking)
cell += unused
# pad fixed sized cells to the required length
if cls.IS_FIXED_SIZE:
if len(cell) > link_protocol.fixed_cell_length:
raise ValueError('Cell of type %s is too large (%i bytes), must not be more than %i. Check payload size (was %i bytes)' % (cls.NAME, len(cell), link_protocol.fixed_cell_length, len(payload)))
cell += ZERO * (link_protocol.fixed_cell_length - len(cell))
return bytes(cell)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
"""
Subclass implementation for unpacking cell content.
:param bytes content: payload to decode
:param stem.client.datatype.LinkProtocol link_protocol: link protocol version
:param int circ_id: circuit id cell is for
:returns: instance of this cell type
:raises: **ValueError** if content is malformed
"""
raise NotImplementedError('Unpacking not yet implemented for %s cells' % cls.NAME)
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, Cell) else False
def __ne__(self, other):
return not self == other
class CircuitCell(Cell):
"""
Cell concerning circuits.
:var int circ_id: circuit id
"""
def __init__(self, circ_id, unused = b''):
super(CircuitCell, self).__init__(unused)
self.circ_id = circ_id
class PaddingCell(Cell):
"""
Randomized content to keep activity going on a circuit.
:var bytes payload: randomized payload
"""
NAME = 'PADDING'
VALUE = 0
IS_FIXED_SIZE = True
def __init__(self, payload = None):
if not payload:
payload = os.urandom(FIXED_PAYLOAD_LEN)
elif len(payload) != FIXED_PAYLOAD_LEN:
raise ValueError('Padding payload should be %i bytes, but was %i' % (FIXED_PAYLOAD_LEN, len(payload)))
super(PaddingCell, self).__init__()
self.payload = payload
def pack(self, link_protocol):
return PaddingCell._pack(link_protocol, self.payload)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
return PaddingCell(content)
def __hash__(self):
return stem.util._hash_attr(self, 'payload', cache = True)
class CreateCell(CircuitCell):
NAME = 'CREATE'
VALUE = 1
IS_FIXED_SIZE = True
def __init__(self):
super(CreateCell, self).__init__() # TODO: implement
class CreatedCell(CircuitCell):
NAME = 'CREATED'
VALUE = 2
IS_FIXED_SIZE = True
def __init__(self):
super(CreatedCell, self).__init__() # TODO: implement
class RelayCell(CircuitCell):
"""
Command concerning a relay circuit.
Our 'recognized' attribute provides a cheap (but incomplete) check for
whether our cell payload is encrypted. If non-zero our payload *IS*
encrypted, but if zero we're *PROBABLY* fully decrypted. This uncertainty
exists because encrypted cells have a small chance of coincidentally
producing zero for this value as well.
:var stem.client.RelayCommand command: command to be issued
:var int command_int: integer value of our command
:var bytes data: payload of the cell
:var int recognized: non-zero if payload is encrypted
:var int digest: running digest held with the relay
:var int stream_id: specific stream this concerns
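For example, a minimal sketch of constructing an unencrypted RELAY_DATA
cell (the circuit and stream ids here are arbitrary)...
::
>>> cell = RelayCell(5, 'RELAY_DATA', 'hello', stream_id = 1)
>>> cell.command_int
2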
"""
NAME = 'RELAY'
VALUE = 3
IS_FIXED_SIZE = True
def __init__(self, circ_id, command, data, digest = 0, stream_id = 0, recognized = 0, unused = b''):
if 'HASH' in str(type(digest)):
# Unfortunately hashlib generates from a dynamic private class so
# isinstance() isn't such a great option. With python2/python3 the
# name is 'hashlib.HASH' whereas PyPy calls it just 'HASH'.
digest_packed = digest.digest()[:RELAY_DIGEST_SIZE.size]
digest = RELAY_DIGEST_SIZE.unpack(digest_packed)
elif stem.util._is_str(digest):
digest_packed = digest[:RELAY_DIGEST_SIZE.size]
digest = RELAY_DIGEST_SIZE.unpack(digest_packed)
elif stem.util._is_int(digest):
pass
else:
raise ValueError('RELAY cell digest must be a hash, string, or int but was a %s' % type(digest).__name__)
super(RelayCell, self).__init__(circ_id, unused)
self.command, self.command_int = RelayCommand.get(command)
self.recognized = recognized
self.stream_id = stream_id
self.digest = digest
self.data = str_tools._to_bytes(data)
if digest == 0:
if not stream_id and self.command in STREAM_ID_REQUIRED:
raise ValueError('%s relay cells require a stream id' % self.command)
elif stream_id and self.command in STREAM_ID_DISALLOWED:
raise ValueError('%s relay cells concern the circuit itself and cannot have a stream id' % self.command)
def pack(self, link_protocol):
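# RELAY cell payload layout per section 6.1 of the tor-spec: command
# (1 byte), 'recognized' (2 bytes), stream id (2 bytes), digest (4 bytes),
# data length (2 bytes), then the data itself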
payload = bytearray()
payload += Size.CHAR.pack(self.command_int)
payload += Size.SHORT.pack(self.recognized)
payload += Size.SHORT.pack(self.stream_id)
payload += Size.LONG.pack(self.digest)
payload += Size.SHORT.pack(len(self.data))
payload += self.data
return RelayCell._pack(link_protocol, bytes(payload), self.unused, self.circ_id)
@staticmethod
def decrypt(link_protocol, content, key, digest):
"""
Decrypts content as a relay cell addressed to us. This provides back a
tuple of the form...
::
(cell (RelayCell), new_key (CipherContext), new_digest (HASH))
:param int link_protocol: link protocol version
:param bytes content: cell content to be decrypted
:param cryptography.hazmat.primitives.ciphers.CipherContext key:
key established with the relay we received this cell from
:param HASH digest: running digest held with the relay
:returns: **tuple** with our decrypted cell and updated key/digest
:raises: :class:`stem.ProtocolError` if content doesn't belong to a relay
cell
"""
new_key = copy.copy(key)
new_digest = digest.copy()
if len(content) != link_protocol.fixed_cell_length:
raise stem.ProtocolError('RELAY cells should be %i bytes, but received %i' % (link_protocol.fixed_cell_length, len(content)))
circ_id, content = link_protocol.circ_id_size.pop(content)
command, encrypted_payload = Size.CHAR.pop(content)
if command != RelayCell.VALUE:
raise stem.ProtocolError('Cannot decrypt as a RELAY cell. This had command %i instead.' % command)
payload = new_key.update(encrypted_payload)
cell = RelayCell._unpack(payload, circ_id, link_protocol)
# TODO: Implement our decryption digest. It is used to support relaying
# within multi-hop circuits. On first glance this should go something
# like...
#
# # Our updated digest is calculated based on this cell with a blanked
# # digest field.
#
# digest_cell = RelayCell(self.circ_id, self.command, self.data, 0, self.stream_id, self.recognized, self.unused)
# new_digest.update(digest_cell.pack(link_protocol))
#
# is_encrypted == cell.recognized != 0 or self.digest == new_digest
#
# ... or something like that. Until we attempt to support relaying this is
# both moot and difficult to exercise in order to ensure we get it right.
return cell, new_key, new_digest
def encrypt(self, link_protocol, key, digest):
"""
Encrypts our cell content to be sent with the given key. This provides back
a tuple of the form...
::
(payload (bytes), new_key (CipherContext), new_digest (HASH))
:param int link_protocol: link protocol version
:param cryptography.hazmat.primitives.ciphers.CipherContext key:
key established with the relay we're sending this cell to
:param HASH digest: running digest held with the relay
:returns: **tuple** with our encrypted payload and updated key/digest
"""
new_key = copy.copy(key)
new_digest = digest.copy()
# Digests are computed from our payload, not including our header's circuit
# id (2 or 4 bytes) and command (1 byte).
header_size = link_protocol.circ_id_size.size + 1
payload_without_digest = self.pack(link_protocol)[header_size:]
new_digest.update(payload_without_digest)
# Pack a copy of ourselves with our newly calculated digest, and encrypt
# the payload. Header remains plaintext.
cell = RelayCell(self.circ_id, self.command, self.data, new_digest, self.stream_id, self.recognized, self.unused)
header, payload = split(cell.pack(link_protocol), header_size)
return header + new_key.update(payload), new_key, new_digest
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
command, content = Size.CHAR.pop(content)
recognized, content = Size.SHORT.pop(content) # 'recognized' field
stream_id, content = Size.SHORT.pop(content)
digest, content = Size.LONG.pop(content)
data_len, content = Size.SHORT.pop(content)
data, unused = split(content, data_len)
if len(data) != data_len:
raise ValueError('%s cell said it had %i bytes of data, but only had %i' % (cls.NAME, data_len, len(data)))
return RelayCell(circ_id, command, data, digest, stream_id, recognized, unused)
def __hash__(self):
return stem.util._hash_attr(self, 'command_int', 'stream_id', 'digest', 'data', cache = True)
class DestroyCell(CircuitCell):
"""
Closes the given circuit.
:var stem.client.CloseReason reason: reason the circuit is being closed
:var int reason_int: integer value of our closure reason
"""
NAME = 'DESTROY'
VALUE = 4
IS_FIXED_SIZE = True
def __init__(self, circ_id, reason = CloseReason.NONE, unused = b''):
super(DestroyCell, self).__init__(circ_id, unused)
self.reason, self.reason_int = CloseReason.get(reason)
def pack(self, link_protocol):
return DestroyCell._pack(link_protocol, Size.CHAR.pack(self.reason_int), self.unused, self.circ_id)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
reason, unused = Size.CHAR.pop(content)
return DestroyCell(circ_id, reason, unused)
def __hash__(self):
return stem.util._hash_attr(self, 'circ_id', 'reason_int', cache = True)
class CreateFastCell(CircuitCell):
"""
Create a circuit with our first hop. This is lighter weight than further hops
because we've already established the relay's identity and secret key.
:var bytes key_material: randomized key material
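For example (a minimal sketch, key material is randomly generated when
unspecified)...
::
>>> cell = CreateFastCell(circ_id = 2)
>>> len(cell.key_material)
20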
"""
NAME = 'CREATE_FAST'
VALUE = 5
IS_FIXED_SIZE = True
def __init__(self, circ_id, key_material = None, unused = b''):
if not key_material:
key_material = os.urandom(HASH_LEN)
elif len(key_material) != HASH_LEN:
raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))
super(CreateFastCell, self).__init__(circ_id, unused)
self.key_material = key_material
def pack(self, link_protocol):
return CreateFastCell._pack(link_protocol, self.key_material, self.unused, self.circ_id)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
key_material, unused = split(content, HASH_LEN)
if len(key_material) != HASH_LEN:
raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))
return CreateFastCell(circ_id, key_material, unused)
def __hash__(self):
return stem.util._hash_attr(self, 'circ_id', 'key_material', cache = True)
class CreatedFastCell(CircuitCell):
"""
CREATE_FAST reply.
:var bytes key_material: randomized key material
:var bytes derivative_key: hash proving the relay knows our shared key
"""
NAME = 'CREATED_FAST'
VALUE = 6
IS_FIXED_SIZE = True
def __init__(self, circ_id, derivative_key, key_material = None, unused = b''):
if not key_material:
key_material = os.urandom(HASH_LEN)
elif len(key_material) != HASH_LEN:
raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))
if len(derivative_key) != HASH_LEN:
raise ValueError('Derivative key should be %i bytes, but was %i' % (HASH_LEN, len(derivative_key)))
super(CreatedFastCell, self).__init__(circ_id, unused)
self.key_material = key_material
self.derivative_key = derivative_key
def pack(self, link_protocol):
return CreatedFastCell._pack(link_protocol, self.key_material + self.derivative_key, self.unused, self.circ_id)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
if len(content) < HASH_LEN * 2:
raise ValueError('Key material and derivative key should be %i bytes, but was %i' % (HASH_LEN * 2, len(content)))
key_material, content = split(content, HASH_LEN)
derivative_key, content = split(content, HASH_LEN)
return CreatedFastCell(circ_id, derivative_key, key_material, content)
def __hash__(self):
return stem.util._hash_attr(self, 'circ_id', 'derivative_key', 'key_material', cache = True)
class VersionsCell(Cell):
"""
Link version negotiation cell.
:var list versions: link versions
"""
NAME = 'VERSIONS'
VALUE = 7
IS_FIXED_SIZE = False
def __init__(self, versions):
super(VersionsCell, self).__init__()
self.versions = versions
def pack(self, link_protocol):
payload = b''.join([Size.SHORT.pack(v) for v in self.versions])
return VersionsCell._pack(link_protocol, payload)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
link_protocols = []
while content:
version, content = Size.SHORT.pop(content)
link_protocols.append(version)
return VersionsCell(link_protocols)
def __hash__(self):
return stem.util._hash_attr(self, 'versions', cache = True)
class NetinfoCell(Cell):
"""
Information relays exchange about each other.
:var datetime timestamp: current time
:var stem.client.Address receiver_address: receiver's OR address
:var list sender_addresses: sender's OR addresses
"""
NAME = 'NETINFO'
VALUE = 8
IS_FIXED_SIZE = True
def __init__(self, receiver_address, sender_addresses, timestamp = None, unused = b''):
super(NetinfoCell, self).__init__(unused)
self.timestamp = timestamp if timestamp else datetime.datetime.now()
self.receiver_address = receiver_address
self.sender_addresses = sender_addresses
def pack(self, link_protocol):
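# NETINFO payload layout per section 4.5 of the tor-spec: a four byte
# timestamp, the receiver's address, then a one byte count of our own
# addresses followed by each of those addresses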
payload = bytearray()
payload += Size.LONG.pack(int(datetime_to_unix(self.timestamp)))
payload += self.receiver_address.pack()
payload += Size.CHAR.pack(len(self.sender_addresses))
for addr in self.sender_addresses:
payload += addr.pack()
return NetinfoCell._pack(link_protocol, bytes(payload), self.unused)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
timestamp, content = Size.LONG.pop(content)
receiver_address, content = Address.pop(content)
sender_addresses = []
sender_addr_count, content = Size.CHAR.pop(content)
for i in range(sender_addr_count):
addr, content = Address.pop(content)
sender_addresses.append(addr)
return NetinfoCell(receiver_address, sender_addresses, datetime.datetime.utcfromtimestamp(timestamp), unused = content)
def __hash__(self):
return stem.util._hash_attr(self, 'timestamp', 'receiver_address', 'sender_addresses', cache = True)
class RelayEarlyCell(CircuitCell):
NAME = 'RELAY_EARLY'
VALUE = 9
IS_FIXED_SIZE = True
def __init__(self):
super(RelayEarlyCell, self).__init__() # TODO: implement
class Create2Cell(CircuitCell):
NAME = 'CREATE2'
VALUE = 10
IS_FIXED_SIZE = True
def __init__(self):
super(Create2Cell, self).__init__() # TODO: implement
class Created2Cell(Cell):
NAME = 'CREATED2'
VALUE = 11
IS_FIXED_SIZE = True
def __init__(self):
super(Created2Cell, self).__init__() # TODO: implement
class PaddingNegotiateCell(Cell):
NAME = 'PADDING_NEGOTIATE'
VALUE = 12
IS_FIXED_SIZE = True
def __init__(self):
super(PaddingNegotiateCell, self).__init__() # TODO: implement
class VPaddingCell(Cell):
"""
Variable length randomized content to keep activity going on a circuit.
:var bytes payload: randomized payload
"""
NAME = 'VPADDING'
VALUE = 128
IS_FIXED_SIZE = False
def __init__(self, size = None, payload = None):
if size is None and payload is None:
raise ValueError('VPaddingCell constructor must specify payload or size')
elif size is not None and size < 0:
raise ValueError('VPaddingCell size (%s) cannot be negative' % size)
elif size is not None and payload is not None and size != len(payload):
raise ValueError('VPaddingCell constructor specified both a size of %i bytes and payload of %i bytes' % (size, len(payload)))
super(VPaddingCell, self).__init__()
self.payload = payload if payload is not None else os.urandom(size)
def pack(self, link_protocol):
return VPaddingCell._pack(link_protocol, self.payload)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
return VPaddingCell(payload = content)
def __hash__(self):
return stem.util._hash_attr(self, 'payload', cache = True)
class CertsCell(Cell):
"""
Certificates held by the relay we're communicating with.
:var list certificates: :class:`~stem.client.Certificate` of the relay
"""
NAME = 'CERTS'
VALUE = 129
IS_FIXED_SIZE = False
def __init__(self, certs, unused = b''):
super(CertsCell, self).__init__(unused)
self.certificates = certs
def pack(self, link_protocol):
return CertsCell._pack(link_protocol, Size.CHAR.pack(len(self.certificates)) + b''.join([cert.pack() for cert in self.certificates]), self.unused)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
cert_count, content = Size.CHAR.pop(content)
certs = []
for i in range(cert_count):
if not content:
raise ValueError('CERTS cell indicates it should have %i certificates, but only contained %i' % (cert_count, len(certs)))
cert, content = Certificate.pop(content)
certs.append(cert)
return CertsCell(certs, unused = content)
def __hash__(self):
return stem.util._hash_attr(self, 'certificates', cache = True)
class AuthChallengeCell(Cell):
"""
First step of the authentication handshake.
:var bytes challenge: random bytes for us to sign to authenticate
:var list methods: authentication methods supported by the relay we're
communicating with
"""
NAME = 'AUTH_CHALLENGE'
VALUE = 130
IS_FIXED_SIZE = False
def __init__(self, methods, challenge = None, unused = b''):
if not challenge:
challenge = os.urandom(AUTH_CHALLENGE_SIZE)
elif len(challenge) != AUTH_CHALLENGE_SIZE:
raise ValueError('AUTH_CHALLENGE must be %i bytes, but was %i' % (AUTH_CHALLENGE_SIZE, len(challenge)))
super(AuthChallengeCell, self).__init__(unused)
self.challenge = challenge
self.methods = methods
def pack(self, link_protocol):
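# AUTH_CHALLENGE payload layout per section 4.3 of the tor-spec: a 32 byte
# challenge, a two byte method count, then two bytes for each method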
payload = bytearray()
payload += self.challenge
payload += Size.SHORT.pack(len(self.methods))
for method in self.methods:
payload += Size.SHORT.pack(method)
return AuthChallengeCell._pack(link_protocol, bytes(payload), self.unused)
@classmethod
def _unpack(cls, content, circ_id, link_protocol):
min_size = AUTH_CHALLENGE_SIZE + Size.SHORT.size
if len(content) < min_size:
raise ValueError('AUTH_CHALLENGE payload should be at least %i bytes, but was %i' % (min_size, len(content)))
challenge, content = split(content, AUTH_CHALLENGE_SIZE)
method_count, content = Size.SHORT.pop(content)
if len(content) < method_count * Size.SHORT.size:
raise ValueError('AUTH_CHALLENGE should have %i methods, but only had %i bytes for it' % (method_count, len(content)))
methods = []
for i in range(method_count):
method, content = Size.SHORT.pop(content)
methods.append(method)
return AuthChallengeCell(methods, challenge, unused = content)
def __hash__(self):
return stem.util._hash_attr(self, 'challenge', 'methods', cache = True)
class AuthenticateCell(Cell):
NAME = 'AUTHENTICATE'
VALUE = 131
IS_FIXED_SIZE = False
def __init__(self):
super(AuthenticateCell, self).__init__() # TODO: implement
class AuthorizeCell(Cell):
NAME = 'AUTHORIZE'
VALUE = 132
IS_FIXED_SIZE = False
def __init__(self):
super(AuthorizeCell, self).__init__() # TODO: implement
stem-1.7.1/stem/interpreter/ 0000775 0001750 0001750 00000000000 13411004021 016507 5 ustar atagar atagar 0000000 0000000 stem-1.7.1/stem/interpreter/help.py 0000664 0001750 0001750 00000007303 13341474573 020045 0 ustar atagar atagar 0000000 0000000 # Copyright 2014-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Provides our /help responses.
"""
import stem.prereq
from stem.interpreter import (
STANDARD_OUTPUT,
BOLD_OUTPUT,
ERROR_OUTPUT,
msg,
uses_settings,
)
from stem.util.term import format
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
def response(controller, arg):
"""
Provides our /help response.
:param stem.control.Controller controller: tor control connection
:param str arg: controller or interpreter command to provide help output for
:returns: **str** with our help response
"""
# Normalizing inputs first so we can better cache responses.
return _response(controller, _normalize(arg))
def _normalize(arg):
arg = arg.upper()
# If there are multiple arguments then just take the first. This is
# particularly likely if they're trying to query a full command (for
# instance "/help GETINFO version")
arg = arg.split(' ')[0]
# strip slash if someone enters an interpreter command (ex. "/help /help")
if arg.startswith('/'):
arg = arg[1:]
return arg
@lru_cache()
@uses_settings
def _response(controller, arg, config):
if not arg:
return _general_help()
usage_info = config.get('help.usage', {})
if arg not in usage_info:
return format("No help information available for '%s'..." % arg, *ERROR_OUTPUT)
output = format(usage_info[arg] + '\n', *BOLD_OUTPUT)
description = config.get('help.description.%s' % arg.lower(), '')
for line in description.splitlines():
output += format(' ' + line, *STANDARD_OUTPUT) + '\n'
output += '\n'
if arg == 'GETINFO':
results = controller.get_info('info/names', None)
if results:
for line in results.splitlines():
if ' -- ' in line:
opt, summary = line.split(' -- ', 1)
output += format('%-33s' % opt, *BOLD_OUTPUT)
output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n'
elif arg == 'GETCONF':
results = controller.get_info('config/names', None)
if results:
options = [opt.split(' ', 1)[0] for opt in results.splitlines()]
for i in range(0, len(options), 2):
line = ''
for entry in options[i:i + 2]:
line += '%-42s' % entry
output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n'
elif arg == 'SIGNAL':
signal_options = config.get('help.signal.options', {})
for signal, summary in signal_options.items():
output += format('%-15s' % signal, *BOLD_OUTPUT)
output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n'
elif arg == 'SETEVENTS':
results = controller.get_info('events/names', None)
if results:
entries = results.split()
# displays four columns of 20 characters
for i in range(0, len(entries), 4):
line = ''
for entry in entries[i:i + 4]:
line += '%-20s' % entry
output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n'
elif arg == 'USEFEATURE':
results = controller.get_info('features/names', None)
if results:
output += format(results, *STANDARD_OUTPUT) + '\n'
elif arg in ('LOADCONF', 'POSTDESCRIPTOR'):
# gives a warning that this option isn't yet implemented
output += format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT) + '\n'
return output.rstrip()
def _general_help():
lines = []
for line in msg('help.general').splitlines():
div = line.find(' - ')
if div != -1:
cmd, description = line[:div], line[div:]
lines.append(format(cmd, *BOLD_OUTPUT) + format(description, *STANDARD_OUTPUT))
else:
lines.append(format(line, *BOLD_OUTPUT))
return '\n'.join(lines)
stem-1.7.1/stem/interpreter/arguments.py 0000664 0001750 0001750 00000005365 13341034346 021116 0 ustar atagar atagar 0000000 0000000 # Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Commandline argument parsing for our interpreter prompt.
"""
import collections
import getopt
import os
import stem.interpreter
import stem.util.connection
DEFAULT_ARGS = {
'control_address': '127.0.0.1',
'control_port': 'default',
'user_provided_port': False,
'control_socket': '/var/run/tor/control',
'user_provided_socket': False,
'tor_path': 'tor',
'run_cmd': None,
'run_path': None,
'disable_color': False,
'print_help': False,
}
OPT = 'i:s:h'
OPT_EXPANDED = ['interface=', 'socket=', 'tor=', 'run=', 'no-color', 'help']
def parse(argv):
"""
Parses our arguments, providing a named tuple with their values.
:param list argv: input arguments to be parsed
:returns: a **named tuple** with our parsed arguments
:raises: **ValueError** if we got an invalid argument
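For example (an illustrative sketch)...
::
>>> args = parse(['--interface', '9051'])
>>> args.control_port, args.user_provided_port
(9051, True)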
"""
args = dict(DEFAULT_ARGS)
try:
recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED)
if unrecognized_args:
error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
except Exception as exc:
raise ValueError('%s (for usage provide --help)' % exc)
for opt, arg in recognized_args:
if opt in ('-i', '--interface'):
if ':' in arg:
address, port = arg.rsplit(':', 1)
else:
address, port = None, arg
if address is not None:
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("'%s' isn't a valid IPv4 address" % address)
args['control_address'] = address
if not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port number" % port)
args['control_port'] = int(port)
args['user_provided_port'] = True
elif opt in ('-s', '--socket'):
args['control_socket'] = arg
args['user_provided_socket'] = True
elif opt == '--tor':
args['tor_path'] = arg
elif opt == '--run':
if os.path.exists(arg):
args['run_path'] = arg
else:
args['run_cmd'] = arg
elif opt == '--no-color':
args['disable_color'] = True
elif opt in ('-h', '--help'):
args['print_help'] = True
# translates our args dict into a named tuple
Args = collections.namedtuple('Args', args.keys())
return Args(**args)
def get_help():
"""
Provides our --help usage information.
:returns: **str** with our usage information
"""
return stem.interpreter.msg(
'msg.help',
address = DEFAULT_ARGS['control_address'],
port = DEFAULT_ARGS['control_port'],
socket = DEFAULT_ARGS['control_socket'],
)
stem-1.7.1/stem/interpreter/commands.py 0000664 0001750 0001750 00000027545 13341034346 020716 0 ustar atagar atagar 0000000 0000000 # Copyright 2014-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Handles making requests and formatting the responses.
"""
import code
import contextlib
import socket
import sys
import stem
import stem.control
import stem.descriptor.remote
import stem.interpreter.help
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg
from stem.util.term import format
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
MAX_EVENTS = 100
def _get_fingerprint(arg, controller):
"""
Resolves user input into a relay fingerprint. This accepts...
* Fingerprints
* Nicknames
* IPv4 addresses, either with or without an ORPort
* Empty input, which is resolved to ourselves if we're a relay
:param str arg: input to be resolved to a relay fingerprint
:param stem.control.Controller controller: tor control connection
:returns: **str** for the relay fingerprint
:raises: **ValueError** if we're unable to resolve the input to a relay
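For example (an illustrative sketch, assuming a connected controller)...
::
>>> _get_fingerprint('moria1', controller)
'9695DFC35FFEB861329B9F1AB04C46397020CE31'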
"""
if not arg:
try:
return controller.get_info('fingerprint')
except:
raise ValueError("We aren't a relay, no information to provide")
elif stem.util.tor_tools.is_valid_fingerprint(arg):
return arg
elif stem.util.tor_tools.is_valid_nickname(arg):
try:
return controller.get_network_status(arg).fingerprint
except:
raise ValueError("Unable to find a relay with the nickname of '%s'" % arg)
elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg):
if ':' in arg:
address, port = arg.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("'%s' isn't a valid IPv4 address" % address)
elif port and not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
port = int(port)
else:
address, port = arg, None
matches = {}
for desc in controller.get_network_statuses():
if desc.address == address:
if not port or desc.or_port == port:
matches[desc.or_port] = desc.fingerprint
if len(matches) == 0:
raise ValueError('No relays found at %s' % arg)
elif len(matches) == 1:
return list(matches.values())[0]
else:
response = "There's multiple relays at %s, include a port to specify which.\n\n" % arg
for i, or_port in enumerate(matches):
response += ' %i. %s:%s, fingerprint: %s\n' % (i + 1, address, or_port, matches[or_port])
raise ValueError(response)
else:
raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg)
@contextlib.contextmanager
def redirect(stdout, stderr):
original = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stdout, stderr
try:
yield
finally:
sys.stdout, sys.stderr = original
class ControlInterpreter(code.InteractiveConsole):
"""
Handles issuing requests and providing nicely formed responses, with support
for special IRC style subcommands.
"""
def __init__(self, controller):
self._received_events = []
code.InteractiveConsole.__init__(self, {
'stem': stem,
'stem.control': stem.control,
'controller': controller,
'events': self.get_events,
})
self._controller = controller
self._run_python_commands = True
# Indicates if we're processing a multiline command, such as conditional
# block or loop.
self.is_multiline_context = False
# Intercept events our controller hears about at a pretty low level since
# the user will likely be requesting them by direct 'SETEVENTS' calls.
handle_event_real = self._controller._handle_event
def handle_event_wrapper(event_message):
handle_event_real(event_message)
self._received_events.insert(0, event_message)
if len(self._received_events) > MAX_EVENTS:
self._received_events.pop()
self._controller._handle_event = handle_event_wrapper
def get_events(self, *event_types):
events = list(self._received_events)
event_types = list(map(str.upper, event_types)) # make filtering case insensitive
if event_types:
events = [e for e in events if e.type in event_types]
return events
def do_help(self, arg):
"""
Performs the '/help' operation, giving usage information for the given
argument or a general summary if there wasn't one.
"""
return stem.interpreter.help.response(self._controller, arg)
def do_events(self, arg):
"""
Performs the '/events' operation, dumping the events that we've received
belonging to the given types. If no types are specified then this provides
all buffered events.
If the user runs '/events clear' then this clears the list of events we've
received.
"""
event_types = arg.upper().split()
if 'CLEAR' in event_types:
del self._received_events[:]
return format('cleared event backlog', *STANDARD_OUTPUT)
return '\n'.join([format(str(e), *STANDARD_OUTPUT) for e in self.get_events(*event_types)])
def do_info(self, arg):
"""
Performs the '/info' operation, looking up a relay by fingerprint, IP
address, or nickname and printing its descriptor and consensus entries in a
pretty fashion.
"""
try:
fingerprint = _get_fingerprint(arg, self._controller)
except ValueError as exc:
return format(str(exc), *ERROR_OUTPUT)
ns_desc = self._controller.get_network_status(fingerprint, None)
server_desc = self._controller.get_server_descriptor(fingerprint, None)
extrainfo_desc = None
micro_desc = self._controller.get_microdescriptor(fingerprint, None)
# We'll mostly rely on the router status entry. Either the server
# descriptor or microdescriptor will be missing, so we'll treat them as
# being optional.
if not ns_desc:
return format('Unable to find consensus information for %s' % fingerprint, *ERROR_OUTPUT)
# More likely than not we'll have the microdescriptor but not server and
# extrainfo descriptors. If so then fetch them.
downloader = stem.descriptor.remote.DescriptorDownloader(timeout = 5)
server_desc_query = downloader.get_server_descriptors(fingerprint)
extrainfo_desc_query = downloader.get_extrainfo_descriptors(fingerprint)
for desc in server_desc_query:
server_desc = desc
for desc in extrainfo_desc_query:
extrainfo_desc = desc
address_extrainfo = []
try:
address_extrainfo.append(socket.gethostbyaddr(ns_desc.address)[0])
except:
pass
try:
address_extrainfo.append(self._controller.get_info('ip-to-country/%s' % ns_desc.address))
except:
pass
address_extrainfo_label = ' (%s)' % ', '.join(address_extrainfo) if address_extrainfo else ''
if server_desc:
exit_policy_label = str(server_desc.exit_policy)
elif micro_desc:
exit_policy_label = str(micro_desc.exit_policy)
else:
exit_policy_label = 'Unknown'
lines = [
'%s (%s)' % (ns_desc.nickname, fingerprint),
format('address: ', *BOLD_OUTPUT) + '%s:%s%s' % (ns_desc.address, ns_desc.or_port, address_extrainfo_label),
]
if server_desc:
lines.append(format('tor version: ', *BOLD_OUTPUT) + str(server_desc.tor_version))
lines.append(format('flags: ', *BOLD_OUTPUT) + ', '.join(ns_desc.flags))
lines.append(format('exit policy: ', *BOLD_OUTPUT) + exit_policy_label)
if server_desc and server_desc.contact:
contact = stem.util.str_tools._to_unicode(server_desc.contact)
# clears up some highly common obscuring
for alias in (' at ', ' AT '):
contact = contact.replace(alias, '@')
for alias in (' dot ', ' DOT '):
contact = contact.replace(alias, '.')
lines.append(format('contact: ', *BOLD_OUTPUT) + contact)
descriptor_section = [
('Server Descriptor:', server_desc),
('Extrainfo Descriptor:', extrainfo_desc),
('Microdescriptor:', micro_desc),
('Router Status Entry:', ns_desc),
]
div = format('-' * 80, *STANDARD_OUTPUT)
for label, desc in descriptor_section:
if desc:
lines += ['', div, format(label, *BOLD_OUTPUT), div, '']
lines += [format(l, *STANDARD_OUTPUT) for l in str(desc).splitlines()]
return '\n'.join(lines)
def do_python(self, arg):
"""
Performs the '/python' operation, toggling if we accept python commands or
not.
"""
if not arg:
status = 'enabled' if self._run_python_commands else 'disabled'
return format('Python support is currently %s.' % status, *STANDARD_OUTPUT)
elif arg.lower() == 'enable':
self._run_python_commands = True
elif arg.lower() == 'disable':
self._run_python_commands = False
else:
return format("'%s' is not recognized. Please run either '/python enable' or '/python disable'." % arg, *ERROR_OUTPUT)
if self._run_python_commands:
response = "Python support enabled, we'll now run non-interpreter commands as python."
else:
response = "Python support disabled, we'll now pass along all commands to tor."
return format(response, *STANDARD_OUTPUT)
@uses_settings
def run_command(self, command, config, print_response = False):
"""
Runs the given command. Requests starting with a '/' are special commands
to the interpreter, and anything else is sent to the control port.
:param str command: command to be processed
:param bool print_response: prints the response to stdout if true
:returns: **str** with the command's output
:raises: **stem.SocketClosed** if the control connection has been severed
"""
# Commands fall into three categories:
#
# * Interpreter commands. These start with a '/'.
#
# * Controller commands stem knows how to handle. We use our Controller's
# methods for these to take advantage of caching and present nicer
# output.
#
# * Other tor commands. We pass these directly on to the control port.
cmd, arg = command.strip(), ''
if ' ' in cmd:
cmd, arg = cmd.split(' ', 1)
output = ''
if cmd.startswith('/'):
cmd = cmd.lower()
if cmd == '/quit':
raise stem.SocketClosed()
elif cmd == '/events':
output = self.do_events(arg)
elif cmd == '/info':
output = self.do_info(arg)
elif cmd == '/python':
output = self.do_python(arg)
elif cmd == '/help':
output = self.do_help(arg)
else:
output = format("'%s' isn't a recognized command" % command, *ERROR_OUTPUT)
else:
cmd = cmd.upper() # makes commands uppercase to match the spec
if cmd.replace('+', '') in ('LOADCONF', 'POSTDESCRIPTOR'):
# provides a notice that multi-line controller input isn't yet implemented
output = format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT)
elif cmd == 'QUIT':
self._controller.msg(command)
raise stem.SocketClosed()
else:
is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events'
if self._run_python_commands and not is_tor_command:
console_output = StringIO()
with redirect(console_output, console_output):
self.is_multiline_context = code.InteractiveConsole.push(self, command)
output = console_output.getvalue().strip()
else:
try:
output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT)
except stem.ControllerError as exc:
if isinstance(exc, stem.SocketClosed):
raise
else:
output = format(str(exc), *ERROR_OUTPUT)
if output:
output += '\n' # give ourselves an extra line before the next prompt
if print_response:
print(output)
return output
stem-1.7.1/stem/interpreter/__init__.py 0000664 0001750 0001750 00000013661 13341474573 020660 0 ustar atagar atagar 0000000 0000000 # Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Interactive interpreter for interacting with Tor directly. This adds usability
features such as tab completion, history, and IRC-style functions (like /help).
"""
import os
import sys
import stem
import stem.connection
import stem.prereq
import stem.process
import stem.util.conf
import stem.util.system
import stem.util.term
from stem.util.term import Attr, Color, format
__all__ = [
'arguments',
'autocomplete',
'commands',
'help',
]
PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE)
STANDARD_OUTPUT = (Color.BLUE, Attr.LINES)
BOLD_OUTPUT = (Color.BLUE, Attr.BOLD, Attr.LINES)
HEADER_OUTPUT = (Color.GREEN, Attr.LINES)
HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD, Attr.LINES)
ERROR_OUTPUT = (Attr.BOLD, Color.RED, Attr.LINES)
settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')
uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path)
@uses_settings
def msg(message, config, **attr):
return config.get(message).format(**attr)
def main():
import readline
import stem.interpreter.arguments
import stem.interpreter.autocomplete
import stem.interpreter.commands
try:
args = stem.interpreter.arguments.parse(sys.argv[1:])
except ValueError as exc:
print(exc)
sys.exit(1)
if args.print_help:
print(stem.interpreter.arguments.get_help())
sys.exit()
if args.disable_color or not sys.stdout.isatty():
global PROMPT
stem.util.term.DISABLE_COLOR_SUPPORT = True
PROMPT = '>>> '
# If the user isn't connecting to something in particular then offer to start
# tor if it isn't running.
if not (args.user_provided_port or args.user_provided_socket):
is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real')
if not is_tor_running:
if args.tor_path == 'tor' and not stem.util.system.is_available('tor'):
print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT))
sys.exit(1)
else:
if not args.run_cmd and not args.run_path:
print(format(msg('msg.starting_tor'), *HEADER_OUTPUT))
control_port = '9051' if args.control_port == 'default' else str(args.control_port)
try:
stem.process.launch_tor_with_config(
config = {
'SocksPort': '0',
'ControlPort': control_port,
'CookieAuthentication': '1',
'ExitPolicy': 'reject *:*',
},
tor_cmd = args.tor_path,
completion_percent = 5,
take_ownership = True,
)
except OSError as exc:
print(format(msg('msg.unable_to_start_tor', error = exc), *ERROR_OUTPUT))
sys.exit(1)
control_port = (args.control_address, args.control_port)
control_socket = args.control_socket
# If the user explicitly specified an endpoint then just try to connect to
# that.
if args.user_provided_socket and not args.user_provided_port:
control_port = None
elif args.user_provided_port and not args.user_provided_socket:
control_socket = None
controller = stem.connection.connect(
control_port = control_port,
control_socket = control_socket,
password_prompt = True,
)
if controller is None:
sys.exit(1)
with controller:
autocompleter = stem.interpreter.autocomplete.Autocompleter(controller)
readline.parse_and_bind('tab: complete')
readline.set_completer(autocompleter.complete)
readline.set_completer_delims('\n')
interpreter = stem.interpreter.commands.ControlInterpreter(controller)
showed_close_confirmation = False
if args.run_cmd:
if args.run_cmd.upper().startswith('SETEVENTS '):
# TODO: we can use a lambda here when dropping python 2.x support, but
# until then print's status as a keyword prevents it from being used in
# lambdas
def handle_event(event_message):
print(format(str(event_message), *STANDARD_OUTPUT))
controller._handle_event = handle_event
if sys.stdout.isatty():
events = args.run_cmd.upper().split(' ', 1)[1]
print(format('Listening to %s events. Press any key to quit.\n' % events, *HEADER_BOLD_OUTPUT))
controller.msg(args.run_cmd)
try:
input() if stem.prereq.is_python_3() else raw_input()
except (KeyboardInterrupt, stem.SocketClosed):
pass
else:
interpreter.run_command(args.run_cmd, print_response = True)
elif args.run_path:
try:
for line in open(args.run_path).readlines():
interpreter.run_command(line.strip(), print_response = True)
except IOError as exc:
print(format(msg('msg.unable_to_read_file', path = args.run_path, error = exc), *ERROR_OUTPUT))
sys.exit(1)
else:
for line in msg('msg.startup_banner').splitlines():
line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT
print(format(line, *line_format))
print('')
while True:
try:
prompt = '... ' if interpreter.is_multiline_context else PROMPT
user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt)
interpreter.run_command(user_input, print_response = True)
except stem.SocketClosed:
if showed_close_confirmation:
print(format('Unable to run tor commands. The control connection has been closed.', *ERROR_OUTPUT))
else:
prompt = format("Tor's control port has closed. Do you want to continue this interpreter? (y/n) ", *HEADER_BOLD_OUTPUT)
user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt)
print('') # blank line
if user_input.lower() in ('y', 'yes'):
showed_close_confirmation = True
else:
break
except (KeyboardInterrupt, EOFError, stem.SocketClosed):
print('') # move cursor to the following line
break
stem-1.7.1/stem/interpreter/settings.cfg 0000664 0001750 0001750 00000030141 13341034346 021046 0 ustar atagar atagar 0000000 0000000 ################################################################################
#
# Configuration data used by Stem's interpreter prompt.
#
################################################################################
####################
# GENERAL MESSAGES #
####################
msg.multiline_unimplemented_notice Multi-line control options like this are not yet implemented.
msg.help
|Interactive interpreter for Tor. This provides you with direct access
|to Tor's control interface via either python or direct requests.
|
| -i, --interface [ADDRESS:]PORT change control interface from {address}:{port}
| -s, --socket SOCKET_PATH attach using unix domain socket if present,
| SOCKET_PATH defaults to: {socket}
| --tor PATH tor binary if tor isn't already running
| --run executes the given command or file of commands
| --no-color disables colorized output
| -h, --help presents this help
|
msg.startup_banner
|Welcome to Stem's interpreter prompt. This provides you with direct access to
|Tor's control interface.
|
|This acts like a standard python interpreter with a Tor connection available
|via your 'controller' variable...
|
| >>> controller.get_info('version')
| '0.2.5.1-alpha-dev (git-245ecfff36c0cecc)'
|
|You can also issue requests directly to Tor...
|
| >>> GETINFO version
| 250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)
| 250 OK
|
|For more information run '/help'.
|
msg.tor_unavailable Tor isn't running and the tor command isn't in your PATH.
msg.unable_to_start_tor Unable to start tor: {error}
msg.unable_to_read_file Unable to read {path}: {error}
msg.starting_tor
|Tor isn't running. Starting a temporary Tor instance for our interpreter to
|interact with. This will have a minimal non-relaying configuration, and be
|shut down when you're done.
|
|--------------------------------------------------------------------------------
|
###################
# OUTPUT OF /HELP #
###################
# Response for the '/help' command without any arguments.
help.general
|Interpreter commands include:
| /help - provides information for interpreter and tor commands
| /events - prints events that we've received
| /info - general information for a relay
| /python - enable or disable support for running python commands
| /quit - shuts down the interpreter
|
|Tor commands include:
| GETINFO - queries information from tor
| GETCONF, SETCONF, RESETCONF - show or edit a configuration option
| SIGNAL - issues control signal to the process (for resetting, stopping, etc)
| SETEVENTS - configures the events tor will notify us of
|
| USEFEATURE - enables custom behavior for the controller
| SAVECONF - writes tor's current configuration to our torrc
| LOADCONF - loads the given input like it was part of our torrc
| MAPADDRESS - replaces requests for one address with another
| POSTDESCRIPTOR - adds a relay descriptor to our cache
| EXTENDCIRCUIT - create or extend a tor circuit
| SETCIRCUITPURPOSE - configures the purpose associated with a circuit
| CLOSECIRCUIT - closes the given circuit
| ATTACHSTREAM - associates an application's stream with a tor circuit
| REDIRECTSTREAM - sets a stream's destination
| CLOSESTREAM - closes the given stream
| ADD_ONION - create a new hidden service
| DEL_ONION - delete a hidden service that was created with ADD_ONION
| HSFETCH - retrieve a hidden service descriptor, providing it in a HS_DESC_CONTENT event
| HSPOST - uploads a hidden service descriptor
| RESOLVE - issues an asynchronous dns or rdns request over tor
| TAKEOWNERSHIP - instructs tor to quit when this control connection is closed
| PROTOCOLINFO - queries version and controller authentication information
| QUIT - disconnect the control connection
|
|For more information use '/help [OPTION]'.
# Usage of tor and interpreter commands.
help.usage HELP => /help [OPTION]
help.usage EVENTS => /events [types]
help.usage INFO => /info [relay fingerprint, nickname, or IP address]
help.usage PYTHON => /python [enable,disable]
help.usage QUIT => /quit
help.usage GETINFO => GETINFO OPTION
help.usage GETCONF => GETCONF OPTION
help.usage SETCONF => SETCONF PARAM[=VALUE]
help.usage RESETCONF => RESETCONF PARAM[=VALUE]
help.usage SIGNAL => SIGNAL SIG
help.usage SETEVENTS => SETEVENTS [EXTENDED] [EVENTS]
help.usage USEFEATURE => USEFEATURE OPTION
help.usage SAVECONF => SAVECONF
help.usage LOADCONF => LOADCONF...
help.usage MAPADDRESS => MAPADDRESS SOURCE_ADDR=DESTINATION_ADDR
help.usage POSTDESCRIPTOR => POSTDESCRIPTOR [purpose=general/controller/bridge] [cache=yes/no]...
help.usage EXTENDCIRCUIT => EXTENDCIRCUIT CircuitID [PATH] [purpose=general/controller]
help.usage SETCIRCUITPURPOSE => SETCIRCUITPURPOSE CircuitID purpose=general/controller
help.usage CLOSECIRCUIT => CLOSECIRCUIT CircuitID [IfUnused]
help.usage ATTACHSTREAM => ATTACHSTREAM StreamID CircuitID [HOP=HopNum]
help.usage REDIRECTSTREAM => REDIRECTSTREAM StreamID Address [Port]
help.usage CLOSESTREAM => CLOSESTREAM StreamID Reason [Flag]
help.usage ADD_ONION => KeyType:KeyBlob [Flags=Flag] (Port=Port [,Target])...
help.usage DEL_ONION => ServiceID
help.usage HSFETCH => HSFETCH (HSAddress/v2-DescId) [SERVER=Server]...
help.usage HSPOST => [SERVER=Server] DESCRIPTOR
help.usage RESOLVE => RESOLVE [mode=reverse] address
help.usage TAKEOWNERSHIP => TAKEOWNERSHIP
help.usage PROTOCOLINFO => PROTOCOLINFO [ProtocolVersion]
# Longer description of what tor and interpreter commands do.
help.description.help
|Provides usage information for the given interpreter, tor command, or tor
|configuration option.
|
|Example:
| /help info # provides a description of the '/info' option
| /help GETINFO # usage information for tor's GETINFO controller option
help.description.events
|Provides events that we've received belonging to the given event types. If
|no types are specified then this provides all the messages that we've
|received.
|
|You can also run '/events clear' to clear the backlog of events we've
|received.
help.description.info
|Provides information for a relay that's currently in the consensus. If no
|relay is specified then this provides information on ourselves.
help.description.python
|Enables or disables support for running python commands. This determines how
|we treat commands this interpreter doesn't recognize...
|
|* If enabled then unrecognized commands are executed as python.
|* If disabled then unrecognized commands are passed along to tor.
help.description.quit
|Terminates the interpreter.
help.description.getinfo
|Queries the tor process for information. Options are...
|
help.description.getconf
|Provides the current value for a given configuration value. Options include...
|
help.description.setconf
|Sets the given configuration parameters. Values can be quoted or non-quoted
|strings, and reverts the option to 0 or NULL if not provided.
|
|Examples:
| * Sets a contact address and resets our family to NULL
| SETCONF MyFamily ContactInfo=foo@bar.com
|
| * Sets an exit policy that only includes port 80/443
| SETCONF ExitPolicy=\"accept *:80, accept *:443, reject *:*\"\
help.description.resetconf
|Reverts the given configuration options to their default values. If a value
|is provided then this behaves in the same way as SETCONF.
|
|Examples:
| * Returns both of our accounting parameters to their defaults
| RESETCONF AccountingMax AccountingStart
|
| * Uses the default exit policy and sets our nickname to be 'Goomba'
| RESETCONF ExitPolicy Nickname=Goomba
help.description.signal
|Issues a signal that tells the tor process to reload its torrc, dump its
|stats, halt, etc.
help.description.setevents
|Sets the events that we will receive. This turns off any events that aren't
|listed so sending 'SETEVENTS' without any values will turn off all event reporting.
|
|For Tor versions between 0.1.1.9 and 0.2.2.1 adding 'EXTENDED' causes some
|events to give us additional information. After version 0.2.2.1 this is
|always on.
|
|Events include...
|
help.description.usefeature
|Customizes the behavior of the control port. Options include...
|
help.description.saveconf
|Writes Tor's current configuration to its torrc.
help.description.loadconf
|Reads the given text like it belonged to our torrc.
|
|Example:
| +LOADCONF
| # sets our exit policy to just accept ports 80 and 443
| ExitPolicy accept *:80
| ExitPolicy accept *:443
| ExitPolicy reject *:*
| .
help.description.mapaddress
|Replaces future requests for one address with another.
|
|Example:
| MAPADDRESS 0.0.0.0=torproject.org 1.2.3.4=tor.freehaven.net
help.description.postdescriptor
|Simulates getting a new relay descriptor.
help.description.extendcircuit
|Extends the given circuit or create a new one if the CircuitID is zero. The
|PATH is a comma separated list of fingerprints. If it isn't set then this
|uses Tor's normal path selection.
help.description.setcircuitpurpose
|Sets the purpose attribute for a circuit.
help.description.closecircuit
|Closes the given circuit. If "IfUnused" is included then this only closes
|the circuit if it isn't currently being used.
help.description.attachstream
|Attaches a stream to the given built circuit (tor picks one on its own if
|CircuitID is zero). If HopNum is given then this hop is used to exit the
|circuit, otherwise the last relay is used.
help.description.redirectstream
|Sets the destination for a given stream. This can only be done after a
|stream is created but before it's attached to a circuit.
help.description.closestream
|Closes the given stream, the reason being an integer matching a reason as
|per section 6.3 of the tor-spec.
help.description.add_onion
|Creates a new hidden service. Unlike 'SETCONF HiddenServiceDir...' this
|doesn't persist the service to disk.
help.description.del_onion
|Delete a hidden service that was created with ADD_ONION.
help.description.hsfetch
|Retrieves the descriptor for a hidden service. This is an asynchronous
|request, with the descriptor provided by a HS_DESC_CONTENT event.
help.description.hspost
|Uploads a descriptor to a hidden service directory.
help.description.resolve
|Performs IPv4 DNS resolution over tor, doing a reverse lookup instead if
|"mode=reverse" is included. This request is processed in the background and
|results in an ADDRMAP event with the response.
help.description.takeownership
|Instructs Tor to gracefully shut down when this control connection is closed.
help.description.protocolinfo
|Provides bootstrapping information that a controller might need when first
|starting, like Tor's version and controller authentication. This can be done
|before authenticating to the control port.
help.signal.options RELOAD / HUP => reload our torrc
help.signal.options SHUTDOWN / INT => gracefully shut down, waiting 30 seconds if we're a relay
help.signal.options DUMP / USR1 => logs information about open connections and circuits
help.signal.options DEBUG / USR2 => makes us log at the DEBUG runlevel
help.signal.options HALT / TERM => immediately shut down
help.signal.options CLEARDNSCACHE => clears any cached DNS results
help.signal.options NEWNYM => clears the DNS cache and uses new circuits for future connections
##################
# TAB COMPLETION #
##################
# Commands we'll autocomplete when the user hits tab. This is just the start of
# our autocompletion list - more are determined dynamically by checking what
# tor supports.
autocomplete /help
autocomplete /events
autocomplete /info
autocomplete /quit
autocomplete SAVECONF
autocomplete MAPADDRESS
autocomplete EXTENDCIRCUIT
autocomplete SETCIRCUITPURPOSE
autocomplete SETROUTERPURPOSE
autocomplete ATTACHSTREAM
#autocomplete +POSTDESCRIPTOR # TODO: needs multi-line support
autocomplete REDIRECTSTREAM
autocomplete CLOSESTREAM
autocomplete CLOSECIRCUIT
autocomplete QUIT
autocomplete RESOLVE
autocomplete PROTOCOLINFO
#autocomplete +LOADCONF # TODO: needs multi-line support
autocomplete TAKEOWNERSHIP
autocomplete AUTHCHALLENGE
autocomplete DROPGUARDS
autocomplete ADD_ONION NEW:BEST
autocomplete ADD_ONION NEW:RSA1024
autocomplete ADD_ONION NEW:ED25519-V3
autocomplete ADD_ONION RSA1024:
autocomplete ADD_ONION ED25519-V3:
autocomplete DEL_ONION
autocomplete HSFETCH
autocomplete HSPOST
stem-1.7.1/stem/interpreter/autocomplete.py 0000664 0001750 0001750 00000005743 13341474573 021624 0 ustar atagar atagar 0000000 0000000 # Copyright 2014-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Tab completion for our interpreter prompt.
"""
import stem.prereq
from stem.interpreter import uses_settings
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
@uses_settings
def _get_commands(controller, config):
"""
Provides commands recognized by tor.
"""
commands = config.get('autocomplete', [])
if controller is None:
return commands
# GETINFO commands. Lines are of the form '[option] -- [description]'. This
# strips '*' from options that accept values.
results = controller.get_info('info/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0].rstrip('*')
commands.append('GETINFO %s' % option)
else:
commands.append('GETINFO ')
# GETCONF, SETCONF, and RESETCONF commands. Lines are of the form
# '[option] [type]'.
results = controller.get_info('config/names', None)
if results:
for line in results.splitlines():
option = line.split(' ', 1)[0]
commands.append('GETCONF %s' % option)
commands.append('SETCONF %s' % option)
commands.append('RESETCONF %s' % option)
else:
commands += ['GETCONF ', 'SETCONF ', 'RESETCONF ']
# SETEVENTS, USEFEATURE, and SIGNAL commands. For each of these the GETINFO
# results are simply a space separated lists of the values they can have.
options = (
('SETEVENTS ', 'events/names'),
('USEFEATURE ', 'features/names'),
('SIGNAL ', 'signal/names'),
)
for prefix, getinfo_cmd in options:
results = controller.get_info(getinfo_cmd, None)
if results:
commands += [prefix + value for value in results.split()]
else:
commands.append(prefix)
# Adds /help commands.
usage_info = config.get('help.usage', {})
for cmd in usage_info.keys():
commands.append('/help ' + cmd)
return commands
class Autocompleter(object):
def __init__(self, controller):
self._commands = _get_commands(controller)
@lru_cache()
def matches(self, text):
"""
Provides autocompletion matches for the given text.
:param str text: text to check for autocompletion matches with
:returns: **list** with possible matches
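For example, a minimal sketch (without a controller only the static
commands from our settings are available)...
::
>>> Autocompleter(None).matches('/he')
['/help']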
"""
lowercase_text = text.lower()
return [cmd for cmd in self._commands if cmd.lower().startswith(lowercase_text)]
def complete(self, text, state):
"""
Provides case insensitive autocompletion options, acting as a functor for
readline's set_completer function.
:param str text: text to check for autocompletion matches with
:param int state: index of result to be provided, readline fetches matches
until this function provides None
:returns: **str** with the autocompletion match, **None** if either none
exists or state is higher than our number of matches
"""
try:
return self.matches(text)[state]
except IndexError:
return None
stem-1.7.1/stem/cached_fallbacks.cfg 0000664 0001750 0001750 00000143367 13341034346 020073 0 ustar atagar atagar 0000000 0000000 tor_commit a42e52dded44a2c58a7200511e27a5c0e01cd78b
stem_commit 4d7cc882b5b8966f69232d8489bb5b07226abc81
header.timestamp 20180106205601
header.version 2.0.0
header.type fallback
001524DD403D729F08F7E5D77813EF12756CFA8D.address 185.13.39.197
001524DD403D729F08F7E5D77813EF12756CFA8D.or_port 443
001524DD403D729F08F7E5D77813EF12756CFA8D.dir_port 80
001524DD403D729F08F7E5D77813EF12756CFA8D.nickname Neldoreth
001524DD403D729F08F7E5D77813EF12756CFA8D.has_extrainfo false
0111BA9B604669E636FFD5B503F382A4B7AD6E80.address 176.10.104.240
0111BA9B604669E636FFD5B503F382A4B7AD6E80.or_port 443
0111BA9B604669E636FFD5B503F382A4B7AD6E80.dir_port 80
0111BA9B604669E636FFD5B503F382A4B7AD6E80.nickname DigiGesTor1e1
0111BA9B604669E636FFD5B503F382A4B7AD6E80.has_extrainfo false
025B66CEBC070FCB0519D206CF0CF4965C20C96E.address 185.100.85.61
025B66CEBC070FCB0519D206CF0CF4965C20C96E.or_port 443
025B66CEBC070FCB0519D206CF0CF4965C20C96E.dir_port 80
025B66CEBC070FCB0519D206CF0CF4965C20C96E.nickname nibbana
025B66CEBC070FCB0519D206CF0CF4965C20C96E.has_extrainfo false
0756B7CD4DFC8182BE23143FAC0642F515182CEB.address 5.9.110.236
0756B7CD4DFC8182BE23143FAC0642F515182CEB.or_port 9001
0756B7CD4DFC8182BE23143FAC0642F515182CEB.dir_port 9030
0756B7CD4DFC8182BE23143FAC0642F515182CEB.nickname rueckgrat
0756B7CD4DFC8182BE23143FAC0642F515182CEB.has_extrainfo true
0756B7CD4DFC8182BE23143FAC0642F515182CEB.orport6_address 2a01:4f8:162:51e2::2
0756B7CD4DFC8182BE23143FAC0642F515182CEB.orport6_port 9001
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.address 163.172.149.155
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.or_port 443
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.dir_port 80
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.nickname niij02
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.has_extrainfo false
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.address 5.39.92.199
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.or_port 443
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.dir_port 80
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.nickname BaelorTornodePw
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.has_extrainfo false
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.orport6_address 2001:41d0:8:b1c7::1
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.orport6_port 443
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.address 163.172.25.118
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.or_port 22
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.dir_port 80
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.nickname torpidsFRonline4
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.has_extrainfo false
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.address 178.62.197.82
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.or_port 443
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.dir_port 80
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.nickname HY100
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.has_extrainfo false
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.address 185.100.86.100
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.or_port 443
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.dir_port 80
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.nickname saveyourprivacyex1
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.has_extrainfo false
11DF0017A43AF1F08825CD5D973297F81AB00FF3.address 37.120.174.249
11DF0017A43AF1F08825CD5D973297F81AB00FF3.or_port 443
11DF0017A43AF1F08825CD5D973297F81AB00FF3.dir_port 80
11DF0017A43AF1F08825CD5D973297F81AB00FF3.nickname gGDHjdcC6zAlM8k08lX
11DF0017A43AF1F08825CD5D973297F81AB00FF3.has_extrainfo false
11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_address 2a03:4000:6:724c:df98:15f9:b34d:443
11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_port 443
12AD30E5D25AA67F519780E2111E611A455FDC89.address 193.11.114.43
12AD30E5D25AA67F519780E2111E611A455FDC89.or_port 9001
12AD30E5D25AA67F519780E2111E611A455FDC89.dir_port 9030
12AD30E5D25AA67F519780E2111E611A455FDC89.nickname mdfnet1
12AD30E5D25AA67F519780E2111E611A455FDC89.has_extrainfo false
12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_address 2001:6b0:30:1000::99
12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_port 9050
12FD624EE73CEF37137C90D38B2406A66F68FAA2.address 37.157.195.87
12FD624EE73CEF37137C90D38B2406A66F68FAA2.or_port 443
12FD624EE73CEF37137C90D38B2406A66F68FAA2.dir_port 8030
12FD624EE73CEF37137C90D38B2406A66F68FAA2.nickname thanatosCZ
12FD624EE73CEF37137C90D38B2406A66F68FAA2.has_extrainfo false
136F9299A5009A4E0E96494E723BDB556FB0A26B.address 178.16.208.59
136F9299A5009A4E0E96494E723BDB556FB0A26B.or_port 443
136F9299A5009A4E0E96494E723BDB556FB0A26B.dir_port 80
136F9299A5009A4E0E96494E723BDB556FB0A26B.nickname bakunin2
136F9299A5009A4E0E96494E723BDB556FB0A26B.has_extrainfo false
136F9299A5009A4E0E96494E723BDB556FB0A26B.orport6_address 2a00:1c20:4089:1234:bff6:e1bb:1ce3:8dc6
136F9299A5009A4E0E96494E723BDB556FB0A26B.orport6_port 443
16102E458460349EE45C0901DAA6C30094A9BBEA.address 163.172.138.22
16102E458460349EE45C0901DAA6C30094A9BBEA.or_port 443
16102E458460349EE45C0901DAA6C30094A9BBEA.dir_port 80
16102E458460349EE45C0901DAA6C30094A9BBEA.nickname mkultra
16102E458460349EE45C0901DAA6C30094A9BBEA.has_extrainfo false
16102E458460349EE45C0901DAA6C30094A9BBEA.orport6_address 2001:bc8:4400:2100::1:3
16102E458460349EE45C0901DAA6C30094A9BBEA.orport6_port 443
175921396C7C426309AB03775A9930B6F611F794.address 178.62.60.37
175921396C7C426309AB03775A9930B6F611F794.or_port 443
175921396C7C426309AB03775A9930B6F611F794.dir_port 80
175921396C7C426309AB03775A9930B6F611F794.nickname lovejoy
175921396C7C426309AB03775A9930B6F611F794.has_extrainfo false
185663B7C12777F052B2C2D23D7A239D8DA88A0F.address 171.25.193.25
185663B7C12777F052B2C2D23D7A239D8DA88A0F.or_port 443
185663B7C12777F052B2C2D23D7A239D8DA88A0F.dir_port 80
185663B7C12777F052B2C2D23D7A239D8DA88A0F.nickname DFRI5
185663B7C12777F052B2C2D23D7A239D8DA88A0F.has_extrainfo false
185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_address 2001:67c:289c::25
185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_port 443
1938EBACBB1A7BFA888D9623C90061130E63BB3F.address 149.56.141.138
1938EBACBB1A7BFA888D9623C90061130E63BB3F.or_port 9001
1938EBACBB1A7BFA888D9623C90061130E63BB3F.dir_port 9030
1938EBACBB1A7BFA888D9623C90061130E63BB3F.nickname Aerodynamik04
1938EBACBB1A7BFA888D9623C90061130E63BB3F.has_extrainfo false
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.address 81.7.14.253
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.or_port 443
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.dir_port 9001
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.nickname Ichotolot60
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.has_extrainfo false
1C90D3AEADFF3BCD079810632C8B85637924A58E.address 163.172.53.84
1C90D3AEADFF3BCD079810632C8B85637924A58E.or_port 21
1C90D3AEADFF3BCD079810632C8B85637924A58E.dir_port 143
1C90D3AEADFF3BCD079810632C8B85637924A58E.nickname Multivac
1C90D3AEADFF3BCD079810632C8B85637924A58E.has_extrainfo false
1C90D3AEADFF3BCD079810632C8B85637924A58E.orport6_address 2001:bc8:24f8::
1C90D3AEADFF3BCD079810632C8B85637924A58E.orport6_port 21
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.address 46.101.151.222
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.or_port 443
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.dir_port 80
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.nickname flanders
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.has_extrainfo false
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.address 91.219.237.229
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.or_port 443
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.dir_port 80
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.nickname JakeDidNothingWrong
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.has_extrainfo false
1F6ABD086F40B890A33C93CC4606EE68B31C9556.address 199.184.246.250
1F6ABD086F40B890A33C93CC4606EE68B31C9556.or_port 443
1F6ABD086F40B890A33C93CC4606EE68B31C9556.dir_port 80
1F6ABD086F40B890A33C93CC4606EE68B31C9556.nickname dao
1F6ABD086F40B890A33C93CC4606EE68B31C9556.has_extrainfo false
1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_address 2620:124:1009:1::171
1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_port 443
1FA8F638298645BE58AC905276680889CB795A94.address 185.129.249.124
1FA8F638298645BE58AC905276680889CB795A94.or_port 9001
1FA8F638298645BE58AC905276680889CB795A94.dir_port 9030
1FA8F638298645BE58AC905276680889CB795A94.nickname treadstone
1FA8F638298645BE58AC905276680889CB795A94.has_extrainfo false
20462CBA5DA4C2D963567D17D0B7249718114A68.address 212.47.229.2
20462CBA5DA4C2D963567D17D0B7249718114A68.or_port 9001
20462CBA5DA4C2D963567D17D0B7249718114A68.dir_port 9030
20462CBA5DA4C2D963567D17D0B7249718114A68.nickname scaletor
20462CBA5DA4C2D963567D17D0B7249718114A68.has_extrainfo false
20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_address 2001:bc8:4400:2100::f03
20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_port 9001
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.address 77.247.181.164
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.or_port 443
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.dir_port 80
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.nickname HaveHeart
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.has_extrainfo false
230A8B2A8BA861210D9B4BA97745AEC217A94207.address 163.172.176.167
230A8B2A8BA861210D9B4BA97745AEC217A94207.or_port 443
230A8B2A8BA861210D9B4BA97745AEC217A94207.dir_port 80
230A8B2A8BA861210D9B4BA97745AEC217A94207.nickname niij01
230A8B2A8BA861210D9B4BA97745AEC217A94207.has_extrainfo false
231C2B9C8C31C295C472D031E06964834B745996.address 37.200.98.5
231C2B9C8C31C295C472D031E06964834B745996.or_port 443
231C2B9C8C31C295C472D031E06964834B745996.dir_port 80
231C2B9C8C31C295C472D031E06964834B745996.nickname torpidsDEdomainf
231C2B9C8C31C295C472D031E06964834B745996.has_extrainfo false
231C2B9C8C31C295C472D031E06964834B745996.orport6_address 2a00:1158:3::11a
231C2B9C8C31C295C472D031E06964834B745996.orport6_port 993
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.address 138.201.250.33
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.or_port 9011
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.dir_port 9012
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.nickname storm
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.has_extrainfo false
2CDCFED0142B28B002E89D305CBA2E26063FADE2.address 178.16.208.56
2CDCFED0142B28B002E89D305CBA2E26063FADE2.or_port 443
2CDCFED0142B28B002E89D305CBA2E26063FADE2.dir_port 80
2CDCFED0142B28B002E89D305CBA2E26063FADE2.nickname jaures
2CDCFED0142B28B002E89D305CBA2E26063FADE2.has_extrainfo false
2CDCFED0142B28B002E89D305CBA2E26063FADE2.orport6_address 2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec
2CDCFED0142B28B002E89D305CBA2E26063FADE2.orport6_port 443
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.address 97.74.237.196
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.or_port 9001
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.dir_port 9030
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.nickname Minotaur
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.has_extrainfo false
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.address 64.113.32.29
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.or_port 9001
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.dir_port 9030
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.nickname Libero
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.has_extrainfo false
328E54981C6DDD7D89B89E418724A4A7881E3192.address 80.127.117.180
328E54981C6DDD7D89B89E418724A4A7881E3192.or_port 443
328E54981C6DDD7D89B89E418724A4A7881E3192.dir_port 80
328E54981C6DDD7D89B89E418724A4A7881E3192.nickname sjc01
328E54981C6DDD7D89B89E418724A4A7881E3192.has_extrainfo false
328E54981C6DDD7D89B89E418724A4A7881E3192.orport6_address 2001:985:e77:10::4
328E54981C6DDD7D89B89E418724A4A7881E3192.orport6_port 443
330CD3DB6AD266DC70CDB512B036957D03D9BC59.address 185.100.84.212
330CD3DB6AD266DC70CDB512B036957D03D9BC59.or_port 443
330CD3DB6AD266DC70CDB512B036957D03D9BC59.dir_port 80
330CD3DB6AD266DC70CDB512B036957D03D9BC59.nickname TeamTardis
330CD3DB6AD266DC70CDB512B036957D03D9BC59.has_extrainfo false
330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_address 2a06:1700:0:7::1
330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_port 443
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.address 163.172.13.165
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.or_port 9001
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.dir_port 9030
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.nickname mullbinde9
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.has_extrainfo false
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.orport6_address 2001:bc8:38cb:201::8
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.orport6_port 9001
3711E80B5B04494C971FB0459D4209AB7F2EA799.address 91.121.23.100
3711E80B5B04494C971FB0459D4209AB7F2EA799.or_port 9001
3711E80B5B04494C971FB0459D4209AB7F2EA799.dir_port 9030
3711E80B5B04494C971FB0459D4209AB7F2EA799.nickname 0x3d002
3711E80B5B04494C971FB0459D4209AB7F2EA799.has_extrainfo false
379FB450010D17078B3766C2273303C358C3A442.address 176.126.252.12
379FB450010D17078B3766C2273303C358C3A442.or_port 8080
379FB450010D17078B3766C2273303C358C3A442.dir_port 21
379FB450010D17078B3766C2273303C358C3A442.nickname aurora
379FB450010D17078B3766C2273303C358C3A442.has_extrainfo true
379FB450010D17078B3766C2273303C358C3A442.orport6_address 2a02:59e0:0:7::12
379FB450010D17078B3766C2273303C358C3A442.orport6_port 81
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.address 62.210.92.11
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.or_port 9101
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.dir_port 9130
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.nickname redjohn1
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.has_extrainfo false
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.orport6_address 2001:bc8:338c::1
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.orport6_port 9101
39F096961ED2576975C866D450373A9913AFDC92.address 198.50.191.95
39F096961ED2576975C866D450373A9913AFDC92.or_port 443
39F096961ED2576975C866D450373A9913AFDC92.dir_port 80
39F096961ED2576975C866D450373A9913AFDC92.nickname thomas
39F096961ED2576975C866D450373A9913AFDC92.has_extrainfo false
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.address 164.132.77.175
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.or_port 9001
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.dir_port 9030
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.nickname rofltor1
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.has_extrainfo false
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.address 212.83.154.33
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.or_port 443
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.dir_port 8888
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.nickname bauruine203
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.has_extrainfo false
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.address 176.10.107.180
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.or_port 9001
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.dir_port 9030
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.nickname schokomilch
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.has_extrainfo false
3E53D3979DB07EFD736661C934A1DED14127B684.address 217.79.179.177
3E53D3979DB07EFD736661C934A1DED14127B684.or_port 9001
3E53D3979DB07EFD736661C934A1DED14127B684.dir_port 9030
3E53D3979DB07EFD736661C934A1DED14127B684.nickname Unnamed
3E53D3979DB07EFD736661C934A1DED14127B684.has_extrainfo false
3E53D3979DB07EFD736661C934A1DED14127B684.orport6_address 2001:4ba0:fff9:131:6c4f::90d3
3E53D3979DB07EFD736661C934A1DED14127B684.orport6_port 9001
4061C553CA88021B8302F0814365070AAE617270.address 185.100.85.101
4061C553CA88021B8302F0814365070AAE617270.or_port 9001
4061C553CA88021B8302F0814365070AAE617270.dir_port 9030
4061C553CA88021B8302F0814365070AAE617270.nickname TorExitRomania
4061C553CA88021B8302F0814365070AAE617270.has_extrainfo false
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.address 199.249.223.61
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.or_port 443
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.dir_port 80
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.nickname Quintex12
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.has_extrainfo false
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.address 178.17.170.156
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.or_port 9001
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.dir_port 9030
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.nickname TorExitMoldova2
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.has_extrainfo false
439D0447772CB107B886F7782DBC201FA26B92D1.address 178.62.86.96
439D0447772CB107B886F7782DBC201FA26B92D1.or_port 9001
439D0447772CB107B886F7782DBC201FA26B92D1.dir_port 9030
439D0447772CB107B886F7782DBC201FA26B92D1.nickname pablobm001
439D0447772CB107B886F7782DBC201FA26B92D1.has_extrainfo false
439D0447772CB107B886F7782DBC201FA26B92D1.orport6_address 2a03:b0c0:1:d0::3cf:7001
439D0447772CB107B886F7782DBC201FA26B92D1.orport6_port 9050
4623A9EC53BFD83155929E56D6F7B55B5E718C24.address 163.172.157.213
4623A9EC53BFD83155929E56D6F7B55B5E718C24.or_port 443
4623A9EC53BFD83155929E56D6F7B55B5E718C24.dir_port 8080
4623A9EC53BFD83155929E56D6F7B55B5E718C24.nickname Cotopaxi
4623A9EC53BFD83155929E56D6F7B55B5E718C24.has_extrainfo false
46791D156C9B6C255C2665D4D8393EC7DBAA7798.address 31.31.78.49
46791D156C9B6C255C2665D4D8393EC7DBAA7798.or_port 443
46791D156C9B6C255C2665D4D8393EC7DBAA7798.dir_port 80
46791D156C9B6C255C2665D4D8393EC7DBAA7798.nickname KrigHaBandolo
46791D156C9B6C255C2665D4D8393EC7DBAA7798.has_extrainfo false
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.address 193.70.43.76
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.or_port 9001
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.dir_port 9030
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.nickname Aerodynamik03
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.has_extrainfo false
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.address 37.187.102.186
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.or_port 9001
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.dir_port 9030
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.nickname txtfileTorNode65536
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.has_extrainfo false
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_address 2001:41d0:a:26ba::1
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_port 9001
4CC9CC9195EC38645B699A33307058624F660CCF.address 51.254.101.242
4CC9CC9195EC38645B699A33307058624F660CCF.or_port 9001
4CC9CC9195EC38645B699A33307058624F660CCF.dir_port 9002
4CC9CC9195EC38645B699A33307058624F660CCF.nickname devsum
4CC9CC9195EC38645B699A33307058624F660CCF.has_extrainfo false
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.address 108.53.208.157
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.or_port 443
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.dir_port 80
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.nickname Binnacle
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.has_extrainfo true
50586E25BE067FD1F739998550EDDCB1A14CA5B2.address 212.51.134.123
50586E25BE067FD1F739998550EDDCB1A14CA5B2.or_port 9001
50586E25BE067FD1F739998550EDDCB1A14CA5B2.dir_port 9030
50586E25BE067FD1F739998550EDDCB1A14CA5B2.nickname Jans
50586E25BE067FD1F739998550EDDCB1A14CA5B2.has_extrainfo false
51E1CF613FD6F9F11FE24743C91D6F9981807D82.address 81.7.16.182
51E1CF613FD6F9F11FE24743C91D6F9981807D82.or_port 443
51E1CF613FD6F9F11FE24743C91D6F9981807D82.dir_port 80
51E1CF613FD6F9F11FE24743C91D6F9981807D82.nickname torpidsDEisppro3
51E1CF613FD6F9F11FE24743C91D6F9981807D82.has_extrainfo false
51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_address 2a02:180:1:1::517:10b6
51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_port 993
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.address 85.25.159.65
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.or_port 80
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.dir_port 995
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.nickname BeastieJoy63
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.has_extrainfo false
587E0A9552E4274B251F29B5B2673D38442EE4BF.address 95.130.12.119
587E0A9552E4274B251F29B5B2673D38442EE4BF.or_port 443
587E0A9552E4274B251F29B5B2673D38442EE4BF.dir_port 80
587E0A9552E4274B251F29B5B2673D38442EE4BF.nickname Nuath
587E0A9552E4274B251F29B5B2673D38442EE4BF.has_extrainfo false
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.address 185.21.100.50
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.or_port 9001
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.dir_port 9030
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.nickname SamAAdams2
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.has_extrainfo false
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.orport6_address 2a00:1158:2:cd00:0:74:6f:72
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.orport6_port 443
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.address 172.98.193.43
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.or_port 443
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.dir_port 80
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.nickname Backplane
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.has_extrainfo false
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.address 199.249.223.74
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.or_port 443
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.dir_port 80
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.nickname QuintexAirVPN7
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.has_extrainfo false
616081EC829593AF4232550DE6FFAA1D75B37A90.address 95.128.43.164
616081EC829593AF4232550DE6FFAA1D75B37A90.or_port 443
616081EC829593AF4232550DE6FFAA1D75B37A90.dir_port 80
616081EC829593AF4232550DE6FFAA1D75B37A90.nickname AquaRayTerminus
616081EC829593AF4232550DE6FFAA1D75B37A90.has_extrainfo false
616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_address 2a02:ec0:209:10::4
616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_port 443
68F175CCABE727AA2D2309BCD8789499CEE36ED7.address 163.172.139.104
68F175CCABE727AA2D2309BCD8789499CEE36ED7.or_port 443
68F175CCABE727AA2D2309BCD8789499CEE36ED7.dir_port 8080
68F175CCABE727AA2D2309BCD8789499CEE36ED7.nickname Pichincha
68F175CCABE727AA2D2309BCD8789499CEE36ED7.has_extrainfo false
6A7551EEE18F78A9813096E82BF84F740D32B911.address 85.214.62.48
6A7551EEE18F78A9813096E82BF84F740D32B911.or_port 443
6A7551EEE18F78A9813096E82BF84F740D32B911.dir_port 80
6A7551EEE18F78A9813096E82BF84F740D32B911.nickname TorMachine
6A7551EEE18F78A9813096E82BF84F740D32B911.has_extrainfo false
6EF897645B79B6CB35E853B32506375014DE3621.address 80.127.137.19
6EF897645B79B6CB35E853B32506375014DE3621.or_port 443
6EF897645B79B6CB35E853B32506375014DE3621.dir_port 80
6EF897645B79B6CB35E853B32506375014DE3621.nickname d6relay
6EF897645B79B6CB35E853B32506375014DE3621.has_extrainfo false
6EF897645B79B6CB35E853B32506375014DE3621.orport6_address 2001:981:47c1:1::6
6EF897645B79B6CB35E853B32506375014DE3621.orport6_port 443
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.address 85.235.250.88
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.or_port 443
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.dir_port 80
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.nickname TykRelay01
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.has_extrainfo false
7600680249A22080ECC6173FBBF64D6FCF330A61.address 81.7.14.31
7600680249A22080ECC6173FBBF64D6FCF330A61.or_port 443
7600680249A22080ECC6173FBBF64D6FCF330A61.dir_port 9001
7600680249A22080ECC6173FBBF64D6FCF330A61.nickname Ichotolot62
7600680249A22080ECC6173FBBF64D6FCF330A61.has_extrainfo false
763C9556602BD6207771A7A3D958091D44C43228.address 134.119.36.135
763C9556602BD6207771A7A3D958091D44C43228.or_port 443
763C9556602BD6207771A7A3D958091D44C43228.dir_port 80
763C9556602BD6207771A7A3D958091D44C43228.nickname torpidsDEdomainf2
763C9556602BD6207771A7A3D958091D44C43228.has_extrainfo false
763C9556602BD6207771A7A3D958091D44C43228.orport6_address 2a00:1158:3::2a8
763C9556602BD6207771A7A3D958091D44C43228.orport6_port 993
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.address 188.166.133.133
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.or_port 9001
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.dir_port 9030
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.nickname dropsy
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.has_extrainfo false
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.orport6_address 2a03:b0c0:2:d0::26c0:1
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.orport6_port 9001
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.address 5.196.23.64
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.or_port 9001
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.dir_port 9030
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.nickname Aerodynamik01
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.has_extrainfo false
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.address 81.30.158.213
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.or_port 9001
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.dir_port 9030
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.nickname dumpster
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.has_extrainfo false
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.orport6_address 2001:4ba0:cafe:e84::1
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.orport6_port 9001
78E2BE744A53631B4AAB781468E94C52AB73968B.address 104.200.20.46
78E2BE744A53631B4AAB781468E94C52AB73968B.or_port 9001
78E2BE744A53631B4AAB781468E94C52AB73968B.dir_port 80
78E2BE744A53631B4AAB781468E94C52AB73968B.nickname bynumlawtor
78E2BE744A53631B4AAB781468E94C52AB73968B.has_extrainfo false
79E169B25E4C7CE99584F6ED06F379478F23E2B8.address 62.210.129.246
79E169B25E4C7CE99584F6ED06F379478F23E2B8.or_port 443
79E169B25E4C7CE99584F6ED06F379478F23E2B8.dir_port 80
79E169B25E4C7CE99584F6ED06F379478F23E2B8.nickname MilesPrower
79E169B25E4C7CE99584F6ED06F379478F23E2B8.has_extrainfo false
7A32C9519D80CA458FC8B034A28F5F6815649A98.address 82.223.21.74
7A32C9519D80CA458FC8B034A28F5F6815649A98.or_port 9001
7A32C9519D80CA458FC8B034A28F5F6815649A98.dir_port 9030
7A32C9519D80CA458FC8B034A28F5F6815649A98.nickname silentrocket
7A32C9519D80CA458FC8B034A28F5F6815649A98.has_extrainfo false
7A32C9519D80CA458FC8B034A28F5F6815649A98.orport6_address 2001:470:53e0::cafe
7A32C9519D80CA458FC8B034A28F5F6815649A98.orport6_port 9050
7BB70F8585DFC27E75D692970C0EEB0F22983A63.address 51.254.136.195
7BB70F8585DFC27E75D692970C0EEB0F22983A63.or_port 443
7BB70F8585DFC27E75D692970C0EEB0F22983A63.dir_port 80
7BB70F8585DFC27E75D692970C0EEB0F22983A63.nickname torproxy02
7BB70F8585DFC27E75D692970C0EEB0F22983A63.has_extrainfo false
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.address 77.247.181.162
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.or_port 443
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.dir_port 80
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.nickname sofia
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.has_extrainfo false
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.address 185.100.84.82
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.or_port 443
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.dir_port 80
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.nickname saveyourprivacyexit
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.has_extrainfo false
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.address 199.249.223.69
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.or_port 443
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.dir_port 80
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.nickname Quintex20
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.has_extrainfo false
80AAF8D5956A43C197104CEF2550CD42D165C6FB.address 193.11.114.45
80AAF8D5956A43C197104CEF2550CD42D165C6FB.or_port 9002
80AAF8D5956A43C197104CEF2550CD42D165C6FB.dir_port 9031
80AAF8D5956A43C197104CEF2550CD42D165C6FB.nickname mdfnet2
80AAF8D5956A43C197104CEF2550CD42D165C6FB.has_extrainfo false
8456DFA94161CDD99E480C2A2992C366C6564410.address 62.210.254.132
8456DFA94161CDD99E480C2A2992C366C6564410.or_port 443
8456DFA94161CDD99E480C2A2992C366C6564410.dir_port 80
8456DFA94161CDD99E480C2A2992C366C6564410.nickname turingmachine
8456DFA94161CDD99E480C2A2992C366C6564410.has_extrainfo false
855BC2DABE24C861CD887DB9B2E950424B49FC34.address 85.230.184.93
855BC2DABE24C861CD887DB9B2E950424B49FC34.or_port 443
855BC2DABE24C861CD887DB9B2E950424B49FC34.dir_port 9030
855BC2DABE24C861CD887DB9B2E950424B49FC34.nickname Logforme
855BC2DABE24C861CD887DB9B2E950424B49FC34.has_extrainfo false
8567AD0A6369ED08527A8A8533A5162AC00F7678.address 72.52.75.27
8567AD0A6369ED08527A8A8533A5162AC00F7678.or_port 9001
8567AD0A6369ED08527A8A8533A5162AC00F7678.dir_port 9030
8567AD0A6369ED08527A8A8533A5162AC00F7678.nickname piecoopdotnet
8567AD0A6369ED08527A8A8533A5162AC00F7678.has_extrainfo false
86C281AD135058238D7A337D546C902BE8505DDE.address 185.96.88.29
86C281AD135058238D7A337D546C902BE8505DDE.or_port 443
86C281AD135058238D7A337D546C902BE8505DDE.dir_port 80
86C281AD135058238D7A337D546C902BE8505DDE.nickname TykRelay05
86C281AD135058238D7A337D546C902BE8505DDE.has_extrainfo false
88487BDD980BF6E72092EE690E8C51C0AA4A538C.address 176.10.104.243
88487BDD980BF6E72092EE690E8C51C0AA4A538C.or_port 443
88487BDD980BF6E72092EE690E8C51C0AA4A538C.dir_port 80
88487BDD980BF6E72092EE690E8C51C0AA4A538C.nickname DigiGesTor2e1
88487BDD980BF6E72092EE690E8C51C0AA4A538C.has_extrainfo false
8C00FA7369A7A308F6A137600F0FA07990D9D451.address 163.172.194.53
8C00FA7369A7A308F6A137600F0FA07990D9D451.or_port 9001
8C00FA7369A7A308F6A137600F0FA07990D9D451.dir_port 9030
8C00FA7369A7A308F6A137600F0FA07990D9D451.nickname GrmmlLitavis
8C00FA7369A7A308F6A137600F0FA07990D9D451.has_extrainfo false
8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_address 2001:bc8:225f:142:6c69:7461:7669:73
8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_port 9001
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.address 5.189.169.190
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.or_port 8080
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.dir_port 8030
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.nickname thanatosDE
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.has_extrainfo false
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.address 151.80.42.103
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.or_port 9001
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.dir_port 9030
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.nickname matlink
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.has_extrainfo false
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.orport6_address 2001:41d0:e:f67::114
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.orport6_port 9001
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.address 37.187.20.59
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.or_port 443
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.dir_port 80
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.nickname torpidsFRovh
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.has_extrainfo false
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_address 2001:41d0:a:143b::1
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_port 993
9285B22F7953D7874604EEE2B470609AD81C74E9.address 62.138.7.171
9285B22F7953D7874604EEE2B470609AD81C74E9.or_port 8001
9285B22F7953D7874604EEE2B470609AD81C74E9.dir_port 8030
9285B22F7953D7874604EEE2B470609AD81C74E9.nickname 0x3d005
9285B22F7953D7874604EEE2B470609AD81C74E9.has_extrainfo false
92CFD9565B24646CAC2D172D3DB503D69E777B8A.address 178.16.208.57
92CFD9565B24646CAC2D172D3DB503D69E777B8A.or_port 443
92CFD9565B24646CAC2D172D3DB503D69E777B8A.dir_port 80
92CFD9565B24646CAC2D172D3DB503D69E777B8A.nickname bakunin
92CFD9565B24646CAC2D172D3DB503D69E777B8A.has_extrainfo false
92CFD9565B24646CAC2D172D3DB503D69E777B8A.orport6_address 2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f
92CFD9565B24646CAC2D172D3DB503D69E777B8A.orport6_port 443
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.address 91.219.237.244
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.or_port 443
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.dir_port 80
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.nickname lewwerDuarUesSlaav
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.has_extrainfo false
9772EFB535397C942C3AB8804FB35CFFAD012438.address 37.153.1.10
9772EFB535397C942C3AB8804FB35CFFAD012438.or_port 9001
9772EFB535397C942C3AB8804FB35CFFAD012438.dir_port 9030
9772EFB535397C942C3AB8804FB35CFFAD012438.nickname smallsweatnode
9772EFB535397C942C3AB8804FB35CFFAD012438.has_extrainfo false
998BF3ED7F70E33D1C307247B9626D9E7573C438.address 163.172.223.200
998BF3ED7F70E33D1C307247B9626D9E7573C438.or_port 443
998BF3ED7F70E33D1C307247B9626D9E7573C438.dir_port 80
998BF3ED7F70E33D1C307247B9626D9E7573C438.nickname Outfall2
998BF3ED7F70E33D1C307247B9626D9E7573C438.has_extrainfo false
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.address 91.229.20.27
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.or_port 9001
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.dir_port 9030
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.nickname gordonkeybag
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.has_extrainfo false
9A68B85A02318F4E7E87F2828039FBD5D75B0142.address 66.111.2.20
9A68B85A02318F4E7E87F2828039FBD5D75B0142.or_port 9001
9A68B85A02318F4E7E87F2828039FBD5D75B0142.dir_port 9030
9A68B85A02318F4E7E87F2828039FBD5D75B0142.nickname NYCBUG0
9A68B85A02318F4E7E87F2828039FBD5D75B0142.has_extrainfo false
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.address 185.100.86.128
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.or_port 9001
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.dir_port 9030
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.nickname TorExitFinland
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.has_extrainfo false
9EC5E097663862DF861A18C32B37C5F82284B27D.address 146.185.177.103
9EC5E097663862DF861A18C32B37C5F82284B27D.or_port 9030
9EC5E097663862DF861A18C32B37C5F82284B27D.dir_port 80
9EC5E097663862DF861A18C32B37C5F82284B27D.nickname Winter
9EC5E097663862DF861A18C32B37C5F82284B27D.has_extrainfo false
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.address 199.249.223.64
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.or_port 443
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.dir_port 80
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.nickname Quintex15
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.has_extrainfo false
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.address 46.28.110.244
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.or_port 443
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.dir_port 80
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.nickname Nivrim
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.has_extrainfo false
9FBEB75E8BC142565F12CBBE078D63310236A334.address 91.121.84.137
9FBEB75E8BC142565F12CBBE078D63310236A334.or_port 4052
9FBEB75E8BC142565F12CBBE078D63310236A334.dir_port 4952
9FBEB75E8BC142565F12CBBE078D63310236A334.nickname lindon
9FBEB75E8BC142565F12CBBE078D63310236A334.has_extrainfo false
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.address 46.165.230.5
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.or_port 443
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.dir_port 80
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.nickname Dhalgren
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.has_extrainfo true
A10C4F666D27364036B562823E5830BC448E046A.address 171.25.193.77
A10C4F666D27364036B562823E5830BC448E046A.or_port 443
A10C4F666D27364036B562823E5830BC448E046A.dir_port 80
A10C4F666D27364036B562823E5830BC448E046A.nickname DFRI1
A10C4F666D27364036B562823E5830BC448E046A.has_extrainfo false
A10C4F666D27364036B562823E5830BC448E046A.orport6_address 2001:67c:289c:3::77
A10C4F666D27364036B562823E5830BC448E046A.orport6_port 443
A2E6BB5C391CD46B38C55B4329C35304540771F1.address 81.7.3.67
A2E6BB5C391CD46B38C55B4329C35304540771F1.or_port 443
A2E6BB5C391CD46B38C55B4329C35304540771F1.dir_port 993
A2E6BB5C391CD46B38C55B4329C35304540771F1.nickname BeastieJoy62
A2E6BB5C391CD46B38C55B4329C35304540771F1.has_extrainfo false
A478E421F83194C114F41E94F95999672AED51FE.address 171.25.193.78
A478E421F83194C114F41E94F95999672AED51FE.or_port 443
A478E421F83194C114F41E94F95999672AED51FE.dir_port 80
A478E421F83194C114F41E94F95999672AED51FE.nickname DFRI4
A478E421F83194C114F41E94F95999672AED51FE.has_extrainfo false
A478E421F83194C114F41E94F95999672AED51FE.orport6_address 2001:67c:289c:3::78
A478E421F83194C114F41E94F95999672AED51FE.orport6_port 443
A4C98CEA3F34E05299417E9F885A642C88EF6029.address 178.16.208.58
A4C98CEA3F34E05299417E9F885A642C88EF6029.or_port 443
A4C98CEA3F34E05299417E9F885A642C88EF6029.dir_port 80
A4C98CEA3F34E05299417E9F885A642C88EF6029.nickname jaures2
A4C98CEA3F34E05299417E9F885A642C88EF6029.has_extrainfo false
A4C98CEA3F34E05299417E9F885A642C88EF6029.orport6_address 2a00:1c20:4089:1234:cdae:1b3e:cc38:3d45
A4C98CEA3F34E05299417E9F885A642C88EF6029.orport6_port 443
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.address 163.172.149.122
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.or_port 443
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.dir_port 80
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.nickname niij03
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.has_extrainfo false
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.address 195.154.164.243
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.or_port 443
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.dir_port 80
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.nickname torpidsFRonline3
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.has_extrainfo false
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.address 86.59.119.88
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.or_port 443
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.dir_port 80
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.nickname ph3x
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.has_extrainfo false
ACDD9E85A05B127BA010466C13C8C47212E8A38F.address 185.129.62.62
ACDD9E85A05B127BA010466C13C8C47212E8A38F.or_port 9001
ACDD9E85A05B127BA010466C13C8C47212E8A38F.dir_port 9030
ACDD9E85A05B127BA010466C13C8C47212E8A38F.nickname kramse
ACDD9E85A05B127BA010466C13C8C47212E8A38F.has_extrainfo false
ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_address 2a06:d380:0:3700::62
ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_port 9001
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.address 188.40.128.246
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.or_port 9001
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.dir_port 9030
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.nickname sputnik
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.has_extrainfo false
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.orport6_address 2a01:4f8:221:1ac1:dead:beef:7005:9001
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.orport6_port 9001
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.address 176.126.252.11
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.or_port 9001
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.dir_port 443
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.nickname chulak
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.has_extrainfo true
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.orport6_address 2a02:59e0:0:7::11
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.orport6_port 9003
B0553175AADB0501E5A61FC61CEA3970BE130FF2.address 5.9.147.226
B0553175AADB0501E5A61FC61CEA3970BE130FF2.or_port 9001
B0553175AADB0501E5A61FC61CEA3970BE130FF2.dir_port 9030
B0553175AADB0501E5A61FC61CEA3970BE130FF2.nickname zwiubel
B0553175AADB0501E5A61FC61CEA3970BE130FF2.has_extrainfo false
B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_address 2a01:4f8:190:30e1::2
B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_port 9001
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.address 178.17.174.14
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.or_port 9001
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.dir_port 9030
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.nickname TorExitMoldova
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.has_extrainfo false
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.address 199.249.223.40
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.or_port 443
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.dir_port 80
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.nickname Quintex31
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.has_extrainfo false
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.address 212.129.62.232
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.or_port 443
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.dir_port 80
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.nickname wardsback
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.has_extrainfo false
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.address 136.243.214.137
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.or_port 443
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.dir_port 80
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.nickname TorKIT
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.has_extrainfo false
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.address 212.47.233.86
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.or_port 9001
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.dir_port 9030
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.nickname netimanmu
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.has_extrainfo false
B5212DB685A2A0FCFBAE425738E478D12361710D.address 93.115.97.242
B5212DB685A2A0FCFBAE425738E478D12361710D.or_port 9001
B5212DB685A2A0FCFBAE425738E478D12361710D.dir_port 9030
B5212DB685A2A0FCFBAE425738E478D12361710D.nickname firstor
B5212DB685A2A0FCFBAE425738E478D12361710D.has_extrainfo false
B6904ADD4C0D10CDA7179E051962350A69A63243.address 81.2.209.10
B6904ADD4C0D10CDA7179E051962350A69A63243.or_port 80
B6904ADD4C0D10CDA7179E051962350A69A63243.dir_port 443
B6904ADD4C0D10CDA7179E051962350A69A63243.nickname torzabehlice
B6904ADD4C0D10CDA7179E051962350A69A63243.has_extrainfo false
B6904ADD4C0D10CDA7179E051962350A69A63243.orport6_address 2001:15e8:201:1::d10a
B6904ADD4C0D10CDA7179E051962350A69A63243.orport6_port 80
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.address 193.11.114.46
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.or_port 9003
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.dir_port 9032
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.nickname mdfnet3
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.has_extrainfo false
B86137AE9681701901C6720E55C16805B46BD8E3.address 81.7.11.186
B86137AE9681701901C6720E55C16805B46BD8E3.or_port 443
B86137AE9681701901C6720E55C16805B46BD8E3.dir_port 1080
B86137AE9681701901C6720E55C16805B46BD8E3.nickname BeastieJoy60
B86137AE9681701901C6720E55C16805B46BD8E3.has_extrainfo false
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.address 197.231.221.211
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.or_port 443
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.dir_port 9030
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.nickname IPredator
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.has_extrainfo false
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.address 198.96.155.3
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.or_port 5001
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.dir_port 8080
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.nickname gurgle
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.has_extrainfo false
BCEF908195805E03E92CCFE669C48738E556B9C5.address 128.199.55.207
BCEF908195805E03E92CCFE669C48738E556B9C5.or_port 9001
BCEF908195805E03E92CCFE669C48738E556B9C5.dir_port 9030
BCEF908195805E03E92CCFE669C48738E556B9C5.nickname EldritchReaper
BCEF908195805E03E92CCFE669C48738E556B9C5.has_extrainfo false
BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_address 2a03:b0c0:2:d0::158:3001
BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_port 9001
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.address 213.141.138.174
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.or_port 9001
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.dir_port 9030
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.nickname Schakalium
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.has_extrainfo false
BF735F669481EE1CCC348F0731551C933D1E2278.address 104.192.5.248
BF735F669481EE1CCC348F0731551C933D1E2278.or_port 9001
BF735F669481EE1CCC348F0731551C933D1E2278.dir_port 9030
BF735F669481EE1CCC348F0731551C933D1E2278.nickname Freeway11
BF735F669481EE1CCC348F0731551C933D1E2278.has_extrainfo false
C2AAB088555850FC434E68943F551072042B85F1.address 31.185.104.21
C2AAB088555850FC434E68943F551072042B85F1.or_port 443
C2AAB088555850FC434E68943F551072042B85F1.dir_port 80
C2AAB088555850FC434E68943F551072042B85F1.nickname Digitalcourage3ip3
C2AAB088555850FC434E68943F551072042B85F1.has_extrainfo false
C37BC191AC389179674578C3E6944E925FE186C2.address 213.239.217.18
C37BC191AC389179674578C3E6944E925FE186C2.or_port 1337
C37BC191AC389179674578C3E6944E925FE186C2.dir_port 1338
C37BC191AC389179674578C3E6944E925FE186C2.nickname xzdsb
C37BC191AC389179674578C3E6944E925FE186C2.has_extrainfo false
C37BC191AC389179674578C3E6944E925FE186C2.orport6_address 2a01:4f8:a0:746a:101:1:1:1
C37BC191AC389179674578C3E6944E925FE186C2.orport6_port 1337
C414F28FD2BEC1553024299B31D4E726BEB8E788.address 188.138.112.60
C414F28FD2BEC1553024299B31D4E726BEB8E788.or_port 1521
C414F28FD2BEC1553024299B31D4E726BEB8E788.dir_port 1433
C414F28FD2BEC1553024299B31D4E726BEB8E788.nickname zebra620
C414F28FD2BEC1553024299B31D4E726BEB8E788.has_extrainfo false
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.address 199.249.223.66
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.or_port 443
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.dir_port 80
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.nickname Quintex17
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.has_extrainfo false
CE47F0356D86CF0A1A2008D97623216D560FB0A8.address 85.25.213.211
CE47F0356D86CF0A1A2008D97623216D560FB0A8.or_port 80
CE47F0356D86CF0A1A2008D97623216D560FB0A8.dir_port 465
CE47F0356D86CF0A1A2008D97623216D560FB0A8.nickname BeastieJoy61
CE47F0356D86CF0A1A2008D97623216D560FB0A8.has_extrainfo false
CED527EAC230E7B56E5B363F839671829C3BA01B.address 51.15.13.245
CED527EAC230E7B56E5B363F839671829C3BA01B.or_port 9001
CED527EAC230E7B56E5B363F839671829C3BA01B.dir_port 9030
CED527EAC230E7B56E5B363F839671829C3BA01B.nickname 0x3d006
CED527EAC230E7B56E5B363F839671829C3BA01B.has_extrainfo false
D30E9D4D639068611D6D96861C95C2099140B805.address 46.38.237.221
D30E9D4D639068611D6D96861C95C2099140B805.or_port 9001
D30E9D4D639068611D6D96861C95C2099140B805.dir_port 9030
D30E9D4D639068611D6D96861C95C2099140B805.nickname mine
D30E9D4D639068611D6D96861C95C2099140B805.has_extrainfo false
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.address 31.171.155.108
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.or_port 9001
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.dir_port 9030
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.nickname TorNodeAlbania
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.has_extrainfo false
D64366987CB39F61AD21DBCF8142FA0577B92811.address 37.221.162.226
D64366987CB39F61AD21DBCF8142FA0577B92811.or_port 9001
D64366987CB39F61AD21DBCF8142FA0577B92811.dir_port 9030
D64366987CB39F61AD21DBCF8142FA0577B92811.nickname kasperskytor01
D64366987CB39F61AD21DBCF8142FA0577B92811.has_extrainfo false
D760C5B436E42F93D77EF2D969157EEA14F9B39C.address 46.101.169.151
D760C5B436E42F93D77EF2D969157EEA14F9B39C.or_port 9001
D760C5B436E42F93D77EF2D969157EEA14F9B39C.dir_port 9030
D760C5B436E42F93D77EF2D969157EEA14F9B39C.nickname DanWin1210
D760C5B436E42F93D77EF2D969157EEA14F9B39C.has_extrainfo false
D760C5B436E42F93D77EF2D969157EEA14F9B39C.orport6_address 2a03:b0c0:3:d0::74f:a001
D760C5B436E42F93D77EF2D969157EEA14F9B39C.orport6_port 9001
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.address 85.10.201.47
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.or_port 9001
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.dir_port 9030
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.nickname sif
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.has_extrainfo false
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_address 2a01:4f8:a0:43eb::beef
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_port 9001
DAA39FC00B196B353C2A271459C305C429AF09E4.address 193.35.52.53
DAA39FC00B196B353C2A271459C305C429AF09E4.or_port 9001
DAA39FC00B196B353C2A271459C305C429AF09E4.dir_port 9030
DAA39FC00B196B353C2A271459C305C429AF09E4.nickname Arne
DAA39FC00B196B353C2A271459C305C429AF09E4.has_extrainfo false
DD823AFB415380A802DCAEB9461AE637604107FB.address 178.33.183.251
DD823AFB415380A802DCAEB9461AE637604107FB.or_port 443
DD823AFB415380A802DCAEB9461AE637604107FB.dir_port 80
DD823AFB415380A802DCAEB9461AE637604107FB.nickname grenouille
DD823AFB415380A802DCAEB9461AE637604107FB.has_extrainfo false
DD823AFB415380A802DCAEB9461AE637604107FB.orport6_address 2001:41d0:2:a683::251
DD823AFB415380A802DCAEB9461AE637604107FB.orport6_port 443
DD8BD7307017407FCC36F8D04A688F74A0774C02.address 171.25.193.20
DD8BD7307017407FCC36F8D04A688F74A0774C02.or_port 443
DD8BD7307017407FCC36F8D04A688F74A0774C02.dir_port 80
DD8BD7307017407FCC36F8D04A688F74A0774C02.nickname DFRI0
DD8BD7307017407FCC36F8D04A688F74A0774C02.has_extrainfo false
DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_address 2001:67c:289c::20
DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_port 443
DED6892FF89DBD737BA689698A171B2392EB3E82.address 92.222.38.67
DED6892FF89DBD737BA689698A171B2392EB3E82.or_port 443
DED6892FF89DBD737BA689698A171B2392EB3E82.dir_port 80
DED6892FF89DBD737BA689698A171B2392EB3E82.nickname ThorExit
DED6892FF89DBD737BA689698A171B2392EB3E82.has_extrainfo false
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.address 166.70.207.2
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.or_port 9001
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.dir_port 9030
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.nickname xmission
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.has_extrainfo false
E480D577F58E782A5BC4FA6F49A6650E9389302F.address 199.249.223.43
E480D577F58E782A5BC4FA6F49A6650E9389302F.or_port 443
E480D577F58E782A5BC4FA6F49A6650E9389302F.dir_port 80
E480D577F58E782A5BC4FA6F49A6650E9389302F.nickname Quintex34
E480D577F58E782A5BC4FA6F49A6650E9389302F.has_extrainfo false
E589316576A399C511A9781A73DA4545640B479D.address 46.252.26.2
E589316576A399C511A9781A73DA4545640B479D.or_port 49991
E589316576A399C511A9781A73DA4545640B479D.dir_port 45212
E589316576A399C511A9781A73DA4545640B479D.nickname marlen
E589316576A399C511A9781A73DA4545640B479D.has_extrainfo false
E781F4EC69671B3F1864AE2753E0890351506329.address 176.31.180.157
E781F4EC69671B3F1864AE2753E0890351506329.or_port 22
E781F4EC69671B3F1864AE2753E0890351506329.dir_port 143
E781F4EC69671B3F1864AE2753E0890351506329.nickname armbrust
E781F4EC69671B3F1864AE2753E0890351506329.has_extrainfo false
E781F4EC69671B3F1864AE2753E0890351506329.orport6_address 2001:41d0:8:eb9d::1
E781F4EC69671B3F1864AE2753E0890351506329.orport6_port 22
E81EF60A73B3809F8964F73766B01BAA0A171E20.address 212.47.244.38
E81EF60A73B3809F8964F73766B01BAA0A171E20.or_port 443
E81EF60A73B3809F8964F73766B01BAA0A171E20.dir_port 8080
E81EF60A73B3809F8964F73766B01BAA0A171E20.nickname Chimborazo
E81EF60A73B3809F8964F73766B01BAA0A171E20.has_extrainfo false
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.address 217.182.75.181
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.or_port 9001
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.dir_port 9030
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.nickname Aerodynamik02
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.has_extrainfo false
F4263275CF54A6836EE7BD527B1328836A6F06E1.address 37.187.102.108
F4263275CF54A6836EE7BD527B1328836A6F06E1.or_port 443
F4263275CF54A6836EE7BD527B1328836A6F06E1.dir_port 80
F4263275CF54A6836EE7BD527B1328836A6F06E1.nickname EvilMoe
F4263275CF54A6836EE7BD527B1328836A6F06E1.has_extrainfo false
F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_address 2001:41d0:a:266c::1
F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_port 443
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.address 46.28.109.231
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.or_port 9001
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.dir_port 9030
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.nickname wedostor
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.has_extrainfo false
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.orport6_address 2a02:2b88:2:1::4205:1
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.orport6_port 9001
F93D8F37E35C390BCAD9F9069E13085B745EC216.address 185.96.180.29
F93D8F37E35C390BCAD9F9069E13085B745EC216.or_port 443
F93D8F37E35C390BCAD9F9069E13085B745EC216.dir_port 80
F93D8F37E35C390BCAD9F9069E13085B745EC216.nickname TykRelay06
F93D8F37E35C390BCAD9F9069E13085B745EC216.has_extrainfo false
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.address 86.59.119.83
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.or_port 443
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.dir_port 80
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.nickname ph3x
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.has_extrainfo false
FE296180018833AF03A8EACD5894A614623D3F76.address 149.56.45.200
FE296180018833AF03A8EACD5894A614623D3F76.or_port 9001
FE296180018833AF03A8EACD5894A614623D3F76.dir_port 9030
FE296180018833AF03A8EACD5894A614623D3F76.nickname PiotrTorpotkinOne
FE296180018833AF03A8EACD5894A614623D3F76.has_extrainfo false
FE296180018833AF03A8EACD5894A614623D3F76.orport6_address 2607:5300:201:3000::17d3
FE296180018833AF03A8EACD5894A614623D3F76.orport6_port 9002
stem-1.7.1/stem/connection.py 0000664 0001750 0001750 00000134756 13341034346 016714 0 ustar atagar atagar 0000000 0000000 # Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Functions for connecting and authenticating to the tor process.
The :func:`~stem.connection.connect` function gives an easy, one-line
method for getting an authenticated control connection. This is handy for CLI
applications and the python interactive interpreter, but does several things
that makes it undesirable for applications (uses stdin/stdout, suppresses
exceptions, etc).
::
import sys
from stem.connection import connect
if __name__ == '__main__':
controller = connect()
if not controller:
sys.exit(1) # unable to get a connection
print('Tor is running version %s' % controller.get_version())
controller.close()
::
% python example.py
Tor is running version 0.2.4.10-alpha-dev (git-8be6058d8f31e578)
... or if Tor isn't running...
::
% python example.py
[Errno 111] Connection refused
The :func:`~stem.connection.authenticate` function, however, gives easy but
fine-grained control over the authentication process. For instance...
::
import sys
import getpass
import stem.connection
import stem.socket
try:
control_socket = stem.socket.ControlPort(port = 9051)
except stem.SocketError as exc:
print('Unable to connect to port 9051 (%s)' % exc)
sys.exit(1)
try:
stem.connection.authenticate(control_socket)
except stem.connection.IncorrectSocketType:
print('Please check in your torrc that 9051 is the ControlPort.')
print('Maybe you configured it to be the ORPort or SocksPort instead?')
sys.exit(1)
except stem.connection.MissingPassword:
controller_password = getpass.getpass('Controller password: ')
try:
stem.connection.authenticate_password(control_socket, controller_password)
except stem.connection.PasswordAuthFailed:
print('Unable to authenticate, password is incorrect')
sys.exit(1)
except stem.connection.AuthenticationFailure as exc:
print('Unable to authenticate: %s' % exc)
sys.exit(1)
**Module Overview:**
::
connect - Simple method for getting authenticated control connection
authenticate - Main method for authenticating to a control socket
authenticate_none - Authenticates to an open control socket
authenticate_password - Authenticates to a socket supporting password auth
authenticate_cookie - Authenticates to a socket supporting cookie auth
authenticate_safecookie - Authenticates to a socket supporting safecookie auth
get_protocolinfo - Issues a PROTOCOLINFO query
AuthenticationFailure - Base exception raised for authentication failures
|- UnrecognizedAuthMethods - Authentication methods are unsupported
|- IncorrectSocketType - Socket does not speak the tor control protocol
|
|- OpenAuthFailed - Failure when authenticating by an open socket
| +- OpenAuthRejected - Tor rejected this method of authentication
|
|- PasswordAuthFailed - Failure when authenticating by a password
| |- PasswordAuthRejected - Tor rejected this method of authentication
| |- IncorrectPassword - Password was rejected
| +- MissingPassword - Socket supports password auth but wasn't attempted
|
|- CookieAuthFailed - Failure when authenticating by a cookie
| |- CookieAuthRejected - Tor rejected this method of authentication
| |- IncorrectCookieValue - Authentication cookie was rejected
| |- IncorrectCookieSize - Size of the cookie file is incorrect
| |- UnreadableCookieFile - Unable to read the contents of the auth cookie
| +- AuthChallengeFailed - Failure completing the authchallenge request
| |- AuthChallengeUnsupported - Tor doesn't recognize the AUTHCHALLENGE command
| |- AuthSecurityFailure - Server provided the wrong nonce credentials
| |- InvalidClientNonce - The client nonce is invalid
| +- UnrecognizedAuthChallengeMethod - AUTHCHALLENGE does not support the given methods.
|
+- MissingAuthInfo - Unexpected PROTOCOLINFO response, missing auth info
|- NoAuthMethods - Missing any methods for authenticating
+- NoAuthCookie - Supports cookie auth but doesn't have its path
.. data:: AuthMethod (enum)
Enumeration of PROTOCOLINFO responses for supported authentication methods.
============== ===========
AuthMethod Description
============== ===========
**NONE** No authentication required.
**PASSWORD** Password required, see tor's HashedControlPassword option.
**COOKIE** Contents of the cookie file required, see tor's CookieAuthentication option.
**SAFECOOKIE** Need to reply to an HMAC challenge using the contents of the cookie file.
**UNKNOWN** Tor provided one or more authentication methods that we don't recognize, probably something new.
============== ===========
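For example, to see which of these methods tor will accept, issue a
PROTOCOLINFO query. This is a brief sketch; it assumes the response exposes
an **auth_methods** attribute listing the advertised methods...
::
  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)
  protocolinfo_response = stem.connection.get_protocolinfo(control_socket)

  print('tor accepts: %s' % ', '.join(protocolinfo_response.auth_methods))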
"""
import binascii
import getpass
import os
import stem.control
import stem.response
import stem.socket
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.system
import stem.version
from stem.util import log
AuthMethod = stem.util.enum.Enum('NONE', 'PASSWORD', 'COOKIE', 'SAFECOOKIE', 'UNKNOWN')
CLIENT_HASH_CONSTANT = b'Tor safe cookie authentication controller-to-server hash'
SERVER_HASH_CONSTANT = b'Tor safe cookie authentication server-to-controller hash'
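# A brief illustration of how these constants key the SAFECOOKIE handshake,
# per tor's control-spec. The cookie and nonce variables below are
# hypothetical byte strings, shown only to make the derivation concrete:
#
#   import hashlib
#   import hmac
#
#   # ClientHash = HMAC-SHA256(CLIENT_HASH_CONSTANT, cookie | client_nonce | server_nonce)
#   client_hash = hmac.new(CLIENT_HASH_CONSTANT, cookie + client_nonce + server_nonce, hashlib.sha256).digest()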
MISSING_PASSWORD_BUG_MSG = """
BUG: You provided a password but despite this stem reported that it was
missing. This shouldn't happen - please let us know about it!
http://bugs.torproject.org
"""
UNRECOGNIZED_AUTH_TYPE_MSG = """
Tor is using a type of authentication we do not recognize...
{auth_methods}
Please check that stem is up to date and whether there's an existing issue on
'http://bugs.torproject.org'. If there isn't one then let us know!
"""
UNREADABLE_COOKIE_FILE_MSG = """
We were unable to read tor's authentication cookie...
Path: {path}
Issue: {issue}
"""
WRONG_PORT_TYPE_MSG = """
Please check in your torrc that {port} is the ControlPort. Maybe you
configured it to be the ORPort or SocksPort instead?
"""
WRONG_SOCKET_TYPE_MSG = """
Unable to connect to tor. Are you sure the interface you specified belongs to
tor?
"""
CONNECT_MESSAGES = {
'general_auth_failure': 'Unable to authenticate: {error}',
'incorrect_password': 'Incorrect password',
'no_control_port': "Unable to connect to tor. Maybe it's running without a ControlPort?",
'password_prompt': 'Tor controller password:',
'needs_password': 'Tor requires a password to authenticate',
'socket_doesnt_exist': "The socket file you specified ({path}) doesn't exist",
'tor_isnt_running': "Unable to connect to tor. Are you sure it's running?",
'unable_to_use_port': 'Unable to connect to {address}:{port}: {error}',
'unable_to_use_socket': "Unable to connect to '{path}': {error}",
'missing_password_bug': MISSING_PASSWORD_BUG_MSG.strip(),
'uncrcognized_auth_type': UNRECOGNIZED_AUTH_TYPE_MSG.strip(),
'unreadable_cookie_file': UNREADABLE_COOKIE_FILE_MSG.strip(),
'wrong_port_type': WRONG_PORT_TYPE_MSG.strip(),
'wrong_socket_type': WRONG_SOCKET_TYPE_MSG.strip(),
}
COMMON_TOR_COMMANDS = (
'tor',
'tor.real', # command run by the Tor Browser Bundle
'/usr/local/bin/tor', # FreeBSD expands the whole path; this is the default location
)
def connect(control_port = ('127.0.0.1', 'default'), control_socket = '/var/run/tor/control', password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. This is very
handy for debugging or CLI setup, prompting for a password if one is
required but not provided. If any issues arise this prints a
description of the problem and returns **None**.
If both a **control_port** and **control_socket** are provided then the
**control_socket** is tried first, and this provides a generic error message
if they're both unavailable.
In much the same vein as git porcelain commands, users should not rely on
details of how this works. Messages and details of this function's behavior
could change in the future.
If the **port** is **'default'** then this checks both 9051 (the default for
relays) and 9151 (the default for the Tor Browser). This default may change in
the future.
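For instance, the following sketch relies on that default behavior, trying
port 9051 and then 9151 on localhost...
::
  from stem.connection import connect

  controller = connect(control_port = ('127.0.0.1', 'default'))

  if controller:
    controller.close()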
.. versionadded:: 1.2.0
.. versionchanged:: 1.5.0
Use both port 9051 and 9151 by default.
:param tuple control_port: address and port tuple, for instance **('127.0.0.1', 9051)**
:param str control_socket: path where the control socket is located
:param str password: passphrase to authenticate to the socket
:param bool password_prompt: prompt for the controller password if it wasn't
supplied
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
:raises: **ValueError** if given an invalid control_port, or both
**control_port** and **control_socket** are **None**
"""
if control_port is None and control_socket is None:
raise ValueError('Neither a control port nor control socket were provided. Nothing to connect to.')
elif control_port:
if len(control_port) != 2:
raise ValueError('The control_port argument for connect() should be an (address, port) tuple.')
elif not stem.util.connection.is_valid_ipv4_address(control_port[0]):
raise ValueError("'%s' isn't a vaid IPv4 address" % control_port[0])
elif control_port[1] != 'default' and not stem.util.connection.is_valid_port(control_port[1]):
raise ValueError("'%s' isn't a valid port" % control_port[1])
control_connection, error_msg = None, ''
if control_socket:
if os.path.exists(control_socket):
try:
control_connection = stem.socket.ControlSocketFile(control_socket)
except stem.SocketError as exc:
error_msg = CONNECT_MESSAGES['unable_to_use_socket'].format(path = control_socket, error = exc)
else:
error_msg = CONNECT_MESSAGES['socket_doesnt_exist'].format(path = control_socket)
if control_port and not control_connection:
address, port = control_port
try:
if port == 'default':
control_connection = _connection_for_default_port(address)
else:
control_connection = stem.socket.ControlPort(address, int(port))
except stem.SocketError as exc:
error_msg = CONNECT_MESSAGES['unable_to_use_port'].format(address = address, port = port, error = exc)
# If unable to connect to either a control socket or port then finally fail
# out. If we only attempted to connect to one of them then provide the error
# output from that. Otherwise we provide a more generic error message.
if not control_connection:
if control_socket and control_port:
is_tor_running = stem.util.system.is_running(COMMON_TOR_COMMANDS)
error_msg = CONNECT_MESSAGES['no_control_port'] if is_tor_running else CONNECT_MESSAGES['tor_isnt_running']
print(error_msg)
return None
return _connect_auth(control_connection, password, password_prompt, chroot_path, controller)
def connect_port(address = '127.0.0.1', port = 9051, password = None, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. This is very
handy for debugging or CLI usage, handling the connection details and
prompting for a password if one is necessary but wasn't provided. If any
issues arise this prints a description of the problem and returns **None**.
.. deprecated:: 1.2.0
Use :func:`~stem.connection.connect` instead.
:param str address: ip address of the controller
:param int port: port number of the controller
:param str password: passphrase to authenticate to the socket
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
"""
try:
control_port = stem.socket.ControlPort(address, port)
except stem.SocketError as exc:
print(exc)
return None
return _connect_auth(control_port, password, True, chroot_path, controller)
def connect_socket_file(path = '/var/run/tor/control', password = None, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. For more
information see the :func:`~stem.connection.connect_port` function.
In much the same vein as git porcelain commands, users should not rely on
details of how this works. Messages or details of this function's behavior
might change in the future.
.. deprecated:: 1.2.0
Use :func:`~stem.connection.connect` instead.
:param str path: path where the control socket is located
:param str password: passphrase to authenticate to the socket
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
"""
try:
control_socket = stem.socket.ControlSocketFile(path)
except stem.SocketError as exc:
print(exc)
return None
return _connect_auth(control_socket, password, True, chroot_path, controller)
def _connect_auth(control_socket, password, password_prompt, chroot_path, controller):
"""
Helper for the connect_* functions that authenticates the socket and
constructs the controller.
:param stem.socket.ControlSocket control_socket: socket being authenticated to
:param str password: passphrase to authenticate to the socket
:param bool password_prompt: prompt for the controller password if it wasn't
supplied
:param str chroot_path: path prefix if in a chroot environment
:param Class controller: :class:`~stem.control.BaseController` subclass to be
returned, this provides a :class:`~stem.socket.ControlSocket` if **None**
:returns: authenticated control connection, the type based on the controller argument
"""
try:
authenticate(control_socket, password, chroot_path)
if controller is None:
return control_socket
else:
return controller(control_socket, is_authenticated = True)
except IncorrectSocketType:
if isinstance(control_socket, stem.socket.ControlPort):
print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.port))
else:
print(CONNECT_MESSAGES['wrong_socket_type'])
control_socket.close()
return None
except UnrecognizedAuthMethods as exc:
print(CONNECT_MESSAGES['unrecognized_auth_type'].format(auth_methods = ', '.join(exc.unknown_auth_methods)))
control_socket.close()
return None
except IncorrectPassword:
print(CONNECT_MESSAGES['incorrect_password'])
control_socket.close()
return None
except MissingPassword:
if password is not None:
control_socket.close()
raise ValueError(CONNECT_MESSAGES['missing_password_bug'])
if password_prompt:
try:
password = getpass.getpass(CONNECT_MESSAGES['password_prompt'] + ' ')
except KeyboardInterrupt:
control_socket.close()
return None
return _connect_auth(control_socket, password, password_prompt, chroot_path, controller)
else:
print(CONNECT_MESSAGES['needs_password'])
control_socket.close()
return None
except UnreadableCookieFile as exc:
print(CONNECT_MESSAGES['unreadable_cookie_file'].format(path = exc.cookie_path, issue = str(exc)))
control_socket.close()
return None
except AuthenticationFailure as exc:
print(CONNECT_MESSAGES['general_auth_failure'].format(error = exc))
control_socket.close()
return None
def authenticate(controller, password = None, chroot_path = None, protocolinfo_response = None):
"""
Authenticates to a control socket using the information provided by a
PROTOCOLINFO response. In practice this will often be all we need to
authenticate, raising an exception if all attempts to authenticate fail.
All exceptions are subclasses of AuthenticationFailure so, in practice,
callers should catch the types of authentication failure that they care
about, then have a :class:`~stem.connection.AuthenticationFailure` catch-all
at the end.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
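For instance, a brief sketch of the suggested catch-all pattern (port 9051
here is just tor's usual relay default)...

::

  import sys

  import stem.connection
  import stem.socket

  try:
    control_socket = stem.socket.ControlPort('127.0.0.1', 9051)
    stem.connection.authenticate(control_socket)
  except stem.connection.IncorrectSocketType:
    print('Please check in your torrc that 9051 is the ControlPort')
    sys.exit(1)
  except stem.connection.MissingPassword:
    print('Tor requires a password to authenticate')
    sys.exit(1)
  except stem.connection.AuthenticationFailure as exc:
    print('Unable to authenticate: %s' % exc)
    sys.exit(1)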
:param controller: tor controller or socket to be authenticated
:param str password: passphrase to present to the socket if it uses password
authentication (skips password auth if **None**)
:param str chroot_path: path prefix if in a chroot environment
:param stem.response.protocolinfo.ProtocolInfoResponse protocolinfo_response:
tor protocolinfo response, this is retrieved on our own if **None**
:raises: If all attempts to authenticate fail then this will raise a
:class:`~stem.connection.AuthenticationFailure` subclass. Since this may
try multiple authentication methods it may encounter multiple exceptions.
If so then the exception this raises is prioritized as follows...
* :class:`stem.connection.IncorrectSocketType`
The controller does not speak the tor control protocol. Most often this
happens because the user confused the SocksPort or ORPort with the
ControlPort.
* :class:`stem.connection.UnrecognizedAuthMethods`
All of the authentication methods tor will accept are new and
unrecognized. Please upgrade stem and, if that doesn't work, file a
ticket on 'trac.torproject.org' and I'd be happy to add support.
* :class:`stem.connection.MissingPassword`
We were unable to authenticate but didn't attempt password authentication
because none was provided. You should prompt the user for a password and
try again via 'authenticate_password'.
* :class:`stem.connection.IncorrectPassword`
We were provided with a password but it was incorrect.
* :class:`stem.connection.IncorrectCookieSize`
Tor allows for authentication by reading a cookie file, but that file
is the wrong size to be an authentication cookie.
* :class:`stem.connection.UnreadableCookieFile`
Tor allows for authentication by reading a cookie file, but we can't
read that file (probably due to permissions).
* **\***:class:`stem.connection.IncorrectCookieValue`
Tor allows for authentication by reading a cookie file, but rejected
the contents of that file.
* **\***:class:`stem.connection.AuthChallengeUnsupported`
Tor doesn't recognize the AUTHCHALLENGE command. This is probably a Tor
version prior to SAFECOOKIE being implemented, but this exception shouldn't
arise because we won't attempt SAFECOOKIE auth unless Tor claims to
support it.
* **\***:class:`stem.connection.UnrecognizedAuthChallengeMethod`
Tor couldn't recognize the AUTHCHALLENGE method Stem sent to it. This
shouldn't happen at all.
* **\***:class:`stem.connection.InvalidClientNonce`
Tor says that the client nonce provided by Stem during the AUTHCHALLENGE
process is invalid.
* **\***:class:`stem.connection.AuthSecurityFailure`
Nonce value provided by the server was invalid.
* **\***:class:`stem.connection.OpenAuthRejected`
Tor says that it allows for authentication without any credentials, but
then rejected our authentication attempt.
* **\***:class:`stem.connection.MissingAuthInfo`
Tor provided us with a PROTOCOLINFO reply that is technically valid, but
missing the information we need to authenticate.
* **\***:class:`stem.connection.AuthenticationFailure`
There are numerous other ways that authentication could have failed
including socket failures, malformed controller responses, etc. These
mostly constitute transient failures or bugs.
**\*** In practice it is highly unusual for this to occur, being more of a
theoretical possibility rather than something you should expect. It's fine
to treat these as errors. If you have a use case where this commonly
happens, please file a ticket on 'trac.torproject.org'.
In the future new :class:`~stem.connection.AuthenticationFailure`
subclasses may be added to allow for better error handling.
"""
if not protocolinfo_response:
try:
protocolinfo_response = get_protocolinfo(controller)
except stem.ProtocolError:
raise IncorrectSocketType('unable to use the control socket')
except stem.SocketError as exc:
raise AuthenticationFailure('socket connection failed (%s)' % exc)
auth_methods = list(protocolinfo_response.auth_methods)
auth_exceptions = []
if len(auth_methods) == 0:
raise NoAuthMethods('our PROTOCOLINFO response did not have any methods for authenticating')
# remove authentication methods that are either unknown or for which we don't
# have an input
if AuthMethod.UNKNOWN in auth_methods:
auth_methods.remove(AuthMethod.UNKNOWN)
unknown_methods = protocolinfo_response.unknown_auth_methods
plural_label = 's' if len(unknown_methods) > 1 else ''
methods_label = ', '.join(unknown_methods)
# we... er, can't do anything with only unrecognized auth types
if not auth_methods:
exc_msg = 'unrecognized authentication method%s (%s)' % (plural_label, methods_label)
auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods))
else:
log.debug('Authenticating to a socket with unrecognized auth method%s, ignoring them: %s' % (plural_label, methods_label))
if protocolinfo_response.cookie_path is None:
for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
if cookie_auth_method in auth_methods:
auth_methods.remove(cookie_auth_method)
exc_msg = 'our PROTOCOLINFO response did not have the location of our authentication cookie'
auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE))
if AuthMethod.PASSWORD in auth_methods and password is None:
auth_methods.remove(AuthMethod.PASSWORD)
auth_exceptions.append(MissingPassword('no passphrase provided'))
# iterating over AuthMethods so we can try them in this order
for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE):
if auth_type not in auth_methods:
continue
try:
if auth_type == AuthMethod.NONE:
authenticate_none(controller, False)
elif auth_type == AuthMethod.PASSWORD:
authenticate_password(controller, password, False)
elif auth_type in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
cookie_path = protocolinfo_response.cookie_path
if chroot_path:
cookie_path = os.path.join(chroot_path, cookie_path.lstrip(os.path.sep))
if auth_type == AuthMethod.SAFECOOKIE:
authenticate_safecookie(controller, cookie_path, False)
else:
authenticate_cookie(controller, cookie_path, False)
if isinstance(controller, stem.control.BaseController):
controller._post_authentication()
return # success!
except OpenAuthRejected as exc:
auth_exceptions.append(exc)
except IncorrectPassword as exc:
auth_exceptions.append(exc)
except PasswordAuthRejected as exc:
# Since the PROTOCOLINFO says password auth is available we can assume
# that if PasswordAuthRejected is raised it's being raised in error.
log.debug('The authenticate_password method raised a PasswordAuthRejected when password auth should be available. Stem may need to be corrected to recognize this response: %s' % exc)
auth_exceptions.append(IncorrectPassword(str(exc)))
except AuthSecurityFailure as exc:
log.info('Tor failed to provide the nonce expected for safecookie authentication. (%s)' % exc)
auth_exceptions.append(exc)
except (InvalidClientNonce, UnrecognizedAuthChallengeMethod, AuthChallengeFailed) as exc:
auth_exceptions.append(exc)
except (IncorrectCookieSize, UnreadableCookieFile, IncorrectCookieValue) as exc:
auth_exceptions.append(exc)
except CookieAuthRejected as exc:
auth_func = 'authenticate_safecookie' if exc.is_safecookie else 'authenticate_cookie'
log.debug('The %s method raised a CookieAuthRejected when cookie auth should be available. Stem may need to be corrected to recognize this response: %s' % (auth_func, exc))
auth_exceptions.append(IncorrectCookieValue(str(exc), exc.cookie_path, exc.is_safecookie))
except stem.ControllerError as exc:
auth_exceptions.append(AuthenticationFailure(str(exc)))
# All authentication attempts failed. Raise the exception that takes priority
# according to our pydocs.
for exc_type in AUTHENTICATE_EXCEPTIONS:
for auth_exc in auth_exceptions:
if isinstance(auth_exc, exc_type):
raise auth_exc
# We really, really shouldn't get here. It means that auth_exceptions is
# either empty or contains something that isn't an AuthenticationFailure.
raise AssertionError('BUG: Authentication failed without providing a recognized exception: %s' % str(auth_exceptions))
def authenticate_none(controller, suppress_ctl_errors = True):
"""
Authenticates to an open control socket. All control connections need to
authenticate before they can be used, even if tor hasn't been configured to
use any authentication.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
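A brief sketch, assuming tor's ControlPort is 9051 and open authentication
is enabled...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)

  try:
    stem.connection.authenticate_none(control_socket)
  except stem.connection.OpenAuthRejected:
    print('Tor rejected our empty authentication attempt')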
:param controller: tor controller or socket to be authenticated
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises: :class:`stem.connection.OpenAuthRejected` if the empty authentication credentials aren't accepted
"""
try:
auth_response = _msg(controller, 'AUTHENTICATE')
# if we got anything but an OK response then error
if str(auth_response) != 'OK':
try:
controller.connect()
except:
pass
raise OpenAuthRejected(str(auth_response), auth_response)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise OpenAuthRejected('Socket failed (%s)' % exc)
def authenticate_password(controller, password, suppress_ctl_errors = True):
"""
Authenticates to a control socket that uses a password (via the
HashedControlPassword torrc option). Quotes in the password are escaped.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
If you use this function directly, rather than
:func:`~stem.connection.authenticate`, we may mistakenly raise a
PasswordAuthRejected rather than IncorrectPassword. This is because we rely
on tor's error messaging which is liable to change in future versions
(:trac:`4817`).
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
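A brief sketch, assuming tor was configured with a HashedControlPassword
(for instance one generated via 'tor --hash-password')...

::

  import getpass

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)
  controller_password = getpass.getpass('controller password: ')

  try:
    stem.connection.authenticate_password(control_socket, controller_password)
  except stem.connection.PasswordAuthFailed:
    print('Unable to authenticate, password is incorrect')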
:param controller: tor controller or socket to be authenticated
:param str password: passphrase to present to the socket
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises:
* :class:`stem.connection.PasswordAuthRejected` if the socket doesn't
accept password authentication
* :class:`stem.connection.IncorrectPassword` if the authentication
credentials aren't accepted
"""
# Escapes quotes. Tor can include those in the password hash, in which case
# it expects escaped quotes from the controller. For more information see...
# https://trac.torproject.org/projects/tor/ticket/4600
password = password.replace('"', '\\"')
try:
auth_response = _msg(controller, 'AUTHENTICATE "%s"' % password)
# if we got anything but an OK response then error
if str(auth_response) != 'OK':
try:
controller.connect()
except:
pass
# all we have to go on is the error message from tor...
# Password did not match HashedControlPassword value from configuration...
# Password did not match HashedControlPassword *or*...
if 'Password did not match HashedControlPassword' in str(auth_response):
raise IncorrectPassword(str(auth_response), auth_response)
else:
raise PasswordAuthRejected(str(auth_response), auth_response)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise PasswordAuthRejected('Socket failed (%s)' % exc)
def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True):
"""
Authenticates to a control socket that uses the contents of an authentication
cookie (generated via the CookieAuthentication torrc option). This does basic
validation that this is a cookie before presenting the contents to the
socket.
The :class:`~stem.connection.IncorrectCookieSize` and
:class:`~stem.connection.UnreadableCookieFile` exceptions take precedence
over the other types.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
If you use this function directly, rather than
:func:`~stem.connection.authenticate`, we may mistakenly raise a
:class:`~stem.connection.CookieAuthRejected` rather than
:class:`~stem.connection.IncorrectCookieValue`. This is because we rely on
tor's error messaging which is liable to change in future versions
(:trac:`4817`).
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
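A brief sketch (the cookie path below is purely illustrative, its actual
location is reported by PROTOCOLINFO)...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)

  try:
    stem.connection.authenticate_cookie(control_socket, '/var/lib/tor/control_auth_cookie')
  except stem.connection.CookieAuthFailed:
    print('Unable to authenticate with our cookie')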
:param controller: tor controller or socket to be authenticated
:param str cookie_path: path of the authentication cookie to send to tor
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises:
* :class:`stem.connection.IncorrectCookieSize` if the cookie file's size
is wrong
* :class:`stem.connection.UnreadableCookieFile` if the cookie file doesn't
exist or we're unable to read it
* :class:`stem.connection.CookieAuthRejected` if cookie authentication is
attempted but the socket doesn't accept it
* :class:`stem.connection.IncorrectCookieValue` if the cookie file's value
is rejected
"""
cookie_data = _read_cookie(cookie_path, False)
try:
# binascii.b2a_hex() takes a byte string and returns one too. With python 3
# this is a problem because string formatting for byte strings includes the
# b'' wrapper...
#
# >>> "AUTHENTICATE %s" % b'content'
# "AUTHENTICATE b'content'"
#
# This seems dumb but oh well. Converting the result to unicode so it won't
# misbehave.
auth_token_hex = binascii.b2a_hex(stem.util.str_tools._to_bytes(cookie_data))
msg = 'AUTHENTICATE %s' % stem.util.str_tools._to_unicode(auth_token_hex)
auth_response = _msg(controller, msg)
# if we got anything but an OK response then error
if str(auth_response) != 'OK':
try:
controller.connect()
except:
pass
# all we have to go on is the error message from tor...
# ... Authentication cookie did not match expected value.
# ... *or* authentication cookie.
if '*or* authentication cookie.' in str(auth_response) or \
'Authentication cookie did not match expected value.' in str(auth_response):
raise IncorrectCookieValue(str(auth_response), cookie_path, False, auth_response)
else:
raise CookieAuthRejected(str(auth_response), cookie_path, False, auth_response)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, False)
def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True):
"""
Authenticates to a control socket using the safe cookie method, which is
enabled by setting the CookieAuthentication torrc option on Tor clients that
support it.
Authentication with this is a two-step process...
1. send a nonce to the server and receive a challenge from the server for
the cookie's contents
2. generate a hash digest using the challenge received in the first step, and
use it to authenticate the controller (the hashes involved are sketched
below)
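The hashes exchanged mirror the SERVER_HASH_CONSTANT and
CLIENT_HASH_CONSTANT this module applies below...

::

  ServerHash = HMAC-SHA256(SERVER_HASH_CONSTANT,
                           CookieString | ClientNonce | ServerNonce)

  ClientHash = HMAC-SHA256(CLIENT_HASH_CONSTANT,
                           CookieString | ClientNonce | ServerNonce)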
The :class:`~stem.connection.IncorrectCookieSize` and
:class:`~stem.connection.UnreadableCookieFile` exceptions take precedence
over the other exception types.
The :class:`~stem.connection.AuthChallengeUnsupported`,
:class:`~stem.connection.UnrecognizedAuthChallengeMethod`,
:class:`~stem.connection.InvalidClientNonce` and
:class:`~stem.connection.CookieAuthRejected` exceptions are next in the order
of precedence. Depending on the reason, one of these is raised if the first
(AUTHCHALLENGE) step fails.
In the second (AUTHENTICATE) step,
:class:`~stem.connection.IncorrectCookieValue` or
:class:`~stem.connection.CookieAuthRejected` may be raised.
If authentication fails tor will disconnect and we'll make a best effort
attempt to re-establish the connection. This may not succeed, so check
:func:`~stem.socket.ControlSocket.is_alive` before using the socket further.
For general usage use the :func:`~stem.connection.authenticate` function
instead.
:param controller: tor controller or socket to be authenticated
:param str cookie_path: path of the authentication cookie to send to tor
:param bool suppress_ctl_errors: reports raised
:class:`~stem.ControllerError` as authentication rejection if
**True**, otherwise they're re-raised
:raises:
* :class:`stem.connection.IncorrectCookieSize` if the cookie file's size
is wrong
* :class:`stem.connection.UnreadableCookieFile` if the cookie file doesn't
exist or we're unable to read it
* :class:`stem.connection.CookieAuthRejected` if cookie authentication is
attempted but the socket doesn't accept it
* :class:`stem.connection.IncorrectCookieValue` if the cookie file's value
is rejected
* :class:`stem.connection.UnrecognizedAuthChallengeMethod` if the Tor
client fails to recognize the AuthChallenge method
* :class:`stem.connection.AuthChallengeUnsupported` if AUTHCHALLENGE is
unimplemented, or if unable to parse AUTHCHALLENGE response
* :class:`stem.connection.AuthSecurityFailure` if AUTHCHALLENGE's response
looks like a security attack
* :class:`stem.connection.InvalidClientNonce` if stem's AUTHCHALLENGE
client nonce is rejected for being invalid
"""
cookie_data = _read_cookie(cookie_path, True)
client_nonce = os.urandom(32)
try:
client_nonce_hex = stem.util.str_tools._to_unicode(binascii.b2a_hex(client_nonce))
authchallenge_response = _msg(controller, 'AUTHCHALLENGE SAFECOOKIE %s' % client_nonce_hex)
if not authchallenge_response.is_ok():
try:
controller.connect()
except:
pass
authchallenge_response_str = str(authchallenge_response)
if 'Authentication required.' in authchallenge_response_str:
raise AuthChallengeUnsupported("SAFECOOKIE authentication isn't supported", cookie_path)
elif 'AUTHCHALLENGE only supports' in authchallenge_response_str:
raise UnrecognizedAuthChallengeMethod(authchallenge_response_str, cookie_path)
elif 'Invalid base16 client nonce' in authchallenge_response_str:
raise InvalidClientNonce(authchallenge_response_str, cookie_path)
elif 'Cookie authentication is disabled' in authchallenge_response_str:
raise CookieAuthRejected(authchallenge_response_str, cookie_path, True)
else:
raise AuthChallengeFailed(authchallenge_response, cookie_path)
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise AuthChallengeFailed('Socket failed (%s)' % exc, cookie_path)
try:
stem.response.convert('AUTHCHALLENGE', authchallenge_response)
except stem.ProtocolError as exc:
if not suppress_ctl_errors:
raise
else:
raise AuthChallengeFailed('Unable to parse AUTHCHALLENGE response: %s' % exc, cookie_path)
expected_server_hash = stem.util.connection._hmac_sha256(
SERVER_HASH_CONSTANT,
cookie_data + client_nonce + authchallenge_response.server_nonce)
if not stem.util.connection._cryptovariables_equal(authchallenge_response.server_hash, expected_server_hash):
raise AuthSecurityFailure('Tor provided the wrong server nonce', cookie_path)
try:
client_hash = stem.util.connection._hmac_sha256(
CLIENT_HASH_CONSTANT,
cookie_data + client_nonce + authchallenge_response.server_nonce)
auth_response = _msg(controller, 'AUTHENTICATE %s' % stem.util.str_tools._to_unicode(binascii.b2a_hex(client_hash)))
except stem.ControllerError as exc:
try:
controller.connect()
except:
pass
if not suppress_ctl_errors:
raise
else:
raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, True)
# if we got anything but an OK response then err
if not auth_response.is_ok():
try:
controller.connect()
except:
pass
# all we have to go on is the error message from tor...
# ... Safe cookie response did not match expected value
# ... *or* authentication cookie.
if '*or* authentication cookie.' in str(auth_response) or \
'Safe cookie response did not match expected value' in str(auth_response):
raise IncorrectCookieValue(str(auth_response), cookie_path, True, auth_response)
else:
raise CookieAuthRejected(str(auth_response), cookie_path, True, auth_response)
def get_protocolinfo(controller):
"""
Issues a PROTOCOLINFO query to a control socket, getting information about
the tor process running on it. If the socket is already closed then it is
first reconnected.
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
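A brief sketch (assuming tor's ControlPort is 9051)...

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)
  protocolinfo_response = stem.connection.get_protocolinfo(control_socket)

  print('tor version: %s' % protocolinfo_response.tor_version)
  print('auth methods: %s' % ', '.join(protocolinfo_response.auth_methods))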
:param controller: tor controller or socket to be queried
:returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor
:raises:
* :class:`stem.ProtocolError` if the PROTOCOLINFO response is
malformed
* :class:`stem.SocketError` if problems arise in establishing or
using the socket
"""
try:
protocolinfo_response = _msg(controller, 'PROTOCOLINFO 1')
except:
protocolinfo_response = None
# Tor hangs up on sockets after receiving a PROTOCOLINFO query if it isn't
# next followed by authentication. Transparently reconnect if that happens.
if not protocolinfo_response or str(protocolinfo_response) == 'Authentication required.':
controller.connect()
try:
protocolinfo_response = _msg(controller, 'PROTOCOLINFO 1')
except stem.SocketClosed as exc:
raise stem.SocketError(exc)
stem.response.convert('PROTOCOLINFO', protocolinfo_response)
return protocolinfo_response
def _msg(controller, message):
"""
Sends and receives a message with either a
:class:`~stem.socket.ControlSocket` or :class:`~stem.control.BaseController`.
"""
if isinstance(controller, stem.socket.ControlSocket):
controller.send(message)
return controller.recv()
else:
return controller.msg(message)
def _connection_for_default_port(address):
"""
Attempts to provide a controller connection for either port 9051 (default for
relays) or 9151 (default for Tor Browser). If both fail then this raises the
exception for port 9051.
:param str address: address to connect to
:returns: :class:`~stem.socket.ControlPort` for the controller connection
:raises: :class:`stem.SocketError` if we're unable to establish a connection
"""
try:
return stem.socket.ControlPort(address, 9051)
except stem.SocketError as exc:
try:
return stem.socket.ControlPort(address, 9151)
except stem.SocketError:
raise exc
def _read_cookie(cookie_path, is_safecookie):
"""
Provides the contents of a given cookie file.
:param str cookie_path: absolute path of the cookie file
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
:raises:
* :class:`stem.connection.UnreadableCookieFile` if the cookie file is
unreadable
* :class:`stem.connection.IncorrectCookieSize` if the cookie size is
incorrect (not 32 bytes)
"""
if not os.path.exists(cookie_path):
exc_msg = "Authentication failed: '%s' doesn't exist" % cookie_path
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
# Abort if the file isn't 32 bytes long. This is to avoid exposing arbitrary
# file content to the port.
#
# Without this a malicious socket could, for instance, claim that
# '~/.bash_history' or '~/.ssh/id_rsa' was its authentication cookie to trick
# us into reading it for them with our current permissions.
#
# https://trac.torproject.org/projects/tor/ticket/4303
auth_cookie_size = os.path.getsize(cookie_path)
if auth_cookie_size != 32:
exc_msg = "Authentication failed: authentication cookie '%s' is the wrong size (%i bytes instead of 32)" % (cookie_path, auth_cookie_size)
raise IncorrectCookieSize(exc_msg, cookie_path, is_safecookie)
try:
with open(cookie_path, 'rb', 0) as f:
return f.read()
except IOError as exc:
exc_msg = "Authentication failed: unable to read '%s' (%s)" % (cookie_path, exc)
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
class AuthenticationFailure(Exception):
"""
Base error for authentication failures.
:var stem.socket.ControlMessage auth_response: AUTHENTICATE response from the
control socket, **None** if one wasn't received
"""
def __init__(self, message, auth_response = None):
super(AuthenticationFailure, self).__init__(message)
self.auth_response = auth_response
class UnrecognizedAuthMethods(AuthenticationFailure):
"""
None of the authentication methods tor offered are recognized.
:var list unknown_auth_methods: authentication methods that weren't recognized
"""
def __init__(self, message, unknown_auth_methods):
super(UnrecognizedAuthMethods, self).__init__(message)
self.unknown_auth_methods = unknown_auth_methods
class IncorrectSocketType(AuthenticationFailure):
'Socket does not speak the control protocol.'
class OpenAuthFailed(AuthenticationFailure):
'Failure to authenticate to an open socket.'
class OpenAuthRejected(OpenAuthFailed):
'Attempt to connect to an open control socket was rejected.'
class PasswordAuthFailed(AuthenticationFailure):
'Failure to authenticate with a password.'
class PasswordAuthRejected(PasswordAuthFailed):
'Socket does not support password authentication.'
class IncorrectPassword(PasswordAuthFailed):
'Authentication password incorrect.'
class MissingPassword(PasswordAuthFailed):
"Password authentication is supported but we weren't provided with one."
class CookieAuthFailed(AuthenticationFailure):
"""
Failure to authenticate with an authentication cookie.
:param str cookie_path: location of the authentication cookie we attempted
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
:param stem.response.ControlMessage auth_response: reply to our
authentication attempt
"""
def __init__(self, message, cookie_path, is_safecookie, auth_response = None):
super(CookieAuthFailed, self).__init__(message, auth_response)
self.is_safecookie = is_safecookie
self.cookie_path = cookie_path
class CookieAuthRejected(CookieAuthFailed):
'Socket does not support cookie authentication.'
class IncorrectCookieValue(CookieAuthFailed):
'Authentication cookie value was rejected.'
class IncorrectCookieSize(CookieAuthFailed):
'Aborted because the cookie file is the wrong size.'
class UnreadableCookieFile(CookieAuthFailed):
'Error arose in reading the authentication cookie.'
class AuthChallengeFailed(CookieAuthFailed):
"""
AUTHCHALLENGE command has failed.
"""
def __init__(self, message, cookie_path):
super(AuthChallengeFailed, self).__init__(message, cookie_path, True)
class AuthChallengeUnsupported(AuthChallengeFailed):
"""
AUTHCHALLENGE isn't implemented.
"""
class UnrecognizedAuthChallengeMethod(AuthChallengeFailed):
"""
Tor couldn't recognize our AUTHCHALLENGE method.
:var str authchallenge_method: AUTHCHALLENGE method that Tor couldn't recognize
"""
def __init__(self, message, cookie_path, authchallenge_method = None):
super(UnrecognizedAuthChallengeMethod, self).__init__(message, cookie_path)
self.authchallenge_method = authchallenge_method
class AuthSecurityFailure(AuthChallengeFailed):
'AUTHCHALLENGE response is invalid.'
class InvalidClientNonce(AuthChallengeFailed):
'AUTHCHALLENGE request contains an invalid client nonce.'
class MissingAuthInfo(AuthenticationFailure):
"""
The PROTOCOLINFO response didn't have enough information to authenticate.
These are valid control responses but really shouldn't happen in practice.
"""
class NoAuthMethods(MissingAuthInfo):
"PROTOCOLINFO response didn't have any methods for authenticating."
class NoAuthCookie(MissingAuthInfo):
"""
PROTOCOLINFO response supports cookie auth but doesn't have its path.
:param bool is_safecookie: **True** if this was for SAFECOOKIE
authentication, **False** if for COOKIE
"""
def __init__(self, message, is_safecookie):
super(NoAuthCookie, self).__init__(message)
self.is_safecookie = is_safecookie
# authentication exceptions ordered as per the authenticate function's pydocs
AUTHENTICATE_EXCEPTIONS = (
IncorrectSocketType,
UnrecognizedAuthMethods,
MissingPassword,
IncorrectPassword,
IncorrectCookieSize,
UnreadableCookieFile,
IncorrectCookieValue,
AuthChallengeUnsupported,
UnrecognizedAuthChallengeMethod,
InvalidClientNonce,
AuthSecurityFailure,
OpenAuthRejected,
MissingAuthInfo,
AuthenticationFailure
)
stem-1.7.1/stem/cached_manual.sqlite 0000644 0001750 0001750 00000734000 13355717377 020175 0 ustar atagar atagar 0000000 0000000 [binary SQLite database omitted: caches tor's manual content in tables 'torrc', 'files', 'signals', 'commandline', 'metadata', and 'schema']