snitun-0.42.0/.codecov.yml
comment: false
coverage:
status:
project: true

snitun-0.42.0/.devcontainer.json
{
"name": "NabuCasa SniTun Dev",
"image": "mcr.microsoft.com/vscode/devcontainers/python:1-3.12",
"postCreateCommand": "python3 -m pip install -e .[test,lint]",
"postStartCommand": "python3 -m pip install -e .",
"containerUser": "vscode",
"containerEnv": {
"GIT_EDITOR": "code --wait"
},
"customizations": {
"vscode": {
"extensions": [
"charliermarsh.ruff",
"esbenp.prettier-vscode",
"ms-python.python",
"ms-python.vscode-pylance",
"visualstudioexptteam.vscodeintellicode"
],
"settings": {
"python.pythonPath": "/usr/local/bin/python",
"python.formatting.provider": "ruff",
"editor.formatOnPaste": false,
"editor.formatOnSave": true,
"editor.formatOnType": true,
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.rulers": [88],
"editor.codeActionsOnSave": {
"source.fixAll": "always",
"source.organizeImports": "always"
},
"files.trimTrailingWhitespace": true,
"terminal.integrated.profiles.linux": {
"zsh": {
"path": "/usr/bin/zsh"
}
},
"terminal.integrated.defaultProfile.linux": "zsh"
}
}
}
}

snitun-0.42.0/.github/dependabot.yml
version: 2
updates:
- package-ecosystem: pip
directory: "/"
schedule:
interval: monthly
open-pull-requests-limit: 10
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: monthly
open-pull-requests-limit: 10

snitun-0.42.0/.github/release-drafter.yml
name-template: "$RESOLVED_VERSION"
tag-template: "$RESOLVED_VERSION"
categories:
- title: "⬆️ Dependencies"
collapse-after: 1
labels:
- "dependencies"
version-resolver:
default: minor
change-template: "- #$NUMBER $TITLE @$AUTHOR"
sort-direction: ascending
template: |
## What's Changed
$CHANGES

snitun-0.42.0/.github/workflows/ci.yml
name: Run Tests
on:
push:
branches:
- main
pull_request:
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Python 3.12
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.12"
- uses: pre-commit/action@v3.0.1
benchmark:
runs-on: ubuntu-latest
timeout-minutes: 6
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Python 3.13
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.13"
cache: pip
- name: Install dependencies
shell: bash
run: |
python3 -m pip install -e .[test]
- name: Run benchmarks
uses: CodSpeedHQ/action@v3
with:
token: ${{ secrets.CODSPEED_TOKEN }}
run: pytest --timeout=300 --no-cov -vvvvv --codspeed tests/benchmarks
build:
runs-on: ubuntu-latest
timeout-minutes: 6
needs: lint
environment: CI
strategy:
matrix:
python-version:
- "3.12"
- "3.13"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ matrix.python-version }}
cache: "pip" # caching pip dependencies
- name: Install dependencies
shell: bash
run: |
python3 -m pip install -e .[test]
- name: Test with Pytest
run: pytest --cov=snitun --cov-report=xml --timeout=10
- name: Upload coverage to Codecov
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5.4.0
with:
token: ${{ secrets.CODECOV_TOKEN }}

snitun-0.42.0/.github/workflows/release-drafter.yml
name: Release Drafter
on:
push:
# branches to consider in the event; optional, defaults to all
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
- uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 # v6.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

snitun-0.42.0/.github/workflows/release.yml
name: Upload Python Package
on:
release:
types:
- published
permissions: {}
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Make sure tag_name is not empty
run: |
if [[ "${{ github.event.release.tag_name }}" == "" ]]; then
exit 1
fi
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Python
uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.x"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools build
- name: Set version ${{ github.event.release.tag_name }}
run: |
sed -i "s/^version = \".*\"/version = \"${{ github.event.release.tag_name }}\"/" pyproject.toml
- name: Build ${{ github.event.release.tag_name }}
run: |
python -m build
- name: Upload dists
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: "dist"
path: "dist/"
if-no-files-found: error
retention-days: 5
publish:
name: Upload release to PyPI
runs-on: ubuntu-latest
needs: "build"
environment:
name: release
url: https://pypi.org/p/snitun
permissions:
id-token: write
steps:
- name: Download dists
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
name: "dist"
path: "dist/"
- name: Publish dists to PyPI
uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4

snitun-0.42.0/.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# Editors
.vscode/

snitun-0.42.0/.pre-commit-config.yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: debug-statements
- id: check-builtin-literals
- id: check-case-conflict
- id: check-docstring-first
- id: check-json
- id: check-toml
- id: check-xml
- id: check-yaml
- id: detect-private-key
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.9.5
hooks:
- id: ruff
args:
- --fix
files: ^((snitun)/.+)?[^/]+\.(py)$
- id: ruff-format
files: ^((snitun)/.+)?[^/]+\.(py)$
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.3
hooks:
- id: prettier
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.14.1
hooks:
- id: mypy
additional_dependencies: []
files: ^((snitun)/.+)?[^/]+\.(py)$

snitun-0.42.0/LICENSE
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year>  <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program>  Copyright (C) <year>  <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

snitun-0.42.0/README.md
# SniTun
End-to-End encryption with SNI proxy on top of a TCP multiplexer
## Connection flow
```
[ CLIENT ] --AUTH/CONFIG--> [ SESSION MASTER ] (Trusted connection)
[ CLIENT ] <--FERNET-TOKEN- [ SESSION MASTER ]
[ CLIENT ] --------FERNET-TOKEN---------------------> [ SNITUN ] (Insecure connection)
[ CLIENT ] <-------CHALLENGE-RESPONSE-(AES/CBC)-----> [ SNITUN ]
<---> <------------------------------>
[ ENDPOINT ] <---> [ CLIENT ] <---------MULTIPLEXER---(AES/CBC)--------> [ SNITUN ] <-----EXTERNAL-CONNECTIONS-----> [ DEVICE ]
| <---> <------------------------------> |
| |
| <--------------------------------------------------END-TO-END-SSL------------------------------------------------->|
(Trusted connection)
```
## Fernet token
The session master creates a Fernet token from the client's configuration (AES/whitelist) and attaches the hostname and a UTC timestamp until which the token is valid.
```json
{
"valid": 1923841,
"hostname": "myname.ui.nabu.casa",
"aes_key": "hexstring",
"aes_iv": "hexstring"
}
```
The SniTun server must be able to decrypt this token to validate the client's authenticity. SniTun then initiates a challenge-response exchange to validate the AES key and to ensure that it is the same client that requested the Fernet token from the session master.
Note: SniTun server does not perform any user authentication!
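
Below is a minimal sketch of how such a token could be minted with the `cryptography` package. The key handling, expiry window, and field values are illustrative assumptions, not the session master's actual implementation.

```python
import json
from datetime import UTC, datetime, timedelta

from cryptography.fernet import Fernet

# Shared Fernet secret known to the session master and the SniTun servers (placeholder).
fernet = Fernet(Fernet.generate_key())

payload = {
    "valid": (datetime.now(tz=UTC) + timedelta(minutes=5)).timestamp(),
    "hostname": "myname.ui.nabu.casa",
    "aes_key": "00" * 32,  # hex-encoded AES key (placeholder value)
    "aes_iv": "00" * 16,  # hex-encoded AES IV (placeholder value)
}
fernet_token = fernet.encrypt(json.dumps(payload).encode())
```
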
### Challenge/Response
The SniTun server creates a SHA256 hash from a random 40-bit value. This challenge is encrypted and sent to the client, which decrypts it, computes another SHA256 hash over the challenge, and sends the answer back encrypted. If the answer is valid, the client enters multiplexer mode.
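
The client-side half of this exchange can be sketched as follows, assuming an AES/CBC helper object with `encrypt`/`decrypt` methods (in this repository that role is played by `CryptoTransport`); it mirrors the handshake code in `ClientPeer.start()` further below.

```python
import hashlib


def answer_challenge(crypto, encrypted_challenge: bytes) -> bytes:
    """Decrypt the 32-byte challenge, hash it once more, and encrypt the answer."""
    challenge = crypto.decrypt(encrypted_challenge)
    answer = hashlib.sha256(challenge).digest()
    return crypto.encrypt(answer)
```
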
## Multiplexer Protocol
The header is encrypted using AES/CBC. The payload should be SSL. The ID is unique per TCP connection and changes for every new connection. The size field holds the length of the data payload.
The extra field carries additional information: for a new-connection message it includes the caller's IP address; otherwise it is filled with random bits. (A packing sketch follows the message-type list below.)
```
|________________________________________________________|
|-----------------HEADER---------------------------------|______________________________________________|
|------ID-----|--FLAG--|--SIZE--|---------EXTRA ---------|--------------------DATA----------------------|
| 16 bytes | 1 byte | 4 bytes| 11 bytes | variable |
|--------------------------------------------------------|----------------------------------------------|
```
Message Flags/Types:
- `0x01`: New | The extra data includes the first byte as an ASCII value of 4 or 6, followed by the caller IP in bytes.
- `0x02`: DATA
- `0x04`: Close
- `0x08`: Ping | The extra data is a `ping` or `pong` response to a ping.
- `0x16`: Pause the remote reader (added in protocol version 1)
- `0x32`: Resume the remote reader (added in protocol version 1)
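
As a rough illustration of the 32-byte header layout above, a header could be packed as shown below. The real wire format lives in `snitun.multiplexer`; the byte order and the random-filler handling here are assumptions for the sketch.

```python
import os
import struct

CHANNEL_FLOW_DATA = 0x02


def pack_header(channel_id: bytes, flow_type: int, data: bytes) -> bytes:
    """Pack ID (16 bytes), flag (1 byte), size (4 bytes) and 11 extra bytes."""
    if len(channel_id) != 16:
        raise ValueError("channel_id must be 16 bytes")
    # Random filler; a NEW message would carry the caller IP here instead.
    extra = os.urandom(11)
    return struct.pack(">16sBI11s", channel_id, flow_type, len(data), extra)
```
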
## Configuration via environment variables
The following environment variables can be used to override internal defaults. To take effect, they must be set before this package is imported:
- `MULTIPLEXER_INCOMING_QUEUE_MAX_BYTES_CHANNEL` - The maximum number of bytes allowed in the incoming queue for each multiplexer channel.
- `MULTIPLEXER_INCOMING_QUEUE_LOW_WATERMARK` - The low watermark threshold, in bytes, for the incoming queue for each multiplexer channel.
- `MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK` - The high watermark threshold, in bytes, for the incoming queue for each multiplexer channel.
- `MULTIPLEXER_OUTGOING_QUEUE_MAX_BYTES_CHANNEL` - The maximum number of bytes allowed in the outgoing queue for the multiplexer channel.
- `MULTIPLEXER_OUTGOING_QUEUE_LOW_WATERMARK` - The low watermark threshold, in bytes, for the outgoing queue for each multiplexer channel.
- `MULTIPLEXER_OUTGOING_QUEUE_HIGH_WATERMARK` - The high watermark threshold, in bytes, for the outgoing queue for each multiplexer channel.
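
For example, the overrides can be applied in code before the first import of this package (the values below are arbitrary, not recommendations):

```python
import os

os.environ["MULTIPLEXER_INCOMING_QUEUE_MAX_BYTES_CHANNEL"] = str(2 * 1024 * 1024)
os.environ["MULTIPLEXER_INCOMING_QUEUE_LOW_WATERMARK"] = str(512 * 1024)
os.environ["MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK"] = str(1024 * 1024)

import snitun  # noqa: E402  # imported after the overrides so they take effect
```
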
## Protocol versioning considerations
- The client is responsible for setting the `protocol_version` key in the token. If no `protocol_version` is provided, the server must assume protocol version 0.
- The server side must always be updated first when incrementing the protocol version as the client assumes that the server is always running a protocol version that it supports.
- When new message types are added to the Multiplexer, the protocol version must be incremented.
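
As an example of the first point, a client that supports protocol version 1 would simply add the extra key to its token; the other fields are as in the earlier example.

```json
{
  "valid": 1923841,
  "hostname": "myname.ui.nabu.casa",
  "aes_key": "hexstring",
  "aes_iv": "hexstring",
  "protocol_version": 1
}
```
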

snitun-0.42.0/pyproject.toml
[build-system]
build-backend = "setuptools.build_meta"
requires = ["setuptools>=62.3"]
[project]
authors = [{ name = "Nabu Casa, Inc.", email = "opensource@nabucasa.com" }]
classifiers = [
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
"Topic :: Internet :: Proxy Servers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dependencies = ["aiohttp>=3.9.3", "cryptography>=2.5"]
description = "SNI proxy with TCP multiplexer"
keywords = ["sni", "proxy", "multiplexer", "tls"]
license = { text = "GPL v3" }
name = "snitun"
readme = "README.md"
requires-python = ">=3.12"
version = "0.0.0"
[project.optional-dependencies]
lint = ["ruff==0.11.2"]
test = [
"covdefaults==2.3.0",
"pytest-aiohttp==1.1.0",
"pytest-codspeed==3.2.0",
"pytest-cov==6.1.0",
"pytest-timeout==2.3.1",
"pytest==8.3.5",
]
[project.urls]
Homepage = "https://www.nabucasa.com/"
Repository = "https://github.com/NabuCasa/snitun.git"
[tool.pytest.ini_options]
asyncio_mode = "auto"
[tool.ruff]
fix = true
line-length = 88
show-fixes = true
target-version = "py311"
[tool.ruff.lint]
ignore = [
"A005", # https://docs.astral.sh/ruff/rules/stdlib-module-shadowing/
"ASYNC110", # https://docs.astral.sh/ruff/rules/async-busy-wait/
"ANN101", # https://docs.astral.sh/ruff/rules/missing-type-self/
"EM101", # https://docs.astral.sh/ruff/rules/raw-string-in-exception/
"EM102", # https://docs.astral.sh/ruff/rules/f-string-in-exception/
"FBT", # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt
"N818", # https://docs.astral.sh/ruff/rules/error-suffix-on-exception-name/
"PLR2004", # https://docs.astral.sh/ruff/rules/magic-value-comparison/
"PERF203", # https://docs.astral.sh/ruff/rules/try-except-in-loop/
"S101", # https://docs.astral.sh/ruff/rules/assert/
"S104", # https://docs.astral.sh/ruff/rules/hardcoded-bind-all-interfaces/
"TCH001", # https://docs.astral.sh/ruff/rules/typing-only-first-party-import/
"TCH003", # https://docs.astral.sh/ruff/rules/typing-only-standard-library-import/
"TID252", # https://docs.astral.sh/ruff/rules/relative-imports/
"TRY003", # https://docs.astral.sh/ruff/rules/raise-vanilla-args/
"TRY301", # https://docs.astral.sh/ruff/rules/raise-within-try/
"TRY400", # https://docs.astral.sh/ruff/rules/error-instead-of-exception/
]
select = ["ALL"]
[tool.ruff.lint.extend-per-file-ignores]
"py.typed" = ["D100"]
[tool.ruff.lint.flake8-pytest-style]
fixture-parentheses = false
mark-parentheses = false
[tool.ruff.lint.isort]
combine-as-imports = true
force-sort-within-sections = true
known-first-party = ["snitun"]
[tool.ruff.lint.mccabe]
max-complexity = 22
[tool.ruff.lint.pydocstyle]
# Use pep257 docstring convention.
convention = "pep257"
[tool.ruff.lint.pylint]
max-args = 15
max-branches = 30
max-returns = 8
max-statements = 80
[tool.setuptools]
include-package-data = true
platforms = ["any"]
zip-safe = false
[tool.setuptools.packages.find]
include = [
"snitun",
"snitun.server",
"snitun.client",
"snitun.multiplexer",
"snitun.utils",
]
[tool.coverage.run]
plugins = ["covdefaults"]
[tool.coverage.report]
fail_under = 75
exclude_also = [
"if self._debug:",
]

snitun-0.42.0/scripts/lint
#!/bin/sh
cd "$(dirname "$0")/.."
python3 -m ruff check --fix snitun
python3 -m ruff format --check snitun

snitun-0.42.0/scripts/test
#!/bin/sh
cd "$(dirname "$0")/.."
python3 -m pytest

snitun-0.42.0/snitun/__init__.py
"""SniTun - SNI Proxy + TCP multiplexer."""
from .utils import PROTOCOL_VERSION
__all__ = ("PROTOCOL_VERSION",)

snitun-0.42.0/snitun/client/__init__.py
"""SniTun Client library."""

snitun-0.42.0/snitun/client/client_peer.py
"""SniTun client for server connection."""
from __future__ import annotations
import asyncio
import hashlib
import logging
from ..exceptions import (
MultiplexerTransportDecrypt,
MultiplexerTransportError,
SniTunConnectionError,
)
from ..multiplexer.core import Multiplexer
from ..multiplexer.crypto import CryptoTransport
from ..utils import PROTOCOL_VERSION
from ..utils.asyncio import asyncio_timeout, make_task_waiter_future
from .connector import Connector
_LOGGER = logging.getLogger(__name__)
CONNECTION_TIMEOUT = 60
class ClientPeer:
"""Client to SniTun Server."""
def __init__(self, snitun_host: str, snitun_port: int | None = None) -> None:
"""Initialize ClientPeer connector."""
self._multiplexer: Multiplexer | None = None
self._loop = asyncio.get_event_loop()
self._snitun_host = snitun_host
self._snitun_port = snitun_port or 8080
self._handler_task: asyncio.Task[None] | None = None
@property
def is_connected(self) -> bool:
"""Return true, if a connection exists."""
return self._multiplexer is not None
def wait(self) -> asyncio.Future[None]:
"""Block until connection to peer is closed."""
if not self._multiplexer or not self._handler_task:
raise RuntimeError("No SniTun connection available")
# Wait until the handler task is done
# as we know the connection is closed
return make_task_waiter_future(self._handler_task)
async def start(
self,
connector: Connector,
fernet_token: bytes,
aes_key: bytes,
aes_iv: bytes,
throttling: int | None = None,
) -> None:
"""Connect an start ClientPeer."""
if self._multiplexer:
raise RuntimeError("SniTun connection available")
# Connect to SniTun server
_LOGGER.debug(
"Opening connection to %s:%s",
self._snitun_host,
self._snitun_port,
)
try:
async with asyncio_timeout.timeout(CONNECTION_TIMEOUT):
reader, writer = await asyncio.open_connection(
host=self._snitun_host,
port=self._snitun_port,
)
except TimeoutError:
raise SniTunConnectionError(
"Connection timeout for SniTun server "
f"{self._snitun_host}:{self._snitun_port}",
) from None
except OSError as err:
raise SniTunConnectionError(
"Can't connect to SniTun server "
f"{self._snitun_host}:{self._snitun_port} with: {err}",
) from err
# Send fernet token
writer.write(fernet_token)
try:
async with asyncio_timeout.timeout(CONNECTION_TIMEOUT):
await writer.drain()
except TimeoutError:
raise SniTunConnectionError(
"Timeout for writting connection token",
) from None
# Challenge/Response
crypto = CryptoTransport(aes_key, aes_iv)
try:
async with asyncio_timeout.timeout(CONNECTION_TIMEOUT):
challenge = await reader.readexactly(32)
answer = hashlib.sha256(crypto.decrypt(challenge)).digest()
writer.write(crypto.encrypt(answer))
await writer.drain()
except TimeoutError:
raise SniTunConnectionError(
"Challenge/Response timeout error to SniTun server",
) from None
except (
MultiplexerTransportDecrypt,
asyncio.IncompleteReadError,
OSError,
) as err:
raise SniTunConnectionError(
f"Challenge/Response error with SniTun server ({err})",
) from err
# Run multiplexer
self._multiplexer = Multiplexer(
crypto,
reader,
writer,
# We always assume the server can handle the latest protocol
# version since the server is deployed before the client is
# updated in the wild.
PROTOCOL_VERSION,
new_connections=connector.handler,
throttling=throttling,
)
        # Start a background task for pings/cleanups
assert not self._handler_task or self._handler_task.done(), (
"SniTun connection already running"
)
self._handler_task = self._loop.create_task(self._handler())
async def stop(self) -> None:
"""Stop connection to SniTun server."""
if not self._multiplexer:
raise RuntimeError("No SniTun connection available")
self._multiplexer.shutdown()
await self._multiplexer.wait()
await self._stop_handler()
async def _stop_handler(self) -> None:
"""Stop the handler."""
assert self._handler_task, "Handler task not started"
self._handler_task.cancel()
try:
await self._handler_task
except asyncio.CancelledError:
# Don't swallow cancellation
if (current_task := asyncio.current_task()) and current_task.cancelling():
raise
finally:
self._handler_task = None
async def _handler(self) -> None:
"""Wait until connection is closed."""
async def _wait_with_timeout(multiplexer: Multiplexer) -> None:
try:
async with asyncio_timeout.timeout(50):
await multiplexer.wait()
except TimeoutError:
await multiplexer.ping()
try:
while self._multiplexer and self._multiplexer.is_connected:
await _wait_with_timeout(self._multiplexer)
except MultiplexerTransportError:
pass
finally:
if self._multiplexer:
self._multiplexer.shutdown()
self._multiplexer = None

snitun-0.42.0/snitun/client/connector.py
"""Connector to end resource."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
from contextlib import suppress
import ipaddress
from ipaddress import IPv4Address
import logging
from typing import Any
from ..exceptions import MultiplexerTransportClose, MultiplexerTransportError
from ..multiplexer.channel import ChannelFlowControlBase, MultiplexerChannel
from ..multiplexer.core import Multiplexer
_LOGGER = logging.getLogger(__name__)
class Connector:
"""Connector to end resource."""
def __init__(
self,
end_host: str,
end_port: int | None = None,
whitelist: bool = False,
endpoint_connection_error_callback: Callable[[], Coroutine[Any, Any, None]]
| None = None,
) -> None:
"""Initialize Connector."""
self._loop = asyncio.get_event_loop()
self._end_host = end_host
self._end_port = end_port or 443
self._whitelist: set[IPv4Address] = set()
self._whitelist_enabled = whitelist
self._endpoint_connection_error_callback = endpoint_connection_error_callback
@property
def whitelist(self) -> set:
"""Allow to block requests per IP Return None or access to a set."""
return self._whitelist
def _whitelist_policy(self, ip_address: ipaddress.IPv4Address) -> bool:
"""Return True if the ip address can access to endpoint."""
if self._whitelist_enabled:
return ip_address in self._whitelist
return True
async def handler(
self,
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Handle new connection from SNIProxy."""
_LOGGER.debug(
"Receive from %s a request for %s",
channel.ip_address,
self._end_host,
)
# Check policy
if not self._whitelist_policy(channel.ip_address):
_LOGGER.warning("Block request from %s per policy", channel.ip_address)
multiplexer.delete_channel(channel)
return
await ConnectorHandler(self._loop, channel).start(
multiplexer,
self._end_host,
self._end_port,
self._endpoint_connection_error_callback,
)
class ConnectorHandler(ChannelFlowControlBase):
"""Handle connection to endpoint."""
def __init__(
self,
loop: asyncio.AbstractEventLoop,
channel: MultiplexerChannel,
) -> None:
"""Initialize ConnectorHandler."""
super().__init__(loop)
self._channel = channel
async def start(
self,
multiplexer: Multiplexer,
end_host: str,
end_port: int,
endpoint_connection_error_callback: Callable[[], Coroutine[Any, Any, None]]
| None = None,
) -> None:
"""Start handler."""
channel = self._channel
channel.set_pause_resume_reader_callback(self._pause_resume_reader_callback)
# Open connection to endpoint
try:
reader, writer = await asyncio.open_connection(host=end_host, port=end_port)
except OSError:
_LOGGER.error(
"Can't connect to endpoint %s:%s",
end_host,
end_port,
)
multiplexer.delete_channel(channel)
if endpoint_connection_error_callback:
await endpoint_connection_error_callback()
return
from_endpoint: asyncio.Future[None] | asyncio.Task[bytes] | None = None
from_peer: asyncio.Task[bytes] | None = None
try:
# Process stream from multiplexer
while not writer.transport.is_closing():
if not from_endpoint:
# If the multiplexer channel queue is under water, pause the reader
# by waiting for the future to be set, once the queue is not under
# water the future will be set and cleared to resume the reader
from_endpoint = self._pause_future or self._loop.create_task(
reader.read(4096), # type: ignore[arg-type]
)
if not from_peer:
from_peer = self._loop.create_task(channel.read())
# Wait until data need to be processed
await asyncio.wait(
[from_endpoint, from_peer],
return_when=asyncio.FIRST_COMPLETED,
)
# From endpoint
if from_endpoint.done():
if from_endpoint_exc := from_endpoint.exception():
raise from_endpoint_exc
if (from_endpoint_result := from_endpoint.result()) is not None:
await channel.write(from_endpoint_result)
from_endpoint = None
# From peer
if from_peer.done():
if from_peer_exc := from_peer.exception():
raise from_peer_exc
writer.write(from_peer.result())
from_peer = None
# Flush buffer
await writer.drain()
except (MultiplexerTransportError, OSError, RuntimeError):
_LOGGER.debug("Transport closed by endpoint for %s", channel.id)
multiplexer.delete_channel(channel)
except MultiplexerTransportClose:
_LOGGER.debug("Peer close connection for %s", channel.id)
finally:
# Cleanup peer reader
if from_peer:
if not from_peer.done():
from_peer.cancel()
else:
# Retrieve the exception to avoid "exception was never retrieved" warnings
from_peer.exception()
# Cleanup endpoint reader
if from_endpoint and not from_endpoint.done():
from_endpoint.cancel()
# Close Transport
if not writer.transport.is_closing():
with suppress(OSError):
writer.close()
snitun-0.42.0/snitun/exceptions.py 0000664 0000000 0000000 00000001511 14773172534 0017203 0 ustar 00root root 0000000 0000000 """SniTun Exceptions."""
class SniTunError(Exception):
"""Base Exception for SniTun exceptions."""
class SniTunChallengeError(SniTunError):
"""Raise if a challenge error is occure."""
class SniTunInvalidPeer(SniTunError):
"""Raise if peer config is invalid."""
class ParseSNIError(SniTunError):
"""Invalid ClientHello data."""
class ParseSNIIncompleteError(ParseSNIError):
"""Incomplete ClientHello data."""
class MultiplexerTransportError(SniTunError):
"""Raise if multiplexer have an problem with peer."""
class MultiplexerTransportClose(SniTunError):
"""Raise if connection to peer is closed."""
class MultiplexerTransportDecrypt(SniTunError):
"""Raise if decryption of message fails."""
class SniTunConnectionError(SniTunError):
"""Raise if SniTun client can't connect to server."""
snitun-0.42.0/snitun/multiplexer/ 0000775 0000000 0000000 00000000000 14773172534 0017024 5 ustar 00root root 0000000 0000000 snitun-0.42.0/snitun/multiplexer/__init__.py 0000664 0000000 0000000 00000000036 14773172534 0021134 0 ustar 00root root 0000000 0000000 """Multiplexer for SniTun."""
snitun-0.42.0/snitun/multiplexer/channel.py 0000664 0000000 0000000 00000023270 14773172534 0021012 0 ustar 00root root 0000000 0000000 """Multiplexer channel."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
from ipaddress import IPv4Address
import logging
import os
from ..exceptions import MultiplexerTransportClose, MultiplexerTransportError
from ..utils.asyncio import asyncio_timeout
from ..utils.ipaddress import ip_address_to_bytes
from .const import (
INCOMING_QUEUE_HIGH_WATERMARK,
INCOMING_QUEUE_LOW_WATERMARK,
INCOMING_QUEUE_MAX_BYTES_CHANNEL,
)
from .message import (
CHANNEL_FLOW_CLOSE,
CHANNEL_FLOW_DATA,
CHANNEL_FLOW_NEW,
CHANNEL_FLOW_PAUSE,
CHANNEL_FLOW_RESUME,
MIN_PROTOCOL_VERSION_FOR_PAUSE_RESUME,
MultiplexerChannelId,
MultiplexerMessage,
)
from .queue import MultiplexerMultiChannelQueue, MultiplexerSingleChannelQueue
_LOGGER = logging.getLogger(__name__)
class ChannelFlowControlBase:
"""A channel that implements flow control."""
_channel: MultiplexerChannel
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
"""Initialize a channel that implements flow control."""
self._loop = loop
self._pause_future: asyncio.Future[None] | None = None
self._debug = _LOGGER.isEnabledFor(logging.DEBUG)
def _pause_resume_reader_callback(self, pause: bool) -> None:
"""Pause and resume reader."""
channel = self._channel
ip_address = channel.ip_address
id_ = channel.id
if not pause:
if self._pause_future and not self._pause_future.done():
if self._debug:
_LOGGER.debug("Resuming reader for %s (%s)", ip_address, id_)
self._pause_future.set_result(None)
self._pause_future = None
return
raise RuntimeError(f"Reader already resumed for {ip_address} ({id_})")
if self._pause_future is None or self._pause_future.done():
if self._debug:
_LOGGER.debug("Pause reader for %s (%s)", ip_address, id_)
self._pause_future = self._loop.create_future()
return
raise RuntimeError(f"Reader already paused for {ip_address} ({id_})")
class MultiplexerChannel:
"""Represent a multiplexer channel."""
__slots__ = (
"_closing",
"_debug",
"_id",
"_input",
"_ip_address",
"_local_output_under_water",
"_output",
"_pause_resume_reader_callback",
"_peer_protocol_version",
"_reader_paused",
"_remote_input_under_water",
"_throttling",
)
def __init__(
self,
output: MultiplexerMultiChannelQueue,
ip_address: IPv4Address,
peer_protocol_version: int,
pause_resume_reader_callback: Callable[[bool], None] | None = None,
channel_id: MultiplexerChannelId | None = None,
throttling: float | None = None,
) -> None:
"""Initialize Multiplexer Channel."""
self._input = MultiplexerSingleChannelQueue(
INCOMING_QUEUE_MAX_BYTES_CHANNEL,
INCOMING_QUEUE_LOW_WATERMARK,
INCOMING_QUEUE_HIGH_WATERMARK,
self._on_local_input_under_water,
)
self._output = output
self._id = channel_id or MultiplexerChannelId(os.urandom(16))
self._ip_address = ip_address
self._peer_protocol_version = peer_protocol_version
self._throttling = throttling
self._closing = False
# Backpressure - We track when our output queue is under water
# or the remote input queue is under water so we can pause reading
# of whatever is connected to this channel to prevent overflowing
# either queue.
self._local_output_under_water = False
self._remote_input_under_water = False
self._output.create_channel(self._id, self._on_local_output_under_water)
self._pause_resume_reader_callback = pause_resume_reader_callback
self._reader_paused = False
self._debug = _LOGGER.isEnabledFor(logging.DEBUG)
def set_pause_resume_reader_callback(
self,
pause_resume_reader_callback: Callable[[bool], None],
) -> None:
"""Set pause resume reader callback."""
self._pause_resume_reader_callback = pause_resume_reader_callback
def _on_local_input_under_water(self, under_water: bool) -> None:
"""On callback from the input queue when goes under water or recovers."""
if self._peer_protocol_version < MIN_PROTOCOL_VERSION_FOR_PAUSE_RESUME:
if self._debug:
_LOGGER.debug(
"Remote does not support pause/resume, ignoring %s input water",
self._id,
)
return
msg_type = CHANNEL_FLOW_PAUSE if under_water else CHANNEL_FLOW_RESUME
# Tell the remote that our input queue is under water so it
# can pause reading from whatever is connected to this channel
if self._debug:
_LOGGER.debug(
"Informing remote that %s input is now %s water",
self._id,
"under" if under_water else "above",
)
try:
self._output.put_nowait(self._id, MultiplexerMessage(self._id, msg_type))
except asyncio.QueueFull:
_LOGGER.warning(
"%s: Cannot send pause/resume message to peer, output queue is full",
self._id,
)
def _on_local_output_under_water(self, under_water: bool) -> None:
"""On callback from the output queue when goes under water or recovers."""
if self._debug:
_LOGGER.debug(
"Local output is under water: %s for %s",
under_water,
self._id,
)
self._local_output_under_water = under_water
self._pause_or_resume_reader()
def on_remote_input_under_water(self, under_water: bool) -> None:
"""Call when remote input is under water."""
if self._debug:
_LOGGER.debug(
"Remote input is under water: %s for %s",
under_water,
self._id,
)
self._remote_input_under_water = under_water
self._pause_or_resume_reader()
def _pause_or_resume_reader(self) -> None:
"""Pause or resume reader."""
# Pause if either local output or remote input is under water
# Resume if both local output and remote input are not under water
if self._pause_resume_reader_callback is None:
return
pause_reader = self._local_output_under_water or self._remote_input_under_water
if self._reader_paused != pause_reader:
self._reader_paused = pause_reader
self._pause_resume_reader_callback(pause_reader)
@property
def id(self) -> MultiplexerChannelId:
"""Return ID of this channel."""
return self._id
@property
def ip_address(self) -> IPv4Address:
"""Return caller IP4Address."""
return self._ip_address
@property
def unhealthy(self) -> bool:
"""Return True if an error has occurred."""
return self._input.full()
@property
def closing(self) -> bool:
"""Return True if channel is in closing state."""
return self._closing
def close(self) -> None:
"""Close channel on next run."""
_LOGGER.debug("Schedule close channel %s", self._id)
self._closing = True
with suppress(asyncio.QueueFull):
self._input.put_nowait(None)
async def write(self, data: bytes) -> None:
"""Send data to peer."""
if not data:
raise MultiplexerTransportError
if self._closing:
raise MultiplexerTransportClose
# Create message
message = tuple.__new__(
MultiplexerMessage,
(self._id, CHANNEL_FLOW_DATA, data, b""),
)
try:
# Try to avoid the timer handle if we can
# add to the queue without waiting
self._output.put_nowait(self._id, message)
except asyncio.QueueFull:
try:
async with asyncio_timeout.timeout(5):
await self._output.put(self._id, message)
except TimeoutError:
if self._debug:
_LOGGER.debug("Can't write to peer transport")
raise MultiplexerTransportError from None
if self._throttling is not None:
await asyncio.sleep(self._throttling)
async def read(self) -> bytes:
"""Read data from peer."""
if self._closing and self._input.empty():
message = None
else:
message = await self._input.get()
# Send data
if message is not None:
return message.data
_LOGGER.debug("Read a close message for channel %s", self._id)
raise MultiplexerTransportClose
def init_close(self) -> MultiplexerMessage:
"""Init close message for transport."""
if self._debug:
_LOGGER.debug("Sending close channel %s", self._id)
return MultiplexerMessage(self._id, CHANNEL_FLOW_CLOSE)
def init_new(self) -> MultiplexerMessage:
"""Init new session for transport."""
if self._debug:
_LOGGER.debug("Sending new channel %s", self._id)
extra = b"4" + ip_address_to_bytes(self.ip_address)
return MultiplexerMessage(self._id, CHANNEL_FLOW_NEW, b"", extra)
def message_transport(self, message: MultiplexerMessage) -> None:
"""Only for internal usage of core transport."""
if self._closing:
return
try:
self._input.put_nowait(message)
except asyncio.QueueFull:
_LOGGER.warning("Channel %s input is full", self._id)
snitun-0.42.0/snitun/multiplexer/const.py 0000664 0000000 0000000 00000003125 14773172534 0020525 0 ustar 00root root 0000000 0000000 """This file contains the constants used by the multiplexer."""
import os
# When downloading a file, the message size will be
# ~4199990 bytes which is the protocol maximum. Make
# sure we have enough space to store 16 messages
# in the incoming queue before we drop the connection.
DEFAULT_INCOMING_QUEUE_MAX_BYTES_CHANNEL = 1024 * 1024 * 65
DEFAULT_INCOMING_QUEUE_LOW_WATERMARK = 1024 * 512
DEFAULT_INCOMING_QUEUE_HIGH_WATERMARK = 1024 * 1024 * 2
DEFAULT_OUTGOING_QUEUE_MAX_BYTES_CHANNEL = 1024 * 1024 * 12
DEFAULT_OUTGOING_QUEUE_LOW_WATERMARK = 1024 * 512
DEFAULT_OUTGOING_QUEUE_HIGH_WATERMARK = 1024 * 1024 * 1
INCOMING_QUEUE_MAX_BYTES_CHANNEL = int(
os.getenv(
"MULTIPLEXER_INCOMING_QUEUE_MAX_BYTES_CHANNEL",
DEFAULT_INCOMING_QUEUE_MAX_BYTES_CHANNEL,
),
)
INCOMING_QUEUE_LOW_WATERMARK = int(
os.getenv(
"MULTIPLEXER_INCOMING_QUEUE_LOW_WATERMARK",
DEFAULT_INCOMING_QUEUE_LOW_WATERMARK,
),
)
INCOMING_QUEUE_HIGH_WATERMARK = int(
os.getenv(
"MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK",
DEFAULT_INCOMING_QUEUE_HIGH_WATERMARK,
),
)
OUTGOING_QUEUE_MAX_BYTES_CHANNEL = int(
os.getenv(
"MULTIPLEXER_OUTGOING_QUEUE_MAX_BYTES_CHANNEL",
DEFAULT_OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
),
)
OUTGOING_QUEUE_LOW_WATERMARK = int(
os.getenv(
"MULTIPLEXER_OUTGOING_QUEUE_LOW_WATERMARK",
DEFAULT_OUTGOING_QUEUE_LOW_WATERMARK,
),
)
OUTGOING_QUEUE_HIGH_WATERMARK = int(
os.getenv(
"MULTIPLEXER_OUTGOING_QUEUE_HIGH_WATERMARK",
DEFAULT_OUTGOING_QUEUE_HIGH_WATERMARK,
),
)
PEER_TCP_TIMEOUT = 90
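# Illustrative, not part of the library: the limits above are read from the
# environment at import time, so they can be tuned per deployment without a
# code change. The value below is made up:
#
#   MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK=4194304 python -m your_app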
snitun-0.42.0/snitun/multiplexer/core.py 0000664 0000000 0000000 00000032351 14773172534 0020332 0 ustar 00root root 0000000 0000000 """Multiplexer for SniTun."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
from contextlib import suppress
import ipaddress
import logging
import os
import struct
from typing import Any
from ..exceptions import (
MultiplexerTransportClose,
MultiplexerTransportDecrypt,
MultiplexerTransportError,
)
from ..utils.asyncio import asyncio_timeout
from ..utils.ipaddress import bytes_to_ip_address
from .channel import MultiplexerChannel
from .const import (
OUTGOING_QUEUE_HIGH_WATERMARK,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
PEER_TCP_TIMEOUT,
)
from .crypto import CryptoTransport
from .message import (
CHANNEL_FLOW_CLOSE,
CHANNEL_FLOW_DATA,
CHANNEL_FLOW_NEW,
CHANNEL_FLOW_PAUSE,
CHANNEL_FLOW_PING,
CHANNEL_FLOW_RESUME,
HEADER_SIZE,
HEADER_STRUCT,
MultiplexerChannelId,
MultiplexerMessage,
)
from .queue import MultiplexerMultiChannelQueue
_LOGGER = logging.getLogger(__name__)
class Multiplexer:
"""Multiplexer Socket wrapper."""
__slots__ = [
"_channel_tasks",
"_channels",
"_crypto",
"_healthy",
"_loop",
"_new_connections",
"_peer_protocol_version",
"_processing_task",
"_queue",
"_reader",
"_throttling",
"_writer",
]
def __init__(
self,
crypto: CryptoTransport,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
peer_protocol_version: int,
new_connections: Callable[
[Multiplexer, MultiplexerChannel],
Coroutine[Any, Any, None],
]
| None = None,
throttling: int | None = None,
) -> None:
"""Initialize Multiplexer."""
self._crypto = crypto
self._reader = reader
self._writer = writer
self._peer_protocol_version = peer_protocol_version
self._loop = asyncio.get_event_loop()
self._queue = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
self._healthy = asyncio.Event()
self._channel_tasks: set[asyncio.Task[None]] = set()
self._processing_task = self._loop.create_task(self._runner())
self._channels: dict[MultiplexerChannelId, MultiplexerChannel] = {}
self._new_connections = new_connections
self._throttling = 1 / throttling if throttling else None
@property
def is_connected(self) -> bool:
"""Return True is they is connected."""
return not self._processing_task.done()
def wait(self) -> asyncio.Future[None]:
"""Block until the connection is closed.
Return an awaitable object.
"""
return asyncio.shield(self._processing_task)
def shutdown(self) -> None:
"""Shutdown connection."""
if self._processing_task.done():
return
_LOGGER.debug("Cancel connection")
self._processing_task.cancel()
self._graceful_channel_shutdown()
def _graceful_channel_shutdown(self) -> None:
"""Graceful shutdown of channels."""
for channel in self._channels.values():
self._queue.delete_channel(channel.id)
channel.close()
self._channels.clear()
async def ping(self) -> None:
"""Send a ping flow message to hold the connection open."""
self._healthy.clear()
channel_id = MultiplexerChannelId(os.urandom(16))
try:
self._write_message(
MultiplexerMessage(channel_id, CHANNEL_FLOW_PING, b"", b"ping"),
)
# Wait until pong is received
async with asyncio_timeout.timeout(PEER_TCP_TIMEOUT):
await self._healthy.wait()
except TimeoutError:
_LOGGER.error("Timeout error while pinging peer")
self._loop.call_soon(self.shutdown)
raise MultiplexerTransportError from None
except OSError as exception:
_LOGGER.error("Peer ping failed - %s", exception)
self._loop.call_soon(self.shutdown)
raise MultiplexerTransportError from None
async def _runner(self) -> None:
"""Runner task of processing stream."""
transport = self._writer.transport
from_peer = None
to_peer = None
# Process stream
self._healthy.set()
try:
while not transport.is_closing():
if not from_peer:
from_peer = self._loop.create_task(
self._reader.readexactly(HEADER_SIZE),
)
if not to_peer:
to_peer = self._loop.create_task(self._queue.get())
# Wait until data need to be processed
async with asyncio_timeout.timeout(PEER_TCP_TIMEOUT):
await asyncio.wait(
[from_peer, to_peer],
return_when=asyncio.FIRST_COMPLETED,
)
# From peer
if from_peer.done():
if from_peer_exc := from_peer.exception():
raise from_peer_exc
await self._read_message(from_peer.result())
from_peer = None
# To peer
if to_peer.done():
if to_peer_exc := to_peer.exception():
raise to_peer_exc
if msg := to_peer.result():
self._write_message(msg)
to_peer = None
# Flush buffer
await self._writer.drain()
# throttling
if not self._throttling:
continue
await asyncio.sleep(self._throttling)
except (asyncio.CancelledError, TimeoutError):
_LOGGER.debug("Receive canceling")
with suppress(OSError):
self._writer.write_eof()
await self._writer.drain()
except (
MultiplexerTransportClose,
asyncio.IncompleteReadError,
ConnectionResetError,
OSError,
):
_LOGGER.debug("Transport was closed")
finally:
# Cleanup peer writer
if to_peer and not to_peer.done():
to_peer.cancel()
# Cleanup peer reader
if from_peer:
if not from_peer.done():
from_peer.cancel()
else:
# Retrieve the exception to avoid "exception was never retrieved" warnings
from_peer.exception()
# Cleanup transport
if not transport.is_closing():
with suppress(OSError):
self._writer.close()
self._graceful_channel_shutdown()
_LOGGER.debug("Multiplexer connection is closed")
def _write_message(self, message: MultiplexerMessage) -> None:
"""Write message to peer."""
id_, flow_type, data, extra = message
data_len = len(data)
header = HEADER_STRUCT.pack(
id_.bytes,
flow_type,
data_len,
extra + os.urandom(11 - len(extra)),
)
try:
encrypted_header = self._crypto.encrypt(header)
self._writer.write(
encrypted_header + data if data_len else encrypted_header,
)
except RuntimeError:
raise MultiplexerTransportClose from None
async def _read_message(self, header: bytes) -> None:
"""Read message from peer."""
if not header:
raise MultiplexerTransportClose
channel_id: bytes
flow_type: int
data_size: int
extra: bytes
try:
channel_id, flow_type, data_size, extra = HEADER_STRUCT.unpack(
self._crypto.decrypt(header),
)
except (struct.error, MultiplexerTransportDecrypt):
_LOGGER.warning("Wrong message header received")
return
# Read message data
if data_size:
data = await self._reader.readexactly(data_size)
else:
data = b""
message = tuple.__new__(
MultiplexerMessage,
(MultiplexerChannelId(channel_id), flow_type, data, extra),
)
# Process message to queue
await self._process_message(message)
async def _process_message(self, message: MultiplexerMessage) -> None:
"""Process received message."""
# DATA
flow_type = message.flow_type
if flow_type == CHANNEL_FLOW_DATA:
# check if message exists
if message.id not in self._channels:
_LOGGER.debug("Receive data from unknown channel: %s", message.id)
return
channel = self._channels[message.id]
if channel.closing:
pass
elif channel.unhealthy:
_LOGGER.warning(
"Abort connection, channel %s is not healthy",
channel.id,
)
channel.close()
self.delete_channel(channel)
else:
channel.message_transport(message)
# New
elif flow_type == CHANNEL_FLOW_NEW:
# Check if we would handle new connection
if not self._new_connections:
_LOGGER.warning("Request new Channel is not allow")
return
ip_address = bytes_to_ip_address(message.extra[1:5])
channel = MultiplexerChannel(
self._queue,
ip_address,
self._peer_protocol_version,
channel_id=message.id,
throttling=self._throttling,
)
self._channels[channel.id] = channel
self._create_channel_task(self._new_connections(self, channel))
# Close
elif flow_type == CHANNEL_FLOW_CLOSE:
# check if message exists
if channel_ := self._delete_channel_and_queue(message.id):
channel_.close()
else:
_LOGGER.debug("Receive close from unknown channel: %s", message.id)
# Ping
elif flow_type == CHANNEL_FLOW_PING:
if message.extra.startswith(b"pong"):
_LOGGER.debug("Receive pong from peer / reset healthy")
self._healthy.set()
else:
_LOGGER.debug("Receive ping from peer / send pong")
self._write_message(
MultiplexerMessage(message.id, CHANNEL_FLOW_PING, b"", b"pong"),
)
# Pause or Resume
elif flow_type in (CHANNEL_FLOW_PAUSE, CHANNEL_FLOW_RESUME):
# When the remote input is under water state changes
# call the on_remote_input_under_water method
if channel_ := self._channels.get(message.id):
channel_.on_remote_input_under_water(
message.flow_type == CHANNEL_FLOW_PAUSE,
)
else:
_LOGGER.debug(
"Receive %s from unknown channel: %s",
"pause" if flow_type == CHANNEL_FLOW_PAUSE else "resume",
message.id,
)
else:
_LOGGER.warning(
"Receive unknown message type: %s for channel %s",
message.flow_type,
message.id,
)
def _create_channel_task(self, coro: Coroutine[Any, Any, None]) -> None:
"""Create a new task for channel."""
task = self._loop.create_task(coro)
self._channel_tasks.add(task)
task.add_done_callback(self._channel_tasks.remove)
async def create_channel(
self,
ip_address: ipaddress.IPv4Address,
pause_resume_reader_callback: Callable[[bool], None],
) -> MultiplexerChannel:
"""Create a new channel for transport."""
channel = MultiplexerChannel(
self._queue,
ip_address,
self._peer_protocol_version,
pause_resume_reader_callback,
throttling=self._throttling,
)
message = channel.init_new()
try:
async with asyncio_timeout.timeout(5):
await self._queue.put(channel.id, message)
except TimeoutError:
raise MultiplexerTransportError from None
self._channels[channel.id] = channel
return channel
def delete_channel(self, channel: MultiplexerChannel) -> None:
"""Delete channel from transport."""
if channel.id not in self._channels:
# Make sure the queue is cleaned up if the channel
# is already deleted
self._queue.delete_channel(channel.id)
return
message = channel.init_close()
try:
self._queue.put_nowait_force(channel.id, message)
finally:
self._delete_channel_and_queue(channel.id)
def _delete_channel_and_queue(
self,
channel_id: MultiplexerChannelId,
) -> MultiplexerChannel | None:
"""Delete channel and queue from multiplexer if it exists."""
self._queue.delete_channel(channel_id)
return self._channels.pop(channel_id, None)
snitun-0.42.0/snitun/multiplexer/crypto.py 0000664 0000000 0000000 00000002133 14773172534 0020715 0 ustar 00root root 0000000 0000000 """Encrypt or Decrypt multiplexer transport data."""
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from ..exceptions import MultiplexerTransportDecrypt
class CryptoTransport:
"""Encrypt/Decrypt Transport flow."""
__slots__ = ["_cipher", "_decryptor", "_encryptor"]
def __init__(self, key: bytes, iv: bytes) -> None:
"""Initialize crypto data."""
self._cipher = Cipher(
algorithms.AES(key),
modes.CBC(iv),
backend=default_backend(),
)
self._encryptor = self._cipher.encryptor()
self._decryptor = self._cipher.decryptor()
def encrypt(self, data: bytes) -> bytes:
"""Encrypt data from transport."""
return self._encryptor.update(data)
def decrypt(self, data: bytes) -> bytes:
"""Decrypt data from transport."""
try:
return self._decryptor.update(data)
except InvalidTag:
raise MultiplexerTransportDecrypt from None
snitun-0.42.0/snitun/multiplexer/message.py 0000664 0000000 0000000 00000005060 14773172534 0021023 0 ustar 00root root 0000000 0000000 """Multiplexer message handling."""
from enum import IntEnum
from functools import cached_property, lru_cache
import struct
from typing import NamedTuple
MIN_PROTOCOL_VERSION_FOR_PAUSE_RESUME = 1
class FlowType(IntEnum):
"""Flow type for multiplexer message.
Note that only one byte is available for the flow type.
"""
NEW = 0x01 # protocol_version 0
DATA = 0x02 # protocol_version 0
CLOSE = 0x04 # protocol_version 0
PING = 0x08 # protocol_version 0
PAUSE = 0x16 # protocol_version 1
RESUME = 0x32 # protocol_version 1
@cached_property
def value(self) -> int:
"""Return the value of the flow type."""
return self._value_
CHANNEL_FLOW_NEW = FlowType.NEW.value
CHANNEL_FLOW_DATA = FlowType.DATA.value
CHANNEL_FLOW_CLOSE = FlowType.CLOSE.value
CHANNEL_FLOW_PING = FlowType.PING.value
CHANNEL_FLOW_PAUSE = FlowType.PAUSE.value
CHANNEL_FLOW_RESUME = FlowType.RESUME.value
# |-----------------HEADER---------------------------------|
# |------ID-----|--FLAG--|--SIZE--|---------EXTRA ---------|
# | 16 bytes | 1 byte | 4 bytes| 11 bytes |
# |--------------------------------------------------------|
# >: All bytes are big-endian and unsigned
# 16s: 16 bytes: Channel ID - random
# B: 1 byte: Flow type - 0x01: NEW, 0x02: DATA, 0x04: CLOSE, 0x08: PING, 0x16: PAUSE, 0x32: RESUME
# I: 4 bytes: Data size - 0-4294967295
# 11s: 11 bytes: Extra - data + random padding
HEADER_STRUCT = struct.Struct(">16sBI11s")
HEADER_SIZE = HEADER_STRUCT.size
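# Illustrative sketch, not part of the library: packing and unpacking a frame
# header with HEADER_STRUCT, the way the multiplexer transport does. The
# channel id and payload are made up.
def _example_header_roundtrip() -> None:
    import os

    channel_id = os.urandom(16)
    data = b"payload"
    extra = b""
    header = HEADER_STRUCT.pack(
        channel_id,
        CHANNEL_FLOW_DATA,
        len(data),
        extra + os.urandom(11 - len(extra)),  # extra is always padded to 11 bytes
    )
    unpacked_id, flow_type, data_size, _extra = HEADER_STRUCT.unpack(header)
    assert unpacked_id == channel_id
    assert flow_type == CHANNEL_FLOW_DATA
    assert data_size == len(data)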
class MultiplexerChannelId(bytes):
"""Represent a channel ID aka multiplexer stream."""
@cached_property
def bytes(self) -> "bytes":
"""Return bytes representation of the channel ID."""
return self
def __str__(self) -> str:
"""Return string representation for logger."""
return self.hex()
@lru_cache
def try_parse_flow_type(flow_type: int) -> FlowType | int:
"""Try to parse flow type."""
try:
return FlowType(flow_type)
except ValueError:
return flow_type
class MultiplexerMessage(NamedTuple):
"""Represent a message from multiplexer stream."""
id: MultiplexerChannelId
flow_type: FlowType | int
data: bytes = b""
extra: bytes = b""
def __repr__(self) -> str:
"""Return string representation for logger."""
return (
"MultiplexerMessage("
f"id={self.id.hex()}, "
f"flow_type={try_parse_flow_type(self.flow_type)!r}, "
f"data={self.data!r}, "
f"extra={self.extra!r}"
")"
)
snitun-0.42.0/snitun/multiplexer/queue.py 0000664 0000000 0000000 00000025730 14773172534 0020531 0 ustar 00root root 0000000 0000000 """Multiplexer message queues."""
from __future__ import annotations
import asyncio
from collections import OrderedDict, deque
from collections.abc import Callable
import contextlib
from dataclasses import dataclass, field
import logging
from .message import HEADER_SIZE, MultiplexerChannelId, MultiplexerMessage
_LOGGER = logging.getLogger(__name__)
@dataclass(slots=True)
class _ChannelQueue:
"""Channel queue.
A queue that manages a single channel, with a size limit.
total_bytes: the size of the queue in bytes instead of the number of items.
queue: a deque of MultiplexerMessage | None.
putters: a deque of asyncio.Future[None] which is used to wake up putters
when the queue is full and space becomes available.
"""
under_water_callback: Callable[[bool], None]
total_bytes: int = 0
under_water: bool = False
pending_close: bool = False
queue: deque[MultiplexerMessage | None] = field(default_factory=deque)
putters: deque[asyncio.Future[None]] = field(default_factory=deque)
def _effective_size(message: MultiplexerMessage | None) -> int:
"""Return the effective size of the message."""
return 0 if message is None else HEADER_SIZE + len(message.data)
class MultiplexerSingleChannelQueue(asyncio.Queue[MultiplexerMessage | None]):
"""Multiplexer single channel queue.
qsize is the size of the queue in bytes instead of the number of items.
Note that the queue is allowed to go over by one message
because we are subclassing asyncio.Queue and it is not
possible to prevent this without reimplementing the whole
class, which is not worth it since it's OK if we go over by
one message.
"""
_total_bytes: int = 0
def __init__(
self,
maxsize: int,
low_water_mark: int,
high_water_mark: int,
under_water_callback: Callable[[bool], None],
) -> None:
"""Initialize Multiplexer Queue."""
self._low_water_mark = low_water_mark
self._high_water_mark = high_water_mark
self._under_water_callback = under_water_callback
self._under_water: bool = False
super().__init__(maxsize)
def _put(self, message: MultiplexerMessage | None) -> None:
"""Put a message in the queue."""
self._total_bytes += _effective_size(message)
super()._put(message)
if not self._under_water and self._total_bytes >= self._high_water_mark:
self._under_water = True
self._under_water_callback(True)
def _get(self) -> MultiplexerMessage | None:
"""Get a message from the queue."""
message = super()._get()
self._total_bytes -= _effective_size(message)
if self._under_water and self._total_bytes <= self._low_water_mark:
self._under_water = False
self._under_water_callback(False)
return message
def qsize(self) -> int:
"""Size of the queue in bytes."""
return self._total_bytes
class MultiplexerMultiChannelQueue:
"""Multiplexer multi channel queue.
A queue that manages multiple channels, each with a size limit.
This class allows for asynchronous message passing between multiple channels,
ensuring that each channel does not exceed a specified size limit.
When fetching from the queue, the channels are fetched in a round-robin
fashion, ensuring that no channel is starved.
"""
def __init__(
self,
channel_size_limit: int,
channel_low_water_mark: int,
channel_high_water_mark: int,
) -> None:
"""Initialize Multiplexer Queue.
Args:
channel_size_limit (int): The maximum size of a channel
data queue in bytes.
"""
self._channel_size_limit = channel_size_limit
self._channel_low_water_mark = channel_low_water_mark
self._channel_high_water_mark = channel_high_water_mark
self._channels: dict[MultiplexerChannelId, _ChannelQueue] = {}
# _order controls which channel_id to get next. We use
# an OrderedDict because we need to use popitem(last=False)
# here to maintain FIFO order.
self._order: OrderedDict[MultiplexerChannelId, None] = OrderedDict()
self._getters: deque[asyncio.Future[None]] = deque()
self._loop = asyncio.get_running_loop()
def create_channel(
self,
channel_id: MultiplexerChannelId,
under_water_callback: Callable[[bool], None],
) -> None:
"""Create a new channel."""
_LOGGER.debug("Queue creating channel %s", channel_id)
if channel_id in self._channels:
raise RuntimeError(f"Channel {channel_id} already exists")
self._channels[channel_id] = _ChannelQueue(under_water_callback)
def delete_channel(self, channel_id: MultiplexerChannelId) -> None:
"""Delete a channel."""
if channel := self._channels.get(channel_id):
if channel.queue:
channel.pending_close = True
else:
del self._channels[channel_id]
def _wakeup_next(self, waiters: deque[asyncio.Future[None]]) -> None:
"""Wake up the next waiter."""
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
async def put(
self,
channel_id: MultiplexerChannelId,
message: MultiplexerMessage | None,
) -> None:
"""Put a message in the queue."""
# Based on asyncio.Queue.put()
if not (channel := self._channels.get(channel_id)):
raise RuntimeError(f"Channel {channel_id} does not exist or already closed")
size = _effective_size(message)
while channel.total_bytes + size > self._channel_size_limit: # full
putter = self._loop.create_future()
channel.putters.append(putter)
try:
await putter
except:
putter.cancel() # Just in case putter is not done yet.
with contextlib.suppress(ValueError):
# Clean self._putters from canceled putters.
channel.putters.remove(putter)
if not self.full(channel_id) and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(channel.putters)
raise
self._put(channel_id, channel, message, size)
def put_nowait(
self,
channel_id: MultiplexerChannelId,
message: MultiplexerMessage | None,
) -> None:
"""Put a message in the queue.
Raises:
asyncio.QueueFull: If the queue is full.
"""
size = _effective_size(message)
if not (channel := self._channels.get(channel_id)):
raise RuntimeError(f"Channel {channel_id} does not exist or already closed")
if channel.total_bytes + size > self._channel_size_limit:
raise asyncio.QueueFull
self._put(channel_id, channel, message, size)
def put_nowait_force(
self,
channel_id: MultiplexerChannelId,
message: MultiplexerMessage | None,
) -> None:
"""Put a message in the queue.
This method is used to force a message into the queue without
checking if the queue is full. This is used when a channel is
being closed.
"""
if not (channel := self._channels.get(channel_id)):
raise RuntimeError(f"Channel {channel_id} does not exist or already closed")
self._put(channel_id, channel, message, _effective_size(message))
def _put(
self,
channel_id: MultiplexerChannelId,
channel: _ChannelQueue,
message: MultiplexerMessage | None,
size: int,
) -> None:
"""Put a message in the queue."""
channel.queue.append(message)
channel.total_bytes += size
self._order[channel_id] = None
if (
not channel.under_water
and channel.total_bytes >= self._channel_high_water_mark
):
channel.under_water = True
channel.under_water_callback(True)
self._wakeup_next(self._getters)
async def get(self) -> MultiplexerMessage | None:
"""Asynchronously retrieve the next `MultiplexerMessage` from the queue."""
# Based on asyncio.Queue.get()
while not self._order: # order is which channel_id to get next
getter = self._loop.create_future()
self._getters.append(getter)
try:
await getter
except:
getter.cancel() # Just in case getter is not done yet.
with contextlib.suppress(ValueError):
# Clean self._getters from canceled getters.
self._getters.remove(getter)
# order is which channel_id to get next
if self._order and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._getters)
raise
return self.get_nowait()
def get_nowait(self) -> MultiplexerMessage | None:
"""Get a message from the queue.
Raises:
asyncio.QueueEmpty: If the queue is empty.
"""
if not self._order:
raise asyncio.QueueEmpty
channel_id, _ = self._order.popitem(last=False)
channel = self._channels[channel_id]
message = channel.queue.popleft()
size = _effective_size(message)
channel.total_bytes -= size
if channel.queue:
# Now put the channel_id back, but at the end of the queue
# so the next get will get the next waiting channel_id.
self._order[channel_id] = None
elif channel.pending_close:
# Got to the end of the queue and the channel wants
# to close so we now drop the channel.
del self._channels[channel_id]
if channel.under_water and channel.total_bytes <= self._channel_low_water_mark:
channel.under_water = False
channel.under_water_callback(False)
if channel.putters:
self._wakeup_next(channel.putters)
return message
def empty(self, channel_id: MultiplexerChannelId) -> bool:
"""Empty the queue."""
if not (channel := self._channels.get(channel_id)):
return True
return channel.total_bytes == 0
def size(self, channel_id: MultiplexerChannelId) -> int:
"""Return the size of the channel queue in bytes."""
if not (channel := self._channels.get(channel_id)):
return 0
return channel.total_bytes
def full(self, channel_id: MultiplexerChannelId) -> bool:
"""Return True if the channel queue is full."""
if not (channel := self._channels.get(channel_id)):
return False
return channel.total_bytes >= self._channel_size_limit
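# Illustrative sketch, not part of the library: the multi channel queue drains
# channels in round-robin order so no channel starves. IDs, limits and
# payloads are made up.
async def _example_round_robin_queue() -> None:
    import os

    queue = MultiplexerMultiChannelQueue(1024 * 1024, 1024, 4096)
    chan_a = MultiplexerChannelId(os.urandom(16))
    chan_b = MultiplexerChannelId(os.urandom(16))
    queue.create_channel(chan_a, lambda under_water: None)
    queue.create_channel(chan_b, lambda under_water: None)
    queue.put_nowait(chan_a, MultiplexerMessage(chan_a, 0x02, b"a1"))  # 0x02 == DATA
    queue.put_nowait(chan_a, MultiplexerMessage(chan_a, 0x02, b"a2"))
    queue.put_nowait(chan_b, MultiplexerMessage(chan_b, 0x02, b"b1"))
    received = []
    for _ in range(3):
        message = await queue.get()
        assert message is not None
        received.append(message.data)
    # Channel A does not starve channel B: a1, b1, a2
    assert received == [b"a1", b"b1", b"a2"]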
snitun-0.42.0/snitun/server/ 0000775 0000000 0000000 00000000000 14773172534 0015760 5 ustar 00root root 0000000 0000000 snitun-0.42.0/snitun/server/__init__.py 0000664 0000000 0000000 00000000035 14773172534 0020067 0 ustar 00root root 0000000 0000000 """SniTun server library."""
snitun-0.42.0/snitun/server/listener_peer.py 0000664 0000000 0000000 00000005463 14773172534 0021202 0 ustar 00root root 0000000 0000000 """Public peer interface."""
from __future__ import annotations
import asyncio
from contextlib import suppress
import logging
from ..exceptions import SniTunChallengeError, SniTunInvalidPeer
from ..utils.asyncio import asyncio_timeout
from .peer_manager import PeerManager
_LOGGER = logging.getLogger(__name__)
CHECK_VALID_EXPIRE = 14400
class PeerListener:
"""Peer Listener class."""
def __init__(
self,
peer_manager: PeerManager,
host: str | None = None,
port: int | None = None,
) -> None:
"""Initialize SNI Proxy interface."""
self._peer_manager = peer_manager
self._host = host
self._port = port or 8080
self._server: asyncio.Server | None = None
async def start(self) -> None:
"""Start peer server."""
self._server = await asyncio.start_server(
self.handle_connection,
host=self._host,
port=self._port,
)
async def stop(self) -> None:
"""Stop peer server."""
assert self._server is not None, "Server not started"
self._server.close()
await self._server.wait_closed()
async def handle_connection(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
data: bytes | None = None,
) -> None:
"""Handle incoming requests."""
if not data:
try:
async with asyncio_timeout.timeout(2):
fernet_data = await reader.read(2048)
except TimeoutError:
_LOGGER.warning("Abort peer handshake")
writer.close()
return
except OSError:
return
else:
fernet_data = data
peer = None
try:
# Connection closed before data received
if not fernet_data:
return
peer = self._peer_manager.create_peer(fernet_data)
# Start multiplexer
await peer.init_multiplexer_challenge(reader, writer)
self._peer_manager.add_peer(peer)
while peer.is_connected:
try:
async with asyncio_timeout.timeout(CHECK_VALID_EXPIRE):
await peer.wait_disconnect()
except TimeoutError:
if not peer.is_valid:
break
except SniTunInvalidPeer:
_LOGGER.debug("Close because invalid fernet data")
except SniTunChallengeError:
_LOGGER.debug("Close because challenge was wrong")
finally:
if peer:
self._peer_manager.remove_peer(peer)
# Cleanup transport
if not writer.transport.is_closing():
with suppress(OSError):
writer.close()
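# Illustrative sketch, not part of the library: starting a peer listener with
# a PeerManager. The fernet keys, host and port are made up.
async def _example_start_peer_listener(fernet_keys: list[str]) -> None:
    manager = PeerManager(fernet_keys)
    listener = PeerListener(manager, host="0.0.0.0", port=8080)
    await listener.start()
    # ... later, on shutdown:
    await listener.stop()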
snitun-0.42.0/snitun/server/listener_sni.py 0000664 0000000 0000000 00000017227 14773172534 0021041 0 ustar 00root root 0000000 0000000 """Public proxy interface with SNI."""
from __future__ import annotations
import asyncio
from contextlib import suppress
import ipaddress
import logging
from ..exceptions import (
MultiplexerTransportClose,
MultiplexerTransportError,
ParseSNIError,
)
from ..multiplexer.channel import ChannelFlowControlBase
from ..multiplexer.core import Multiplexer
from ..utils.asyncio import asyncio_timeout
from .peer_manager import PeerManager
from .sni import parse_tls_sni, payload_reader
_LOGGER = logging.getLogger(__name__)
TCP_SESSION_TIMEOUT = 60
class SNIProxy:
"""SNI Proxy class."""
def __init__(
self,
peer_manager: PeerManager,
host: str | None = None,
port: int | None = None,
) -> None:
"""Initialize SNI Proxy interface."""
self._peer_manager = peer_manager
self._loop = asyncio.get_event_loop()
self._host = host
self._port = port or 443
self._server: asyncio.Server | None = None
async def start(self) -> None:
"""Start Proxy server."""
self._server = await asyncio.start_server(
self.handle_connection,
host=self._host,
port=self._port,
)
async def stop(self) -> None:
"""Stop proxy server."""
assert self._server is not None, "Server not started"
self._server.close()
await self._server.wait_closed()
async def handle_connection(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
data: bytes | None = None,
sni: str | None = None,
) -> None:
"""Handle incoming requests."""
if data is None:
try:
async with asyncio_timeout.timeout(2):
client_hello = await payload_reader(reader)
except TimeoutError:
_LOGGER.warning("Abort SNI handshake")
writer.close()
return
except OSError:
return
else:
client_hello = data
# Connection closed before data received
if not client_hello:
with suppress(OSError):
writer.close()
return
try:
# Read Hostname
if sni is None:
try:
hostname = parse_tls_sni(client_hello)
except ParseSNIError:
_LOGGER.warning("Receive invalid ClientHello on public Interface")
return
else:
hostname = sni
# Peer available?
if not self._peer_manager.peer_available(hostname):
_LOGGER.debug("Hostname %s not connected", hostname)
return
peer = self._peer_manager.get_peer(hostname)
assert peer is not None, "Peer not found"
# Proxy data over multiplexer to client
_LOGGER.debug("Processing for hostname %s started", hostname)
assert peer.multiplexer is not None, "Multiplexer not initialized"
await self._proxy_peer(peer.multiplexer, client_hello, reader, writer)
finally:
if not writer.transport.is_closing():
with suppress(OSError):
writer.close()
async def _proxy_peer(
self,
multiplexer: Multiplexer,
client_hello: bytes,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Proxy data between end points."""
try:
ip_address = ipaddress.IPv4Address(writer.get_extra_info("peername")[0])
except (TypeError, AttributeError):
_LOGGER.error("Can't read source IP")
return
handler = ProxyPeerHandler(self._loop, ip_address)
await handler.start(multiplexer, client_hello, reader, writer)
class ProxyPeerHandler(ChannelFlowControlBase):
"""Proxy Peer Handler."""
def __init__(
self,
loop: asyncio.AbstractEventLoop,
ip_address: ipaddress.IPv4Address,
) -> None:
"""Initialize ProxyPeerHandler."""
super().__init__(loop)
self._ip_address = ip_address
async def start(
self,
multiplexer: Multiplexer,
client_hello: bytes,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Start handler."""
ip_address = self._ip_address
transport = writer.transport
from_proxy: asyncio.Future[None] | asyncio.Task[bytes] | None = None
from_peer = None
# Open multiplexer channel
try:
channel = self._channel = await multiplexer.create_channel(
ip_address,
self._pause_resume_reader_callback,
)
except MultiplexerTransportError:
_LOGGER.error("New transport channel to peer fails")
return
try:
await channel.write(client_hello)
# Process stream into multiplexer
while not transport.is_closing():
if not from_proxy:
# If the multiplexer channel queue is under water, pause the reader
# by waiting for the future to be set, once the queue is not under
# water the future will be set and cleared to resume the reader
from_proxy = self._pause_future or self._loop.create_task(
reader.read(4096), # type: ignore[arg-type]
)
if not from_peer:
from_peer = self._loop.create_task(channel.read())
# Wait until data need to be processed
async with asyncio_timeout.timeout(TCP_SESSION_TIMEOUT):
await asyncio.wait(
[from_proxy, from_peer],
return_when=asyncio.FIRST_COMPLETED,
)
# From proxy
if from_proxy.done():
if from_proxy_exc := from_proxy.exception():
raise from_proxy_exc
if (from_proxy_result := from_proxy.result()) is not None:
await channel.write(from_proxy_result)
from_proxy = None
# From peer
if from_peer.done():
if from_peer_exc := from_peer.exception():
raise from_peer_exc
writer.write(from_peer.result())
from_peer = None
# Flush buffer
await writer.drain()
except TimeoutError:
_LOGGER.debug("Close TCP session after timeout for %s", channel.id)
multiplexer.delete_channel(channel)
except OSError as exc:
_LOGGER.debug(
"Transport closed by Proxy for %s: %s",
channel.id,
exc,
exc_info=True,
)
multiplexer.delete_channel(channel)
except (MultiplexerTransportError, RuntimeError, ConnectionResetError) as exc:
_LOGGER.debug("Transport closed by Proxy for %s: %s", channel.id, exc)
multiplexer.delete_channel(channel)
except MultiplexerTransportClose:
_LOGGER.debug("Peer close connection for %s", channel.id)
finally:
# Cleanup peer reader
if from_peer:
if not from_peer.done():
from_peer.cancel()
else:
# Retrieve the exception to avoid "exception was never retrieved" warnings
from_peer.exception()
# Cleanup proxy reader
if from_proxy and not from_proxy.done():
from_proxy.cancel()
snitun-0.42.0/snitun/server/peer.py 0000664 0000000 0000000 00000006652 14773172534 0017276 0 ustar 00root root 0000000 0000000 """Represent a single Peer."""
from __future__ import annotations
import asyncio
from datetime import UTC, datetime
import hashlib
import logging
import os
from ..exceptions import MultiplexerTransportDecrypt, SniTunChallengeError
from ..multiplexer.core import Multiplexer
from ..multiplexer.crypto import CryptoTransport
from ..utils.asyncio import asyncio_timeout
_LOGGER = logging.getLogger(__name__)
class Peer:
"""Representation of a Peer."""
def __init__(
self,
hostname: str,
valid: datetime,
aes_key: bytes,
aes_iv: bytes,
protocol_version: int,
throttling: int | None = None,
alias: list[str] | None = None,
) -> None:
"""Initialize a Peer."""
self._hostname = hostname
self._valid = valid
self._throttling = throttling
self._alias = alias or []
self._multiplexer: Multiplexer | None = None
self._crypto = CryptoTransport(aes_key, aes_iv)
self._protocol_version = protocol_version
@property
def hostname(self) -> str:
"""Return his hostname."""
return self._hostname
@property
def alias(self) -> list[str]:
"""Return the alias."""
return self._alias
@property
def all_hostnames(self) -> list[str]:
"""Return a list of the base hostname and any alias."""
return [self._hostname, *self._alias]
@property
def is_connected(self) -> bool:
"""Return True if we are connected to peer."""
if not self._multiplexer:
return False
return self._multiplexer.is_connected
@property
def is_valid(self) -> bool:
"""Return True if the peer is valid."""
return self._valid > datetime.now(tz=UTC)
@property
def multiplexer(self) -> Multiplexer | None:
"""Return Multiplexer object."""
return self._multiplexer
@property
def is_ready(self) -> bool:
"""Return true if the Peer is ready to process data."""
if self.multiplexer is None:
return False
return self.multiplexer.is_connected
async def init_multiplexer_challenge(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Initialize multiplexer."""
try:
token = hashlib.sha256(os.urandom(40)).digest()
writer.write(self._crypto.encrypt(token))
async with asyncio_timeout.timeout(60):
await writer.drain()
data = await reader.readexactly(32)
# Check Token
data = self._crypto.decrypt(data)
assert hashlib.sha256(token).digest() == data
except (
TimeoutError,
asyncio.IncompleteReadError,
MultiplexerTransportDecrypt,
AssertionError,
OSError,
) as err:
raise SniTunChallengeError("Wrong challenge from peer") from err
# Start Multiplexer
self._multiplexer = Multiplexer(
self._crypto,
reader,
writer,
self._protocol_version,
throttling=self._throttling,
)
def wait_disconnect(self) -> asyncio.Future[None]:
"""Wait until peer is disconnected.
Return an awaitable.
"""
if not self._multiplexer:
raise RuntimeError("No Transport initialize for peer")
return self._multiplexer.wait()
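# Handshake summary (descriptive note, derived from init_multiplexer_challenge
# above and the matching client connect code): the server sends an AES-CBC
# encrypted random 32 byte token, the client decrypts it, hashes it with
# SHA-256 and returns the encrypted digest, and the server verifies the digest
# before it starts the multiplexer.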
snitun-0.42.0/snitun/server/peer_manager.py 0000664 0000000 0000000 00000010702 14773172534 0020757 0 ustar 00root root 0000000 0000000 """Manage peer connections."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from datetime import UTC, datetime
from enum import Enum
import json
import logging
from cryptography.fernet import Fernet, InvalidToken, MultiFernet
from ..exceptions import SniTunInvalidPeer
from ..utils.asyncio import asyncio_timeout
from ..utils.server import TokenData
from .peer import Peer
_LOGGER = logging.getLogger(__name__)
class PeerManagerEvent(str, Enum):
"""Peer Manager event flags."""
CONNECTED = "connected"
DISCONNECTED = "disconnected"
class PeerManager:
"""Manage Peer connections."""
def __init__(
self,
fernet_tokens: list[str],
throttling: int | None = None,
event_callback: Callable[[Peer, PeerManagerEvent], None] | None = None,
) -> None:
"""Initialize Peer Manager."""
self._fernet = MultiFernet([Fernet(key) for key in fernet_tokens])
self._loop = asyncio.get_event_loop()
self._throttling = throttling
self._event_callback = event_callback
self._peers: dict[str, Peer] = {}
@property
def connections(self) -> int:
"""Return count of connected devices."""
return len(self._peers)
def create_peer(self, fernet_data: bytes) -> Peer:
"""Create a new peer from crypt config."""
try:
data = self._fernet.decrypt(fernet_data).decode("utf-8")
config: TokenData = json.loads(data)
except (InvalidToken, json.JSONDecodeError, UnicodeDecodeError) as err:
raise SniTunInvalidPeer("Invalid fernet token") from err
# Check if token is valid
valid = datetime.fromtimestamp(config["valid"], tz=UTC)
if valid < datetime.now(tz=UTC):
raise SniTunInvalidPeer("Token was expired")
# Extract configuration
hostname = config["hostname"]
aes_key = bytes.fromhex(config["aes_key"])
aes_iv = bytes.fromhex(config["aes_iv"])
return Peer(
hostname,
valid,
aes_key,
aes_iv,
protocol_version=config.get("protocol_version", 0),
throttling=self._throttling,
alias=config.get("alias", []),
)
def add_peer(self, peer: Peer) -> None:
"""Register peer to internal hostname list."""
if self.peer_available(peer.hostname) and (
multiplexer := self._peers[peer.hostname].multiplexer
):
_LOGGER.warning("Found stale peer connection")
multiplexer.shutdown()
_LOGGER.debug("New peer connection: %s", peer.hostname)
self._peers[peer.hostname] = peer
for alias in peer.alias:
_LOGGER.debug("New peer connection alias: %s for %s", alias, peer.hostname)
self._peers[alias] = peer
if self._event_callback:
self._loop.call_soon(self._event_callback, peer, PeerManagerEvent.CONNECTED)
def remove_peer(self, peer: Peer) -> None:
"""Remove peer from list."""
if self._peers.get(peer.hostname) != peer:
return
_LOGGER.debug("Close peer connection: %s", peer.hostname)
for hostname in peer.all_hostnames:
self._peers.pop(hostname, None)
if self._event_callback:
self._loop.call_soon(
self._event_callback,
peer,
PeerManagerEvent.DISCONNECTED,
)
def peer_available(self, hostname: str) -> bool:
"""Check if peer available and return True or False."""
if hostname in self._peers:
return self._peers[hostname].is_ready
return False
def get_peer(self, hostname: str) -> Peer | None:
"""Get peer."""
return self._peers.get(hostname)
async def close_connections(self, timeout: int = 10) -> None: # noqa: ASYNC109
"""Close all peer connections.
Use this function only if you do not control the server socket.
"""
peers = list(self._peers.values())
for peer in peers:
if peer.is_connected and peer.multiplexer:
peer.multiplexer.shutdown()
if waiters := [peer.wait_disconnect() for peer in peers]:
try:
async with asyncio_timeout.timeout(timeout):
await asyncio.gather(*waiters, return_exceptions=True)
except TimeoutError:
_LOGGER.error("Timeout while waiting for peer disconnect")
snitun-0.42.0/snitun/server/run.py 0000664 0000000 0000000 00000027045 14773172534 0017146 0 ustar 00root root 0000000 0000000 """SniTun reference implementation."""
from __future__ import annotations
import asyncio
from collections.abc import Coroutine, Iterator
from contextlib import suppress
from dataclasses import dataclass
from itertools import cycle
import logging
from multiprocessing import cpu_count
import os
import select
import signal
import socket
from threading import Thread
from typing import Any
from ..exceptions import ParseSNIIncompleteError
from ..utils.asyncio import asyncio_timeout
from ..utils.server import MAX_BUFFER_SIZE, MAX_READ_SIZE
from .listener_peer import PeerListener
from .listener_sni import SNIProxy
from .peer_manager import PeerManager
from .sni import ParseSNIError, parse_tls_sni
from .worker import ServerWorker
_LOGGER = logging.getLogger(__name__)
WORKER_STALE_MAX = 30
class SniTunServer:
"""SniTunServer helper class for Dual port Asyncio."""
def __init__(
self,
fernet_keys: list[str],
sni_port: int | None = None,
sni_host: str | None = None,
peer_port: int | None = None,
peer_host: str | None = None,
throttling: int | None = None,
) -> None:
"""Initialize SniTun Server."""
self._peers: PeerManager = PeerManager(fernet_keys, throttling=throttling)
self._list_sni: SNIProxy = SNIProxy(self._peers, host=sni_host, port=sni_port)
self._list_peer: PeerListener = PeerListener(
self._peers,
host=peer_host,
port=peer_port,
)
@property
def peers(self) -> PeerManager:
"""Return peer manager."""
return self._peers
def start(
self,
) -> Coroutine[Any, Any, tuple[set[asyncio.Task[None]], set[asyncio.Task[None]]]]:
"""Run server.
Return coroutine.
"""
return asyncio.wait(
[
asyncio.create_task(self._list_peer.start()),
asyncio.create_task(self._list_sni.start()),
],
)
def stop(
self,
) -> Coroutine[Any, Any, tuple[set[asyncio.Task[None]], set[asyncio.Task[None]]]]:
"""Stop server.
Return coroutine.
"""
return asyncio.wait(
[
asyncio.create_task(self._list_peer.stop()),
asyncio.create_task(self._list_sni.stop()),
],
)
class SniTunServerSingle:
"""SniTunServer helper class for Single port Asnycio."""
def __init__(
self,
fernet_keys: list[str],
host: str | None = None,
port: int | None = None,
throttling: int | None = None,
) -> None:
"""Initialize SniTun Server."""
self._loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
self._server: asyncio.AbstractServer | None = None
self._peers: PeerManager = PeerManager(fernet_keys, throttling=throttling)
self._list_sni: SNIProxy = SNIProxy(self._peers)
self._list_peer: PeerListener = PeerListener(self._peers)
self._host: str = host or "0.0.0.0"
self._port: int = port or 443
@property
def peers(self) -> PeerManager:
"""Return peer manager."""
return self._peers
async def start(self) -> None:
"""Run server."""
self._server = await asyncio.start_server(
self._handler,
host=self._host,
port=self._port,
)
async def stop(self) -> None:
"""Stop server."""
assert self._server is not None, "Server not started"
self._server.close()
await self._server.wait_closed()
async def _handler(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Handle incoming connection."""
try:
async with asyncio_timeout.timeout(10):
data = await reader.read(2048)
except TimeoutError:
_LOGGER.warning("Abort connection initializing")
writer.close()
return
except OSError:
return
# Connection closed / health check
if not data:
writer.close()
return
# Select the correct handler to process the data
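# Descriptive note: the first byte 0x16 is the TLS handshake record type (a
# ClientHello), while fernet tokens are urlsafe base64 and start with the
# prefix "gA", so peeking at the first bytes is enough to route the
# connection to the SNI proxy or the peer listener.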
if data[0] == 0x16:
self._loop.create_task(
self._list_sni.handle_connection(reader, writer, data=data),
)
elif data.startswith(b"gA"):
self._loop.create_task(
self._list_peer.handle_connection(reader, writer, data=data),
)
else:
_LOGGER.warning("No valid ClientHello found: %s", data)
writer.close()
return
@dataclass(slots=True)
class Connection:
"""Connection data class."""
sock: socket.socket
epoll: select.epoll
buffer: bytes = b""
stale: int = 0
close: bool = False
@property
def fileno(self) -> int:
"""Return filehanle ID."""
return self.sock.fileno()
def soft_close(self) -> None:
"""Socket got handled over."""
self.close = True
self.epoll.unregister(self.fileno)
def close_socket(self, shutdown: bool = True) -> None:
"""Gracefull shutdown a socket or free the handle."""
self.soft_close()
with suppress(OSError):
if shutdown:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
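# Note: soft_close() only unregisters the fd from epoll, because ownership of
# the socket moves to a worker process during handover; close_socket()
# additionally shuts down and closes the socket itself.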
class SniTunServerWorker(Thread):
"""SniTunServer helper class for Worker."""
def __init__(
self,
fernet_keys: list[str],
host: str | None = None,
port: int | None = None,
worker_size: int | None = None,
throttling: int | None = None,
) -> None:
"""Initialize SniTun Server."""
super().__init__()
self._host: str = host or "0.0.0.0"
self._port: int = port or 443
self._fernet_keys: list[str] = fernet_keys
self._throttling: int | None = throttling
self._worker_size: int = worker_size or (cpu_count() * 2)
self._workers: list[ServerWorker] = []
self._running: bool = False
# TCP server
self._server: socket.socket | None = None
self._poller: select.epoll | None = None
@property
def peer_counter(self) -> int:
"""Return number of active peer connections."""
return sum(worker.peer_size for worker in self._workers)
def start(self) -> None:
"""Run server."""
# Initialize all workers first; we don't want the epoll inherited by the children
_LOGGER.info("Run SniTun with %d worker", self._worker_size)
for _ in range(self._worker_size):
worker = ServerWorker(self._fernet_keys, throttling=self._throttling)
worker.start()
self._workers.append(worker)
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((self._host, self._port))
self._server.setblocking(False)
self._server.listen(80 * 1000)
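# A very large backlog is requested here; on Linux the kernel caps the
# effective value at net.core.somaxconn.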
self._running = True
self._poller = select.epoll()
self._poller.register(self._server.fileno(), select.EPOLLIN)
super().start()
def stop(self) -> None:
"""Stop server."""
self._running = False
self.join()
# Shutdown all workers
for worker in self._workers:
worker.shutdown()
worker.close()
self._workers.clear()
assert self._server is not None, "Server not started"
self._server.close()
assert self._poller is not None, "Poller not started"
self._poller.close()
def run(self) -> None:
"""Handle incoming connection."""
assert self._server is not None, "Server not started"
fd_server = self._server.fileno()
connections: dict[int, Connection] = {}
worker_lb = cycle(self._workers)
_LOGGER.info("Server started, fd: %s", fd_server)
assert self._poller is not None, "Poller not started"
while self._running:
events = self._poller.poll(1)
for fileno, event in events:
# New Connection
if fileno == fd_server:
con, _ = self._server.accept()
con.setblocking(False)
self._poller.register(
con.fileno(),
select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR,
)
connections[con.fileno()] = Connection(con, self._poller)
# Read hello & forward to worker
elif event & select.EPOLLIN:
client = connections[fileno]
client.stale = 0 # reset stale count
# Process connection
self._process(client, worker_lb)
# Partial read
if not client.close:
continue
connections.pop(fileno)
# Close
else:
client = connections.pop(fileno)
client.close_socket(shutdown=False)
# Clean up stale connections
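# The stale counter increments once per poll() pass without activity for a
# connection; with a 1 second poll timeout and WORKER_STALE_MAX = 30, an idle
# connection that was never handed over is dropped after roughly 30 seconds.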
for client_id in tuple(connections.keys()):
client = connections[client_id]
if client.stale >= WORKER_STALE_MAX:
connections.pop(client.fileno)
client.close_socket()
else:
client.stale += 1
# Check if workers are running
for worker in self._workers:
if worker.is_alive():
continue
_LOGGER.critical("Worker '%s' crashed!", worker.name)
os.kill(os.getpid(), signal.SIGINT)
def _process(
self,
client: Connection,
workers_lb: Iterator[ServerWorker],
) -> None:
"""Process connection & helo."""
try:
data = client.sock.recv(MAX_READ_SIZE)
except OSError as err:
_LOGGER.warning("Receive fails: %s", err)
client.close_socket(shutdown=False)
return
# No data received
if not data:
client.close_socket()
return
client.buffer += data
# Peer connection
if client.buffer.startswith(b"gA"):
client.soft_close()
next(workers_lb).handover_connection(client.sock, client.buffer)
_LOGGER.debug("Handover new peer connection: %s", client.buffer)
return
# TLS/SSL connection
if client.buffer[0] != 0x16:
_LOGGER.warning("No valid ClientHello found: %s", client.buffer)
client.close_socket()
return
# Get Hostname
try:
hostname = parse_tls_sni(client.buffer)
except ParseSNIIncompleteError:
# Check Buffer Size
if len(client.buffer) >= MAX_BUFFER_SIZE:
_LOGGER.warning("Connection %d exceed buffer size", client.fileno)
client.close_socket()
return
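# An incomplete ClientHello is not an error: returning here without closing
# keeps the connection registered with epoll, so the next EPOLLIN event
# appends more bytes to client.buffer before parsing is retried.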
except ParseSNIError:
_LOGGER.warning("Receive invalid ClientHello on public Interface")
client.close_socket()
return
# Distribute to child worker
for worker in self._workers:
if not worker.is_responsible_peer(hostname):
continue
client.soft_close()
worker.handover_connection(client.sock, client.buffer, sni=hostname)
_LOGGER.debug("Handover %s to %s", hostname, worker.name)
return
_LOGGER.debug("No responsible worker for %s", hostname)
client.close_socket()
return
snitun-0.42.0/snitun/server/sni.py 0000664 0000000 0000000 00000010457 14773172534 0017132 0 ustar 00root root 0000000 0000000 """TLS ClientHello parser."""
from __future__ import annotations
import asyncio
import logging
from ..exceptions import ParseSNIError, ParseSNIIncompleteError
from ..utils.server import MAX_BUFFER_SIZE, MAX_READ_SIZE
_LOGGER = logging.getLogger(__name__)
TLS_HEADER_LEN = 5
TLS_HANDSHAKE_CONTENT_TYPE = 0x16
TLS_HANDSHAKE_TYPE_CLIENT_HELLO = 0x01
async def payload_reader(reader: asyncio.StreamReader) -> bytes | None:
"""Read data from reader."""
try:
header = await reader.read(6)
except ConnectionResetError:
raise ParseSNIError from None
if not header:
raise ParseSNIError
if len(header) < 6:
raise ParseSNIError
if (
header[0] != TLS_HANDSHAKE_CONTENT_TYPE
or header[5] != TLS_HANDSHAKE_TYPE_CLIENT_HELLO
):
return None
tls_size = (header[3] << 8) + header[4] + TLS_HEADER_LEN
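# TLS record header layout: byte 0 is the content type, bytes 1-2 the record
# version and bytes 3-4 the big-endian payload length, so the whole record is
# that length plus the 5 header bytes; byte 5 is the handshake message type.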
data = header
while (data_size := len(data)) < tls_size and data_size <= MAX_BUFFER_SIZE:
try:
chunk = await reader.read(MAX_READ_SIZE)
except ConnectionResetError:
raise ParseSNIError from None
# An empty read means the connection closed mid-handshake; abort instead of looping forever
if not chunk:
raise ParseSNIError
data += chunk
return data
def parse_tls_sni(data: bytes) -> str:
"""Parse TLS SNI extention."""
if (data_size := len(data)) < TLS_HEADER_LEN:
_LOGGER.debug("Invalid TLS header")
raise ParseSNIError
# If TLS handshake
if data[0] != TLS_HANDSHAKE_CONTENT_TYPE:
_LOGGER.debug("Not TLS handshake received")
raise ParseSNIError
# Check compatible ClientHello
if int(data[1]) < 3:
_LOGGER.debug("Received ClientHello without SNI support")
raise ParseSNIError
# Calculate TLS record size
tls_size = (data[3] << 8) + data[4] + TLS_HEADER_LEN
if data_size < tls_size:
_LOGGER.debug("Can't calculate the TLS record size")
raise ParseSNIIncompleteError
# Check if handshake is a ClientHello
pos = TLS_HEADER_LEN
if data[pos] != TLS_HANDSHAKE_TYPE_CLIENT_HELLO:
_LOGGER.debug("Invalid ClientHello type")
raise ParseSNIError
# Seek fixed length header part
pos += 38
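# 38 bytes = 4 byte handshake header (type + 24-bit length) + 2 byte client
# version + 32 byte random, which leaves pos at the SessionID length byte.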
# Seek SessionID
try:
pos += 1 + data[pos]
except IndexError:
_LOGGER.debug("Invalid SessionID")
raise ParseSNIError from None
# Seek Cipher Suites
try:
pos += 2 + (data[pos] << 8) + data[pos + 1]
except IndexError:
_LOGGER.debug("Invalid CipherSuites")
raise ParseSNIError from None
# Seek Compression Methods
try:
pos += 1 + data[pos]
except IndexError:
_LOGGER.debug("Invalid CompressionMethods")
raise ParseSNIError from None
# Check data buffer + extension size
if pos + 2 > data_size:
_LOGGER.debug("Mismatch Extension TLS header")
raise ParseSNIError
# Process extension
return _parse_extension(data, pos, data_size)
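# Caller sketch (illustrative): ParseSNIIncompleteError means the buffer does
# not yet contain the full record, so callers buffer more data and call
# parse_tls_sni() again, as the server run loop does.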
def _parse_extension(data: bytes, pos: int, data_size: int) -> str:
"""Parse TLS ClientHello Extension."""
# Seek Extension start
try:
tls_extension_size = (data[pos] << 8) + data[pos + 1]
pos += 2
except IndexError:
raise ParseSNIError from None
# Check data buffer + extension size
if pos + tls_extension_size > data_size:
_LOGGER.debug("Mismatch Extension TLS header")
raise ParseSNIError
# Loop over extensions until we find our SNI
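# Each extension entry is 2 bytes type + 2 bytes length + payload; type
# 0x0000 is the server_name extension.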
while pos + 4 <= data_size:
# SNI?
if data[pos] == 0x00 and data[pos + 1] == 0x00:
return _parse_host_name(data, pos + 4, data_size)
pos += 4 + (data[pos + 2] << 8) + data[pos + 3]
_LOGGER.debug("Can't find any ServerName Extension")
raise ParseSNIError
def _parse_host_name(data: bytes, pos: int, data_size: int) -> str:
"""Parse TLS ServerName Extension."""
# Seek list size
pos += 2
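# Each ServerNameList entry is 1 byte name type (0x00 = host_name) followed
# by a 2 byte length and the name itself.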
while pos + 3 < data_size:
size = (data[pos + 1] << 8) + data[pos + 2]
# Unknown server name type
if data[pos] != 0x00:
_LOGGER.debug("Unknown ServerName type")
pos += 3 + size
continue
try:
return bytes(data[pos + 3 : pos + 3 + size]).decode("utf-8")
except (IndexError, UnicodeDecodeError):
_LOGGER.debug("Wrong host length/format")
raise ParseSNIError from None
_LOGGER.debug("Not found any valid ServerName")
raise ParseSNIError
snitun-0.42.0/snitun/server/worker.py 0000664 0000000 0000000 00000011523 14773172534 0017645 0 ustar 00root root 0000000 0000000 """SniTun worker for traffic."""
from __future__ import annotations
import asyncio
import logging
from multiprocessing import Manager, Process
from socket import socket
from threading import Thread
from typing import TYPE_CHECKING
from .listener_peer import PeerListener
from .listener_sni import SNIProxy
from .peer import Peer
from .peer_manager import PeerManager, PeerManagerEvent
_LOGGER = logging.getLogger(__name__)
if TYPE_CHECKING:
from multiprocessing.managers import SyncManager
class ServerWorker(Process):
"""Worker for multiplexer."""
def __init__(
self,
fernet_keys: list[str],
throttling: int | None = None,
) -> None:
"""Initialize worker & communication."""
super().__init__()
self._fernet_keys: list[str] = fernet_keys
self._throttling: int | None = throttling
# Used on the child
self._peers: PeerManager | None = None
self._list_sni: SNIProxy | None = None
self._list_peer: PeerListener | None = None
self._loop: asyncio.AbstractEventLoop | None = None
# Communication between Parent/Child
self._manager: SyncManager = Manager()
self._new = self._manager.Queue()
self._sync = self._manager.dict()
self._peer_count = self._manager.Value("peer_count", 0)
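# Parent/child communication: the queue hands new sockets to the child, the
# proxied dict mirrors which hostnames this worker serves (checked by
# is_responsible_peer from the parent), and the shared Value lets the parent
# read the peer count.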
@property
def peer_size(self) -> int:
"""Return amount of managed peers."""
return self._peer_count.value
def is_responsible_peer(self, sni: str) -> bool:
"""Return True if worker is responsible for this peer domain."""
return sni in self._sync
async def _async_init(self) -> None:
"""Initialize child process data."""
self._peers = PeerManager(
self._fernet_keys,
throttling=self._throttling,
event_callback=self._event_stream,
)
self._list_sni = SNIProxy(self._peers)
self._list_peer = PeerListener(self._peers)
def _event_stream(self, peer: Peer, event: PeerManagerEvent) -> None:
"""Event stream peer connection data."""
if event == PeerManagerEvent.CONNECTED:
if peer.hostname not in self._sync:
self._peer_count.set(self._peer_count.value + 1)
for hostname in peer.all_hostnames:
self._sync[hostname] = None
else:
if peer.hostname in self._sync:
self._peer_count.set(self._peer_count.value - 1)
for hostname in peer.all_hostnames:
self._sync.pop(hostname, None)
def shutdown(self) -> None:
"""Shutdown child process."""
self._new.put(None)
self.join(10)
def handover_connection(
self,
con: socket,
data: bytes,
sni: str | None = None,
) -> None:
"""Move new connection to worker."""
self._new.put_nowait((con, data, sni))
def run(self) -> None:
"""Run the worker process."""
_LOGGER.info("Start worker: %s", self.name)
# Init new event loop
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
# Start eventloop
running_loop = Thread(target=self._loop.run_forever)
running_loop.start()
# Init backend
asyncio.run_coroutine_threadsafe(self._async_init(), loop=self._loop).result()
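# The asyncio loop runs in a separate thread inside this child process so the
# blocking queue reads below can forward handed-over sockets to it via
# run_coroutine_threadsafe().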
while True:
new = self._new.get()
if new is None:
break
new[0].setblocking(False)
asyncio.run_coroutine_threadsafe(
self._async_new_connection(*new),
loop=self._loop,
)
# Shutdown worker
_LOGGER.info("Stoping worker: %s", self.name)
assert self._peers is not None, "PeerManager not initialized"
asyncio.run_coroutine_threadsafe(
self._peers.close_connections(),
loop=self._loop,
).result()
self._loop.call_soon_threadsafe(self._loop.stop)
running_loop.join(10)
async def _async_new_connection(
self,
con: socket,
data: bytes,
sni: str | None,
) -> None:
"""Handle incoming connection."""
try:
reader, writer = await asyncio.open_connection(sock=con)
except OSError:
con.close()
return
# Select the correct handler to process the connection
assert self._loop is not None, "Event loop not initialized"
if sni:
assert self._list_sni is not None, "SNIProxy not initialized"
self._loop.create_task(
self._list_sni.handle_connection(reader, writer, data=data, sni=sni),
)
else:
assert self._list_peer is not None, "PeerListener not initialized"
self._loop.create_task(
self._list_peer.handle_connection(reader, writer, data=data),
)
snitun-0.42.0/snitun/utils/ 0000775 0000000 0000000 00000000000 14773172534 0015612 5 ustar 00root root 0000000 0000000 snitun-0.42.0/snitun/utils/__init__.py 0000664 0000000 0000000 00000000163 14773172534 0017723 0 ustar 00root root 0000000 0000000 """Utils & function for implementations."""
from .server import PROTOCOL_VERSION
__all__ = ("PROTOCOL_VERSION",)
snitun-0.42.0/snitun/utils/aes.py 0000664 0000000 0000000 00000000324 14773172534 0016733 0 ustar 00root root 0000000 0000000 """AES helper functions."""
from __future__ import annotations
import os
def generate_aes_keyset() -> tuple[bytes, bytes]:
"""Generate AES key + IV for CBC."""
return (os.urandom(32), os.urandom(16))
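# The returned (key, iv) pair is what gets embedded in the client token (see
# utils/server.py) and is used for the tunnel's AES-CBC transport encryption.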
snitun-0.42.0/snitun/utils/aiohttp_client.py 0000664 0000000 0000000 00000010003 14773172534 0021164 0 ustar 00root root 0000000 0000000 """Helper for handle aiohttp internal server."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
from contextlib import suppress
import logging
import socket
import ssl
from typing import Any
from aiohttp.web import AppRunner, SockSite
from ..client.client_peer import ClientPeer
from ..client.connector import Connector
from .asyncio import asyncio_timeout
_LOGGER = logging.getLogger(__name__)
class SniTunClientAioHttp:
"""Help to handle a internal aiohttp app runner."""
def __init__(
self,
runner: AppRunner,
context: ssl.SSLContext,
snitun_server: str,
snitun_port: int | None = None,
) -> None:
"""Initialize SniTunClient with aiohttp."""
self._connector: Connector | None = None
self._client = ClientPeer(snitun_server, snitun_port)
self._socket = socket.socket()
self._server_name = f"{snitun_server}:{snitun_port}"
# Init interface
self._socket.setblocking(False)
self._socket.bind(("127.0.0.1", 0))
self._site = SockSite(runner, self._socket, ssl_context=context)
@property
def is_connected(self) -> bool:
"""Return True if we are connected to snitun."""
return self._client.is_connected
@property
def whitelist(self) -> set:
"""Return whitelist from connector."""
if self._connector:
return self._connector.whitelist
return set()
def wait(self) -> asyncio.Future[None]:
"""Block until connection to snitun is closed."""
return self._client.wait()
async def start(
self,
whitelist: bool = False,
endpoint_connection_error_callback: Callable[[], Coroutine[Any, Any, None]]
| None = None,
) -> None:
"""Start internal server."""
await self._site.start()
host, port = self._socket.getsockname()[:2]
self._connector = Connector(
host,
port,
whitelist,
endpoint_connection_error_callback=endpoint_connection_error_callback,
)
_LOGGER.info("AioHTTP snitun client started on %s:%s", host, port)
async def stop(self, *, wait: bool = False) -> None:
"""
Stop internal server.
Args:
wait: wait for the socket to close.
"""
await self.disconnect()
with suppress(OSError):
self._socket.close()
with suppress(RuntimeError):
self._site._runner._unreg_site(self._site) # noqa: SLF001
if wait:
# Wait for the socket to close
await _async_waitfor_socket_closed(self._socket)
_LOGGER.info("AioHTTP snitun client closed")
async def connect(
self,
fernet_key: bytes,
aes_key: bytes,
aes_iv: bytes,
throttling: int | None = None,
) -> None:
"""Connect to SniTun server."""
if self._client.is_connected:
return
assert self._connector is not None, "Connector is not initialized"
await self._client.start(
self._connector,
fernet_key,
aes_key,
aes_iv,
throttling=throttling,
)
_LOGGER.info("AioHTTP snitun client connected to: %s", self._server_name)
async def disconnect(self) -> None:
"""Disconnect from SniTun server."""
if not self._client.is_connected:
return
await self._client.stop()
_LOGGER.info("AioHTTP snitun client disconnected from: %s", self._server_name)
async def _async_waitfor_socket_closed(sock: socket.socket | None = None) -> None:
"""Wait for the socket to be closed."""
if sock is None:
return
loop = asyncio.get_event_loop()
try:
async with asyncio_timeout.timeout(60):
while (await loop.run_in_executor(None, sock.fileno)) != -1:
await asyncio.sleep(1)
except TimeoutError:
_LOGGER.warning("Timeout while waiting for the socket to close.")
snitun-0.42.0/snitun/utils/asyncio.py 0000664 0000000 0000000 00000002167 14773172534 0017637 0 ustar 00root root 0000000 0000000 """Utils for asyncio."""
import asyncio
from collections.abc import Coroutine
from typing import Any, TypeVar
_T = TypeVar("_T")
asyncio_timeout = asyncio
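# asyncio.timeout() exists since Python 3.11, so the asyncio module itself can
# serve as the timeout namespace; the alias keeps a single import path
# (asyncio_timeout.timeout) for callers.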
def create_eager_task(
coro: Coroutine[Any, Any, _T],
*,
name: str | None = None,
loop: asyncio.AbstractEventLoop | None = None,
) -> asyncio.Task[_T]:
"""Create a task from a coroutine and schedule it to run immediately."""
return asyncio.Task(
coro,
loop=loop or asyncio.get_running_loop(),
name=name,
eager_start=True, # type: ignore[call-arg]
)
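# Note: the eager_start keyword requires Python 3.12+; the type: ignore above
# is only needed because older type stubs do not know the argument.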
def make_task_waiter_future(task: asyncio.Task) -> asyncio.Future[None]:
"""Create a future that waits for a task to complete.
A future is used to ensure that cancellation of the
task does not propagate to the waiter.
"""
loop = asyncio.get_running_loop()
fut: asyncio.Future[None] = loop.create_future()
def _resolve_future(_: asyncio.Task) -> None:
if not fut.done():
fut.set_result(None)
if task.done():
_resolve_future(task)
return fut
task.add_done_callback(_resolve_future)
return fut
snitun-0.42.0/snitun/utils/ipaddress.py 0000664 0000000 0000000 00000001313 14773172534 0020140 0 ustar 00root root 0000000 0000000 """Utils for handling IP address."""
from functools import lru_cache
import ipaddress
import socket
EMPTY_IP_ADDRESS = ipaddress.IPv4Address(0)
EMPTY_IP_ADDRESS_BYTES = bytes(4)
@lru_cache
def bytes_to_ip_address(data: bytes) -> ipaddress.IPv4Address:
"""Convert bytes into a IP address."""
try:
return ipaddress.IPv4Address(socket.inet_ntop(socket.AF_INET, data))
except (ValueError, OSError):
return EMPTY_IP_ADDRESS
@lru_cache
def ip_address_to_bytes(ip_address: ipaddress.IPv4Address) -> bytes:
"""Convert a IP address object into bytes."""
try:
return socket.inet_pton(socket.AF_INET, str(ip_address))
except OSError:
return EMPTY_IP_ADDRESS_BYTES
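# Round-trip example (illustrative):
#   ip_address_to_bytes(ipaddress.IPv4Address("8.8.8.8")) == b"\x08\x08\x08\x08"
#   bytes_to_ip_address(b"\x08\x08\x08\x08") == ipaddress.IPv4Address("8.8.8.8")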
snitun-0.42.0/snitun/utils/server.py 0000664 0000000 0000000 00000002112 14773172534 0017466 0 ustar 00root root 0000000 0000000 """Utils for server handling."""
from __future__ import annotations
from datetime import UTC, datetime, timedelta
import json
from typing import NotRequired, TypedDict
from cryptography.fernet import Fernet, MultiFernet
MAX_READ_SIZE = 4_096
MAX_BUFFER_SIZE = 1_024_000
PROTOCOL_VERSION = 1
class TokenData(TypedDict):
"""Token data."""
valid: float
hostname: str
aes_key: str
aes_iv: str
protocol_version: NotRequired[int]
alias: list[str] | None
def generate_client_token(
tokens: list[str],
valid_delta: timedelta,
hostname: str,
aes_key: bytes,
aes_iv: bytes,
) -> bytes:
"""Generate a token for client."""
fernet = MultiFernet([Fernet(key) for key in tokens])
valid = datetime.now(tz=UTC) + valid_delta
return fernet.encrypt(
json.dumps(
{
"valid": valid.timestamp(),
"hostname": hostname,
"aes_key": aes_key.hex(),
"aes_iv": aes_iv.hex(),
"protocol_version": PROTOCOL_VERSION,
},
).encode(),
)
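# Usage sketch (illustrative; key handling is up to the server operator):
#   from datetime import timedelta
#   from cryptography.fernet import Fernet
#   from .aes import generate_aes_keyset
#   aes_key, aes_iv = generate_aes_keyset()
#   token = generate_client_token(
#       [Fernet.generate_key().decode()],  # fernet keys shared with the server
#       timedelta(days=1),
#       "example.com",                     # hypothetical hostname
#       aes_key,
#       aes_iv,
#   )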
snitun-0.42.0/tests/ 0000775 0000000 0000000 00000000000 14773172534 0014274 5 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/__init__.py 0000664 0000000 0000000 00000000024 14773172534 0016401 0 ustar 00root root 0000000 0000000 """SniTun tests."""
snitun-0.42.0/tests/benchmarks/ 0000775 0000000 0000000 00000000000 14773172534 0016411 5 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/benchmarks/__init__.py 0000664 0000000 0000000 00000000000 14773172534 0020510 0 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/benchmarks/test_multiplexer.py 0000664 0000000 0000000 00000003312 14773172534 0022373 0 ustar 00root root 0000000 0000000 import asyncio
import ipaddress
import pytest
from pytest_codspeed import BenchmarkFixture
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
IP_ADDR = ipaddress.ip_address("8.8.8.8")
@pytest.mark.parametrize(
("size", "message_count"),
[(2048, 1000), (1024 * 1024, 100)],
ids=["1000@2KiB", "100@1MiB"],
)
def test_multiplex_channel_message(
benchmark: BenchmarkFixture,
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
size: int,
message_count: int,
) -> None:
"""Test writing messages to the channel and reading them back."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
loop = asyncio.get_event_loop()
async def setup_channel() -> tuple[MultiplexerChannel, MultiplexerChannel]:
channel_client = await multiplexer_client.create_channel(
IP_ADDR,
lambda _: None,
)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
assert channel_client
assert channel_server
return channel_client, channel_server
payload = b"x" * size
async def _async_read_write_messages(
channel_client: MultiplexerChannel,
channel_server: MultiplexerChannel,
) -> None:
for _ in range(message_count):
await channel_client.write(payload)
await channel_server.read()
channel_client, channel_server = loop.run_until_complete(setup_channel())
@benchmark
def read_write_channel() -> None:
loop.run_until_complete(
_async_read_write_messages(channel_client, channel_server),
)
snitun-0.42.0/tests/benchmarks/test_server_sni.py 0000664 0000000 0000000 00000003102 14773172534 0022175 0 ustar 00root root 0000000 0000000 import asyncio
import pytest
from pytest_codspeed import BenchmarkFixture
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
from ..conftest import Client
from ..server.const_tls import TLS_1_2
@pytest.mark.parametrize(
("message_size", "count"),
[(8192, 1000), (1024 * 1024, 15)],
ids=["1000@8KiB", "15@1MiB"],
)
def test_server_send_message(
benchmark: BenchmarkFixture,
multiplexer_client: Multiplexer,
test_client_ssl: Client,
message_size: int,
count: int,
) -> None:
"""Test the TLS client writing messages to the channel and reading them back."""
loop = asyncio.get_event_loop()
async def setup() -> MultiplexerChannel:
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert multiplexer_client._channels
channel = next(iter(multiplexer_client._channels.values()))
client_hello = await channel.read()
assert client_hello == TLS_1_2
return channel
channel = loop.run_until_complete(setup())
message = b"x" * message_size
async def round_trip_messages():
for _ in range(count):
test_client_ssl.writer.write(message)
received = 0
while received != message_size:
received += len(await channel.read())
@benchmark
def read_write_channel() -> None:
loop.run_until_complete(round_trip_messages())
async def teardown():
channel.close()
loop.run_until_complete(teardown())
snitun-0.42.0/tests/client/ 0000775 0000000 0000000 00000000000 14773172534 0015552 5 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/client/__init__.py 0000664 0000000 0000000 00000000037 14773172534 0017663 0 ustar 00root root 0000000 0000000 """Tests for SniTun client."""
snitun-0.42.0/tests/client/test_client_peer.py 0000664 0000000 0000000 00000031133 14773172534 0021455 0 ustar 00root root 0000000 0000000 """Test Client Peer connections."""
import asyncio
from datetime import UTC, datetime, timedelta
import ipaddress
import os
import pytest
from snitun.client.client_peer import ClientPeer
from snitun.client.connector import Connector
from snitun.exceptions import SniTunConnectionError
from snitun.server.listener_peer import PeerListener
from snitun.server.peer_manager import PeerManager
from ..conftest import Client
from ..server.const_fernet import create_peer_config
IP_ADDR = ipaddress.ip_address("8.8.8.8")
async def test_init_client_peer(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
assert client._multiplexer._throttling is None
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
async def test_init_client_peer_with_alias(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer with custom tomain."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
assert not peer_manager.peer_available("localhost.custom")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(
valid.timestamp(),
hostname,
aes_key,
aes_iv,
alias=["localhost.custom"],
)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert peer_manager.peer_available("localhost.custom")
assert client.is_connected
assert client._multiplexer._throttling is None
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
assert not peer_manager.peer_available("localhost.custom")
async def test_init_client_peer_invalid_token(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=-1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
with pytest.raises(SniTunConnectionError):
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert not peer_manager.peer_available("localhost")
async def test_flow_client_peer(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer, test flow."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
peer = peer_manager.get_peer("localhost")
channel = await peer.multiplexer.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
assert channel.ip_address == IP_ADDR
test_connection.writer.write(b"Hiro")
await test_connection.writer.drain()
data = await channel.read()
assert data == b"Hiro"
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
test_connection.close.set()
async def test_close_client_peer(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer, test flow - close it."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
peer = peer_manager.get_peer("localhost")
channel = await peer.multiplexer.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
assert channel.ip_address == IP_ADDR
test_connection.writer.write(b"Hiro")
await test_connection.writer.drain()
data = await channel.read()
assert data == b"Hiro"
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
data = await test_connection.reader.read(1024)
assert not data
test_connection.close.set()
async def test_init_client_peer_wait(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
assert not client.wait().done()
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
with pytest.raises(RuntimeError):
assert client.wait().done()
async def test_init_client_peer_wait_waits_for_task(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
assert not client.wait().done()
# Shutdown the multiplexer from under the client
client._multiplexer.shutdown()
await client.wait()
# Make sure the task is actually done
assert client._handler_task.done()
await client._stop_handler()
# Make sure _stop_handler cleans up the task reference
assert client._handler_task is None
async def test_client_peer_can_start_again(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test once the connection fails, we can start again."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
assert not client.wait().done()
# Shutdown the multiplexer from under the client
client._multiplexer.shutdown()
await client.wait()
assert not client.is_connected
# Now make sure we can start again
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
await client.stop()
assert not client.is_connected
async def test_init_client_peer_throttling(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test setup of ClientPeer."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv, throttling=500)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
assert client._multiplexer._throttling == 0.002
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
async def test_init_client_peer_stop_does_not_swallow_cancellation(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test stopping the peer does not swallow cancellation."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
task = asyncio.create_task(client._stop_handler())
await asyncio.sleep(0)
task.cancel()
with pytest.raises(asyncio.CancelledError):
await task
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
async def test_init_client_peer_stop_twice(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test calling stop twice raises an error."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
assert client.is_connected
await client.stop()
with pytest.raises(RuntimeError):
await client.stop()
await asyncio.sleep(0.1)
assert not client.is_connected
assert not peer_manager.peer_available("localhost")
snitun-0.42.0/tests/client/test_connector.py 0000664 0000000 0000000 00000022132 14773172534 0021155 0 ustar 00root root 0000000 0000000 """Test client connector."""
import asyncio
import ipaddress
from typing import cast
from unittest.mock import AsyncMock, patch
import pytest
from snitun.client.connector import Connector, ConnectorHandler
from snitun.exceptions import MultiplexerTransportClose
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
from ..conftest import Client
IP_ADDR = ipaddress.ip_address("8.8.8.8")
BAD_ADDR = ipaddress.ip_address("8.8.1.1")
async def test_init_connector(
test_endpoint: list[Client],
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test and init a connector."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822")
multiplexer_client._new_connections = connector.handler
channel = await multiplexer_server.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
test_connection.close.set()
async def test_flow_connector(
test_endpoint: list[Client],
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test and and perform a connector flow."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822")
multiplexer_client._new_connections = connector.handler
channel = await multiplexer_server.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
test_connection.writer.write(b"Hiro")
await test_connection.writer.drain()
data = await channel.read()
assert data == b"Hiro"
test_connection.close.set()
async def test_close_connector_remote(
test_endpoint: list[Client],
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test and init a connector with remote close."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822")
multiplexer_client._new_connections = connector.handler
channel = await multiplexer_server.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
test_connection.writer.write(b"Hiro")
await test_connection.writer.drain()
data = await channel.read()
assert data == b"Hiro"
multiplexer_server.delete_channel(channel)
data = await test_connection.reader.read(1024)
assert not data
test_connection.close.set()
async def test_close_connector_local(
test_endpoint: list[Client],
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test and init a connector."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822")
multiplexer_client._new_connections = connector.handler
channel = await multiplexer_server.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
test_connection.writer.write(b"Hiro")
await test_connection.writer.drain()
data = await channel.read()
assert data == b"Hiro"
test_connection.writer.close()
test_connection.close.set()
await asyncio.sleep(0.1)
with pytest.raises(MultiplexerTransportClose):
await channel.read()
async def test_init_connector_whitelist(
test_endpoint: list[Client],
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test and init a connector with whitelist."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822", True)
multiplexer_client._new_connections = connector.handler
connector.whitelist.add(IP_ADDR)
assert IP_ADDR in connector.whitelist
channel = await multiplexer_server.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert test_endpoint
test_connection = test_endpoint[0]
await channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
test_connection.close.set()
async def test_init_connector_whitelist_bad(
test_endpoint: list[Client],
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test and init a connector with whitelist bad requests."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822", True)
multiplexer_client._new_connections = connector.handler
connector.whitelist.add(IP_ADDR)
assert IP_ADDR in connector.whitelist
assert BAD_ADDR not in connector.whitelist
channel = await multiplexer_server.create_channel(BAD_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert not test_endpoint
with pytest.raises(MultiplexerTransportClose):
await channel.read()
async def test_connector_error_callback(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test connector endpoint error callback."""
callback = AsyncMock()
connector = Connector("127.0.0.1", "8822", False, callback)
channel = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
callback.assert_not_called()
with patch("asyncio.open_connection", side_effect=OSError("Lorem ipsum...")):
await connector.handler(multiplexer_client, channel)
callback.assert_called_once()
async def test_connector_no_error_callback(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test connector with not endpoint error callback."""
connector = Connector("127.0.0.1", "8822", False, None)
channel = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
with patch("asyncio.open_connection", side_effect=OSError("Lorem ipsum...")):
await connector.handler(multiplexer_client, channel)
async def test_connector_handler_can_pause(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
test_endpoint: list[Client],
) -> None:
"""Test connector handler can pause."""
assert not test_endpoint
connector = Connector("127.0.0.1", "8822")
multiplexer_client._new_connections = connector.handler
connector_handler: ConnectorHandler | None = None
def save_connector_handler(
loop: asyncio.AbstractEventLoop,
channel: MultiplexerChannel,
) -> ConnectorHandler:
nonlocal connector_handler
connector_handler = ConnectorHandler(loop, channel)
return connector_handler
with patch("snitun.client.connector.ConnectorHandler", save_connector_handler):
server_channel = await multiplexer_server.create_channel(
IP_ADDR,
lambda _: None,
)
await asyncio.sleep(0.1)
assert isinstance(connector_handler, ConnectorHandler)
handler = cast(ConnectorHandler, connector_handler)
client_channel = handler._channel
assert client_channel._pause_resume_reader_callback is not None
assert (
client_channel._pause_resume_reader_callback
== handler._pause_resume_reader_callback
)
assert test_endpoint
test_connection = test_endpoint[0]
await server_channel.write(b"Hallo")
data = await test_connection.reader.read(1024)
assert data == b"Hallo"
test_connection.writer.write(b"Hiro")
await test_connection.writer.drain()
data = await server_channel.read()
assert data == b"Hiro"
assert handler._pause_future is None
# Simulate that the remote input goes under water
client_channel.on_remote_input_under_water(True)
assert handler._pause_future is not None
await server_channel.write(b"Goodbye")
data = await test_connection.reader.read(1024)
assert data == b"Goodbye"
# This is an implementation detail that we might
# change in the future, but for now we need to
# to read one more message because we don't cancel
# the current read when the reader pauses as the additional
# complexity is not worth it.
test_connection.writer.write(b"Should read one more")
await test_connection.writer.drain()
assert await server_channel.read() == b"Should read one more"
test_connection.writer.write(b"ByeBye")
await test_connection.writer.drain()
read_task = asyncio.create_task(server_channel.read())
await asyncio.sleep(0.1)
# Make sure reader is actually paused
assert not read_task.done()
# Now simulate that the remote input is no longer under water
client_channel.on_remote_input_under_water(False)
assert handler._pause_future is None
data = await read_task
assert data == b"ByeBye"
test_connection.writer.close()
test_connection.close.set()
await asyncio.sleep(0.1)
with pytest.raises(MultiplexerTransportClose):
await server_channel.read()
snitun-0.42.0/tests/conftest.py 0000664 0000000 0000000 00000017641 14773172534 0016504 0 ustar 00root root 0000000 0000000 """Pytest fixtures for SniTun."""
import asyncio
from collections.abc import AsyncGenerator, Generator
from dataclasses import dataclass, field
from datetime import UTC, datetime, timedelta
import logging
import os
import select
import socket
from threading import Thread
from unittest.mock import patch
import pytest
import snitun
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
from snitun.multiplexer.crypto import CryptoTransport
from snitun.server.listener_peer import PeerListener
from snitun.server.listener_sni import SNIProxy
from snitun.server.peer import Peer
from snitun.server.peer_manager import PeerManager
from snitun.utils.asyncio import asyncio_timeout
from .server.const_fernet import FERNET_TOKENS
logging.basicConfig(level=logging.DEBUG)
@dataclass
class Client:
"""Represent a TCP client object."""
reader: asyncio.StreamReader
writer: asyncio.StreamWriter
close: asyncio.Event = field(default_factory=asyncio.Event)
@pytest.fixture
def raise_timeout() -> Generator[None, None, None]:
"""Raise timeout on async-timeout."""
with patch.object(asyncio_timeout, "timeout", side_effect=TimeoutError()):
yield
@pytest.fixture
async def test_server() -> AsyncGenerator[list[Client], None]:
"""Create a TCP test server."""
connections = []
async def process_data(
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Read data from client."""
client = Client(reader, writer)
connections.append(client)
await client.close.wait()
server = await asyncio.start_server(process_data, host="127.0.0.1", port="8866")
yield connections
server.close()
@pytest.fixture
async def test_endpoint() -> AsyncGenerator[list[Client], None]:
"""Create a TCP test endpoint."""
connections = []
async def process_data(
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Read data from client."""
client = Client(reader, writer)
connections.append(client)
await client.close.wait()
server = await asyncio.start_server(process_data, host="127.0.0.1", port="8822")
yield connections
server.close()
@pytest.fixture
async def test_client(test_server: list[Client]) -> AsyncGenerator[Client, None]:
"""Create a TCP test client."""
reader, writer = await asyncio.open_connection(host="127.0.0.1", port="8866")
yield Client(reader, writer)
writer.close()
@pytest.fixture
def test_server_sync(
event_loop: asyncio.AbstractEventLoop,
) -> Generator[list[socket.socket], None, None]:
"""Create a TCP test server."""
connections: list[socket.socket] = []
shutdown = False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("127.0.0.1", 8366))
sock.listen(2)
sock.setblocking(False)
def _incoming() -> None:
nonlocal shutdown
poller = select.epoll()
poller.register(sock, select.EPOLLIN)
while not shutdown:
events = poller.poll(0.1)
for _, _ in events:
connection, _ = sock.accept()
connections.append(connection)
poller.close()
runner = Thread(target=_incoming)
runner.start()
yield connections
shutdown = True
runner.join()
sock.close()
@pytest.fixture
def test_client_sync(
test_server_sync: list[socket.socket],
) -> Generator[socket.socket, None, None]:
"""Create a TCP test client."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 8366))
yield sock
sock.close()
@pytest.fixture
def test_client_ssl_sync(
test_server_sync: list[socket.socket],
) -> Generator[socket.socket, None, None]:
"""Create a TCP test client for SSL."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 8366))
yield sock
sock.close()
@pytest.fixture
async def multiplexer_server(
test_server: list[Client],
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> AsyncGenerator[Multiplexer, None]:
"""Create a multiplexer client from server."""
client = test_server[0]
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
client.reader,
client.writer,
snitun.PROTOCOL_VERSION,
mock_new_channel,
)
yield multiplexer
multiplexer.shutdown()
client.close.set()
@pytest.fixture
async def multiplexer_server_peer_protocol_0(
test_server: list[Client],
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> AsyncGenerator[Multiplexer, None]:
"""Create a multiplexer client from server with the peer using protocol 0."""
client = test_server[0]
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
client.reader,
client.writer,
0,
mock_new_channel,
)
yield multiplexer
multiplexer.shutdown()
client.close.set()
@pytest.fixture
async def multiplexer_client(
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> AsyncGenerator[Multiplexer, None]:
"""Create a multiplexer client from server."""
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
test_client.reader,
test_client.writer,
snitun.PROTOCOL_VERSION,
mock_new_channel,
)
yield multiplexer
multiplexer.shutdown()
@pytest.fixture
async def peer_manager(multiplexer_server: Multiplexer, peer: Peer) -> PeerManager:
"""Create a localhost peer for tests."""
manager = PeerManager(FERNET_TOKENS)
manager._peers[peer.hostname] = peer
return manager
@pytest.fixture
async def sni_proxy(peer_manager: PeerManager) -> AsyncGenerator[SNIProxy, None]:
"""Create a SNI Proxy."""
proxy = SNIProxy(peer_manager, "127.0.0.1", "8863")
await proxy.start()
yield proxy
await proxy.stop()
@pytest.fixture
async def test_client_ssl(sni_proxy: SNIProxy) -> AsyncGenerator[Client, None]:
"""Create a TCP test client."""
reader, writer = await asyncio.open_connection(host="127.0.0.1", port="8863")
yield Client(reader, writer)
writer.close()
@pytest.fixture
def crypto_key_iv() -> tuple[bytes, bytes]:
"""Create a key and iv."""
key = os.urandom(32)
iv = os.urandom(16)
return key, iv
@pytest.fixture
async def peer(
crypto_key_iv: tuple[bytes, bytes],
multiplexer_server: Multiplexer,
) -> Peer:
"""Init a peer with transport."""
valid = datetime.now(tz=UTC) + timedelta(days=1)
peer = Peer(
"localhost",
valid,
os.urandom(32),
os.urandom(16),
snitun.PROTOCOL_VERSION,
)
peer._crypto = CryptoTransport(*crypto_key_iv)
peer._multiplexer = multiplexer_server
return peer
@pytest.fixture
async def peer_listener(
peer_manager: PeerManager,
peer: Peer,
) -> AsyncGenerator[PeerListener, None]:
"""Create a Peer listener."""
listener = PeerListener(peer_manager, "127.0.0.1", "8893")
await listener.start()
# Cleanup mock peer
peer_manager.remove_peer(peer)
yield listener
await listener.stop()
@pytest.fixture
async def test_client_peer(peer_listener: PeerListener) -> AsyncGenerator[Client, None]:
"""Create a TCP test client."""
reader, writer = await asyncio.open_connection(host="127.0.0.1", port="8893")
yield Client(reader, writer)
writer.close()
snitun-0.42.0/tests/multiplexer/ 0000775 0000000 0000000 00000000000 14773172534 0016646 5 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/multiplexer/__init__.py 0000664 0000000 0000000 00000000035 14773172534 0020755 0 ustar 00root root 0000000 0000000 """Tests for multiplexer."""
snitun-0.42.0/tests/multiplexer/test_channel.py 0000664 0000000 0000000 00000026302 14773172534 0021672 0 ustar 00root root 0000000 0000000 """Test Multiplexer channels."""
import asyncio
import ipaddress
from unittest.mock import patch
import pytest
import snitun
from snitun.exceptions import MultiplexerTransportClose, MultiplexerTransportError
from snitun.multiplexer import channel as channel_module
from snitun.multiplexer.channel import ChannelFlowControlBase, MultiplexerChannel
from snitun.multiplexer.const import (
OUTGOING_QUEUE_HIGH_WATERMARK,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
)
from snitun.multiplexer.message import (
CHANNEL_FLOW_CLOSE,
CHANNEL_FLOW_DATA,
CHANNEL_FLOW_NEW,
CHANNEL_FLOW_PAUSE,
CHANNEL_FLOW_RESUME,
HEADER_SIZE,
MultiplexerChannelId,
MultiplexerMessage,
)
from snitun.multiplexer.queue import MultiplexerMultiChannelQueue
from snitun.utils.ipaddress import ip_address_to_bytes
IP_ADDR = ipaddress.ip_address("8.8.8.8")
async def test_initial_channel_msg() -> None:
"""Test new MultiplexerChannel with id."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
message = channel.init_new()
assert message.id == channel.id
assert message.flow_type == CHANNEL_FLOW_NEW
assert message.data == b""
assert message.extra == b"4" + ip_address_to_bytes(IP_ADDR)
async def test_close_channel_msg() -> None:
"""Test close MultiplexerChannel."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
message = channel.init_close()
assert message.id == channel.id
assert message.flow_type == CHANNEL_FLOW_CLOSE
assert message.data == b""
async def test_write_data() -> None:
"""Test send data over MultiplexerChannel."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
await channel.write(b"test")
assert not output.empty(channel.id)
message = output.get_nowait()
assert message.id == channel.id
assert message.flow_type == CHANNEL_FLOW_DATA
assert message.data == b"test"
async def test_closing() -> None:
"""Test send data over MultiplexerChannel."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
assert not channel.closing
channel.close()
assert channel.closing
async def test_write_data_after_close() -> None:
"""Test send data over MultiplexerChannel."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
assert not channel.closing
channel.close()
with pytest.raises(MultiplexerTransportClose):
await channel.write(b"test")
assert channel.closing
async def test_write_data_empty() -> None:
"""Test send data over MultiplexerChannel."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
with pytest.raises(MultiplexerTransportError):
await channel.write(b"")
async def test_read_data() -> None:
"""Test send data over MultiplexerChannel."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
message = MultiplexerMessage(channel.id, CHANNEL_FLOW_DATA, b"test")
channel.message_transport(message)
data = await channel.read()
assert data == b"test"
async def test_read_data_on_close() -> None:
"""Test send data over MultiplexerChannel on close."""
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
assert not channel.closing
channel.close()
with pytest.raises(MultiplexerTransportClose):
await channel.read()
assert channel.closing
async def test_write_data_peer_error(raise_timeout: None) -> None:
"""Test send data over MultiplexerChannel but peer don't response."""
output = MultiplexerMultiChannelQueue(1, 1, 1)
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
# fill peer queue
output.put_nowait(channel.id, None)
with pytest.raises(MultiplexerTransportError):
await channel.write(b"test")
async def test_message_transport_never_lock() -> None:
"""Message transport should never lock down even when it goes unhealthy."""
output = MultiplexerMultiChannelQueue(1, 1, 1)
with patch.object(channel_module, "INCOMING_QUEUE_MAX_BYTES_CHANNEL", 1):
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
assert not channel.unhealthy
assert not channel.closing
for _ in range(3):
channel.message_transport(channel.init_close())
assert channel.unhealthy
async def test_write_throttling() -> None:
"""Message transport should never lock down."""
loop = asyncio.get_running_loop()
output = MultiplexerMultiChannelQueue(500, 1, 100)
channel = MultiplexerChannel(
output,
IP_ADDR,
snitun.PROTOCOL_VERSION,
throttling=0.1,
)
assert isinstance(channel.id, MultiplexerChannelId)
message = b"test"
message_size = HEADER_SIZE + len(message)
async def _write_background():
"""Write message in background."""
for _ in range(1, 10000):
await channel.write(message)
background_task = loop.create_task(_write_background())
await asyncio.sleep(0.3)
assert not background_task.done()
assert output.size(channel.id) <= message_size * 4
background_task.cancel()
with pytest.raises(asyncio.CancelledError):
await background_task
async def test_channel_input_queue_goes_under_water() -> None:
"""Test when a channel input queue goes under water.
The channel should inform the peer to pause the reader.
"""
output = MultiplexerMultiChannelQueue(
HEADER_SIZE * 2,
HEADER_SIZE,
HEADER_SIZE * 2,
)
with (
patch.object(
channel_module,
"INCOMING_QUEUE_MAX_BYTES_CHANNEL",
HEADER_SIZE * 10,
),
patch.object(channel_module, "INCOMING_QUEUE_LOW_WATERMARK", HEADER_SIZE),
patch.object(channel_module, "INCOMING_QUEUE_HIGH_WATERMARK", HEADER_SIZE * 2),
):
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
# Fake some data coming from the remote
data_msg = MultiplexerMessage(channel.id, CHANNEL_FLOW_DATA)
channel.message_transport(data_msg)
channel.message_transport(data_msg)
# The input queue is now under water
assert channel._input._under_water
# We should have told the remote to pause
assert output.get_nowait() == MultiplexerMessage(channel.id, CHANNEL_FLOW_PAUSE)
assert await channel.read() == data_msg.data
# The input queue is now back to normal
# We should have told the remote to resume
assert output.get_nowait() == MultiplexerMessage(channel.id, CHANNEL_FLOW_RESUME)
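# Illustrative sketch (not a test): the receiving end of the pause/resume messages
# verified above. A peer that receives CHANNEL_FLOW_PAUSE invokes the channel's
# pause/resume callback with True and should stop feeding channel.write(); False
# (CHANNEL_FLOW_RESUME) means it may continue. The ``transport`` argument is a
# hypothetical byte source used only for illustration, e.g. an asyncio.ReadTransport.
def _sketch_wire_pause_resume(
    channel: MultiplexerChannel,
    transport: asyncio.ReadTransport,
) -> None:
    def _pause_resume(pause: bool) -> None:
        # True: the peer asked us to pause; stop reading from the local source.
        # False: the peer recovered; resume reading.
        if pause:
            transport.pause_reading()
        else:
            transport.resume_reading()

    channel.set_pause_resume_reader_callback(_pause_resume)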
async def test_channel_input_queue_goes_under_water_output_full(
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test when a channel input queue goes under water when output is full.
The channel should inform the peer to pause the reader.
"""
output = MultiplexerMultiChannelQueue(
HEADER_SIZE * 2,
HEADER_SIZE,
HEADER_SIZE * 2,
)
with (
patch.object(
channel_module,
"INCOMING_QUEUE_MAX_BYTES_CHANNEL",
HEADER_SIZE * 10,
),
patch.object(channel_module, "INCOMING_QUEUE_LOW_WATERMARK", HEADER_SIZE),
patch.object(channel_module, "INCOMING_QUEUE_HIGH_WATERMARK", HEADER_SIZE * 2),
):
channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
assert isinstance(channel.id, MultiplexerChannelId)
data_msg = MultiplexerMessage(channel.id, CHANNEL_FLOW_DATA)
# Fill the output queue so it's full
output.put_nowait(channel.id, data_msg)
output.put_nowait(channel.id, data_msg)
# Fake some data coming from the remote
channel.message_transport(data_msg)
channel.message_transport(data_msg)
# The input queue is now under water
assert channel._input._under_water
# We can't tell the remote to pause because
# our output queue is full
assert (
f"{channel.id}: Cannot send pause/resume message to peer, "
"output queue is full" in caplog.text
)
async def test_flow_control_allow_multiple_pause_resume(
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test that we can pause and resume multiple times."""
class ChannelConsumer(ChannelFlowControlBase):
"""Channel consumer for testing."""
def __init__(self) -> None:
super().__init__(asyncio.get_running_loop())
output = MultiplexerMultiChannelQueue(
OUTGOING_QUEUE_MAX_BYTES_CHANNEL,
OUTGOING_QUEUE_LOW_WATERMARK,
OUTGOING_QUEUE_HIGH_WATERMARK,
)
self._channel = MultiplexerChannel(output, IP_ADDR, snitun.PROTOCOL_VERSION)
base_channel = ChannelConsumer()
base_channel._pause_resume_reader_callback(True)
assert base_channel._pause_future is not None
assert not base_channel._pause_future.done()
with pytest.raises(RuntimeError, match="Reader already paused for"):
base_channel._pause_resume_reader_callback(True)
assert base_channel._pause_future is not None
assert not base_channel._pause_future.done()
base_channel._pause_resume_reader_callback(False)
assert base_channel._pause_future is None
with pytest.raises(RuntimeError, match="Reader already resumed for"):
base_channel._pause_resume_reader_callback(False)
assert base_channel._pause_future is None
snitun-0.42.0/tests/multiplexer/test_const.py 0000664 0000000 0000000 00000006157 14773172534 0021416 0 ustar 00root root 0000000 0000000 import importlib
import os
from snitun.multiplexer import const
def test_override_constants_from_env():
"""Test overriding constants from environment variables."""
os.environ.pop("MULTIPLEXER_INCOMING_QUEUE_MAX_BYTES_CHANNEL", None)
os.environ.pop("MULTIPLEXER_INCOMING_QUEUE_LOW_WATERMARK", None)
os.environ.pop("MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK", None)
os.environ.pop("MULTIPLEXER_OUTGOING_QUEUE_MAX_BYTES_CHANNEL", None)
os.environ.pop("MULTIPLEXER_OUTGOING_QUEUE_LOW_WATERMARK", None)
os.environ.pop("MULTIPLEXER_OUTGOING_QUEUE_HIGH_WATERMARK", None)
importlib.reload(const)
assert (
const.INCOMING_QUEUE_MAX_BYTES_CHANNEL
== const.DEFAULT_INCOMING_QUEUE_MAX_BYTES_CHANNEL
)
assert (
const.INCOMING_QUEUE_LOW_WATERMARK == const.DEFAULT_INCOMING_QUEUE_LOW_WATERMARK
)
assert (
const.INCOMING_QUEUE_HIGH_WATERMARK
== const.DEFAULT_INCOMING_QUEUE_HIGH_WATERMARK
)
assert (
const.OUTGOING_QUEUE_MAX_BYTES_CHANNEL
== const.DEFAULT_OUTGOING_QUEUE_MAX_BYTES_CHANNEL
)
assert (
const.OUTGOING_QUEUE_LOW_WATERMARK == const.DEFAULT_OUTGOING_QUEUE_LOW_WATERMARK
)
assert (
const.OUTGOING_QUEUE_HIGH_WATERMARK
== const.DEFAULT_OUTGOING_QUEUE_HIGH_WATERMARK
)
os.environ["MULTIPLEXER_INCOMING_QUEUE_MAX_BYTES_CHANNEL"] = "1"
os.environ["MULTIPLEXER_INCOMING_QUEUE_LOW_WATERMARK"] = "2"
os.environ["MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK"] = "3"
os.environ["MULTIPLEXER_OUTGOING_QUEUE_MAX_BYTES_CHANNEL"] = "4"
os.environ["MULTIPLEXER_OUTGOING_QUEUE_LOW_WATERMARK"] = "5"
os.environ["MULTIPLEXER_OUTGOING_QUEUE_HIGH_WATERMARK"] = "6"
importlib.reload(const)
assert const.INCOMING_QUEUE_MAX_BYTES_CHANNEL == 1
assert const.INCOMING_QUEUE_LOW_WATERMARK == 2
assert const.INCOMING_QUEUE_HIGH_WATERMARK == 3
assert const.OUTGOING_QUEUE_MAX_BYTES_CHANNEL == 4
assert const.OUTGOING_QUEUE_LOW_WATERMARK == 5
assert const.OUTGOING_QUEUE_HIGH_WATERMARK == 6
del os.environ["MULTIPLEXER_INCOMING_QUEUE_MAX_BYTES_CHANNEL"]
del os.environ["MULTIPLEXER_INCOMING_QUEUE_LOW_WATERMARK"]
del os.environ["MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK"]
del os.environ["MULTIPLEXER_OUTGOING_QUEUE_MAX_BYTES_CHANNEL"]
del os.environ["MULTIPLEXER_OUTGOING_QUEUE_LOW_WATERMARK"]
del os.environ["MULTIPLEXER_OUTGOING_QUEUE_HIGH_WATERMARK"]
importlib.reload(const)
assert (
const.INCOMING_QUEUE_MAX_BYTES_CHANNEL
== const.DEFAULT_INCOMING_QUEUE_MAX_BYTES_CHANNEL
)
assert (
const.INCOMING_QUEUE_LOW_WATERMARK == const.DEFAULT_INCOMING_QUEUE_LOW_WATERMARK
)
assert (
const.INCOMING_QUEUE_HIGH_WATERMARK
== const.DEFAULT_INCOMING_QUEUE_HIGH_WATERMARK
)
assert (
const.OUTGOING_QUEUE_MAX_BYTES_CHANNEL
== const.DEFAULT_OUTGOING_QUEUE_MAX_BYTES_CHANNEL
)
assert (
const.OUTGOING_QUEUE_LOW_WATERMARK == const.DEFAULT_OUTGOING_QUEUE_LOW_WATERMARK
)
assert (
const.OUTGOING_QUEUE_HIGH_WATERMARK
== const.DEFAULT_OUTGOING_QUEUE_HIGH_WATERMARK
)
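# Illustrative sketch (not a test): the same override mechanism using pytest's
# ``monkeypatch`` fixture instead of editing os.environ by hand. The constant and
# environment-variable names are the real ones exercised above; the helper itself
# is only an example and is never collected.
def _sketch_override_high_watermark(monkeypatch) -> None:
    monkeypatch.setenv("MULTIPLEXER_INCOMING_QUEUE_HIGH_WATERMARK", "3")
    importlib.reload(const)
    assert const.INCOMING_QUEUE_HIGH_WATERMARK == 3
    # const must be reloaded once more after monkeypatch restores the environment.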
snitun-0.42.0/tests/multiplexer/test_core.py 0000664 0000000 0000000 00000050725 14773172534 0021220 0 ustar 00root root 0000000 0000000 """Tests for core multiplexer handler."""
import asyncio
from contextlib import suppress
import ipaddress
import os
from unittest.mock import patch
import pytest
import snitun
from snitun.exceptions import MultiplexerTransportClose, MultiplexerTransportError
from snitun.multiplexer import channel as channel_module, core as core_module
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
from snitun.multiplexer.crypto import CryptoTransport
from snitun.multiplexer.message import (
CHANNEL_FLOW_PAUSE,
CHANNEL_FLOW_PING,
HEADER_SIZE,
MultiplexerChannelId,
MultiplexerMessage,
)
from ..conftest import Client
IP_ADDR = ipaddress.ip_address("8.8.8.8")
async def test_init_multiplexer_server(
test_server: list[Client],
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> None:
"""Test to create a new Multiplexer from server socket."""
client = test_server[0]
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
client.reader,
client.writer,
snitun.PROTOCOL_VERSION,
)
assert multiplexer.is_connected
assert multiplexer._throttling is None
multiplexer.shutdown()
client.close.set()
async def test_init_multiplexer_client(
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> None:
"""Test to create a new Multiplexer from client socket."""
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
test_client.reader,
test_client.writer,
snitun.PROTOCOL_VERSION,
)
assert multiplexer.is_connected
assert multiplexer._throttling is None
multiplexer.shutdown()
async def test_init_multiplexer_server_throttling(
test_server: list[Client],
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> None:
"""Test to create a new Multiplexer from server socket."""
client = test_server[0]
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
client.reader,
client.writer,
snitun.PROTOCOL_VERSION,
throttling=500,
)
assert multiplexer.is_connected
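# throttling=500 is a rate limit in messages per second; the multiplexer
# stores it as the per-message delay of 1 / 500 = 0.002 seconds.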
assert multiplexer._throttling == 0.002
multiplexer.shutdown()
client.close.set()
async def test_init_multiplexer_client_throttling(
test_client: Client,
crypto_key_iv: tuple[bytes, bytes],
) -> None:
"""Test to create a new Multiplexer from client socket."""
multiplexer = Multiplexer(
CryptoTransport(*crypto_key_iv),
test_client.reader,
test_client.writer,
snitun.PROTOCOL_VERSION,
throttling=500,
)
assert multiplexer.is_connected
assert multiplexer._throttling == 0.002
multiplexer.shutdown()
async def test_multiplexer_server_close(
multiplexer_server: Multiplexer,
multiplexer_client: Multiplexer,
) -> None:
"""Test a close from server peers."""
assert multiplexer_server.is_connected
assert multiplexer_client.is_connected
multiplexer_server.shutdown()
await asyncio.sleep(0.1)
assert not multiplexer_server.is_connected
assert not multiplexer_client.is_connected
async def test_multiplexer_client_close(
multiplexer_server: Multiplexer,
multiplexer_client: Multiplexer,
) -> None:
"""Test a close from client peers."""
assert multiplexer_server.is_connected
assert multiplexer_client.is_connected
multiplexer_client.shutdown()
await asyncio.sleep(0.1)
assert not multiplexer_server.is_connected
assert not multiplexer_client.is_connected
async def test_multiplexer_ping(
test_server: list[Client],
multiplexer_client: Multiplexer,
) -> None:
"""Test a ping between peers."""
loop = asyncio.get_running_loop()
client = test_server[0]
ping_task = loop.create_task(multiplexer_client.ping())
await asyncio.sleep(0.1)
data = await client.reader.read(60)
data = multiplexer_client._crypto.decrypt(data)
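# Decrypted header layout checked below: bytes 0-15 channel id, byte 16
# flow type, bytes 17-20 big-endian data size, remaining bytes the extra
# field (which carries b"ping" for a ping message).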
assert data[16] == CHANNEL_FLOW_PING
assert int.from_bytes(data[17:21], "big") == 0
assert data[21:25] == b"ping"
ping_task.cancel()
async def test_multiplexer_ping_error(
test_server: list[Client],
multiplexer_client: Multiplexer,
) -> None:
"""Test a ping between peers."""
from snitun.multiplexer import core as multi_core
loop = asyncio.get_running_loop()
multi_core.PEER_TCP_TIMEOUT = 0.2
client = test_server[0]
ping_task = loop.create_task(multiplexer_client.ping())
await asyncio.sleep(0.3)
data = await client.reader.read(60)
data = multiplexer_client._crypto.decrypt(data)
assert data[16] == CHANNEL_FLOW_PING
assert int.from_bytes(data[17:21], "big") == 0
assert data[21:25] == b"ping"
assert ping_task.done()
with pytest.raises(MultiplexerTransportError):
raise ping_task.exception()
multi_core.PEER_TCP_TIMEOUT = 90
async def test_multiplexer_ping_pong(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that without new channel callback can't create new channels."""
await multiplexer_client.ping()
assert multiplexer_client._healthy.is_set()
async def test_multiplexer_cant_init_channel(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that without new channel callback can't create new channels."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
# Disable new channels
multiplexer_server._new_connections = None
await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert multiplexer_client._channels
assert not multiplexer_server._channels
async def test_multiplexer_init_channel(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that new channels are created."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert multiplexer_client._channels
assert multiplexer_server._channels
assert multiplexer_client._channels[channel.id]
assert multiplexer_server._channels[channel.id]
assert multiplexer_client._channels[channel.id].ip_address == IP_ADDR
assert multiplexer_server._channels[channel.id].ip_address == IP_ADDR
async def test_multiplexer_init_channel_full(
multiplexer_client: Multiplexer,
raise_timeout: None,
) -> None:
"""Test that new channels are created but peer error is available."""
assert not multiplexer_client._channels
with pytest.raises(MultiplexerTransportError):
await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
async def test_multiplexer_close_channel(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that channels are nice removed."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert multiplexer_client._channels
assert multiplexer_server._channels
assert multiplexer_client._channels[channel.id]
assert multiplexer_server._channels[channel.id]
assert multiplexer_client._channels[channel.id].ip_address == IP_ADDR
assert multiplexer_server._channels[channel.id].ip_address == IP_ADDR
multiplexer_client.delete_channel(channel)
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
assert not multiplexer_server._channels
async def test_multiplexer_delete_unknown_channel(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test deleting an unknown channel."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
non_existent_channel = MultiplexerChannel(
multiplexer_server._queue,
ipaddress.IPv4Address("127.0.0.1"),
snitun.PROTOCOL_VERSION,
)
await multiplexer_server._queue.put(
non_existent_channel.id,
non_existent_channel.init_close(),
)
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
assert not multiplexer_server._channels
assert (
f"Receive close from unknown channel: {non_existant_channel.id}" in caplog.text
)
async def test_multiplexer_delete_channel_called_multiple_times(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that channels can be deleted twice."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert multiplexer_client._channels
assert multiplexer_server._channels
assert multiplexer_client._channels[channel.id]
assert multiplexer_server._channels[channel.id]
assert multiplexer_client._channels[channel.id].ip_address == IP_ADDR
assert multiplexer_server._channels[channel.id].ip_address == IP_ADDR
multiplexer_client.delete_channel(channel)
assert not multiplexer_client._channels
multiplexer_client.delete_channel(channel)
assert not multiplexer_client._channels
await asyncio.sleep(0.1)
assert not multiplexer_server._channels
async def test_multiplexer_close_channel_full(multiplexer_client: Multiplexer) -> None:
"""Test that channels are nice removed but peer error is available."""
assert not multiplexer_client._channels
channel = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
assert multiplexer_client._channels
multiplexer_client.delete_channel(channel)
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
async def test_multiplexer_data_channel(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that new channels are created."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel_client = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
assert channel_client
assert channel_server
await channel_client.write(b"test 1")
await asyncio.sleep(0.1)
data = await channel_server.read()
assert data == b"test 1"
await channel_server.write(b"test 2")
await asyncio.sleep(0.1)
data = await channel_client.read()
assert data == b"test 2"
async def test_multiplexer_channel_shutdown(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that new channels are created and graceful shutdown."""
loop = asyncio.get_running_loop()
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel_client = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
client_read = loop.create_task(channel_client.read())
server_read = loop.create_task(channel_server.read())
assert not client_read.done()
assert not server_read.done()
multiplexer_client.shutdown()
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
assert client_read.done()
with pytest.raises(MultiplexerTransportClose):
raise client_read.exception()
assert not multiplexer_server._channels
assert server_read.done()
with pytest.raises(MultiplexerTransportClose):
raise server_read.exception()
@patch.object(channel_module, "INCOMING_QUEUE_MAX_BYTES_CHANNEL", 1)
@patch.object(core_module, "OUTGOING_QUEUE_MAX_BYTES_CHANNEL", 1)
async def test_multiplexer_data_channel_abort_full(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that new channels are created."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel_client = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
assert channel_client
assert channel_server
large_msg = b"test xxxx" * 1000
with pytest.raises(MultiplexerTransportClose):
for _ in range(1, 50000):
await channel_client.write(large_msg)
with pytest.raises(MultiplexerTransportClose):
for _ in range(1, 50000):
await channel_server.read()
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
assert not multiplexer_server._channels
async def test_multiplexer_throttling(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that new channels are created and graceful shutdown."""
loop = asyncio.get_running_loop()
assert not multiplexer_client._channels
assert not multiplexer_server._channels
data_in = []
channel_client = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
multiplexer_server._throttling = 0.1
multiplexer_client._throttling = 0.1
async def _sender() -> None:
"""Send data much as possible."""
for _ in range(1, 100000):
await channel_client.write(b"data")
async def _receiver() -> None:
"""Receive data much as possible."""
for _ in range(1, 100000):
data = await channel_server.read()
data_in.append(data)
receiver = loop.create_task(_receiver())
sender = loop.create_task(_sender())
await asyncio.sleep(0.8)
assert not receiver.done()
assert len(data_in) <= 8
receiver.cancel()
sender.cancel()
with suppress(asyncio.CancelledError):
await receiver
with suppress(asyncio.CancelledError):
await sender
async def test_multiplexer_core_peer_timeout(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test that new channels are created and graceful shutdown."""
from snitun.multiplexer import core as multi_core
loop = asyncio.get_running_loop()
multi_core.PEER_TCP_TIMEOUT = 0.2
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel_client = await multiplexer_client.create_channel(IP_ADDR, lambda _: None)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
client_read = loop.create_task(channel_client.read())
server_read = loop.create_task(channel_server.read())
assert not client_read.done()
assert not server_read.done()
await multiplexer_client.ping()
await asyncio.sleep(0.3)
assert not multiplexer_client._channels
assert not multiplexer_server._channels
assert server_read.done()
assert client_read.done()
with pytest.raises(MultiplexerTransportClose):
raise server_read.exception()
with pytest.raises(MultiplexerTransportClose):
raise client_read.exception()
multi_core.PEER_TCP_TIMEOUT = 90
@patch.object(channel_module, "INCOMING_QUEUE_LOW_WATERMARK", HEADER_SIZE * 2)
@patch.object(channel_module, "INCOMING_QUEUE_HIGH_WATERMARK", HEADER_SIZE * 3)
async def test_remote_input_queue_goes_under_water(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
) -> None:
"""Test the remote input queue going under water."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
client_channel_under_water: list[bool] = []
server_channel_under_water: list[bool] = []
def _on_client_channel_under_water(under_water: bool) -> None:
client_channel_under_water.append(under_water)
def _on_server_channel_under_water(under_water: bool) -> None:
server_channel_under_water.append(under_water)
channel_client = await multiplexer_client.create_channel(
IP_ADDR,
_on_client_channel_under_water,
)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
channel_server.set_pause_resume_reader_callback(_on_server_channel_under_water)
assert channel_client
assert channel_server
sent_messages: list[bytes] = []
message_count = 255
for i in range(message_count):
payload = str(i).encode()
sent_messages.append(payload)
await channel_client.write(payload)
await asyncio.sleep(0.1)
assert client_channel_under_water == [True]
assert server_channel_under_water == []
for i in range(message_count):
data = await channel_server.read()
assert data == sent_messages[i]
await asyncio.sleep(0.1)
assert client_channel_under_water == [True, False]
assert server_channel_under_water == []
@patch.object(channel_module, "INCOMING_QUEUE_LOW_WATERMARK", HEADER_SIZE * 2)
@patch.object(channel_module, "INCOMING_QUEUE_HIGH_WATERMARK", HEADER_SIZE * 3)
async def test_remote_input_queue_goes_under_water_protocol_version_0(
multiplexer_client: Multiplexer,
multiplexer_server_peer_protocol_0: Multiplexer,
) -> None:
"""Test the remote input queue going under water with client protocol 0.
Protocol 0 has no flow control.
"""
assert not multiplexer_client._channels
assert not multiplexer_server_peer_protocol_0._channels
client_channel_under_water: list[bool] = []
server_channel_under_water: list[bool] = []
def _on_client_channel_under_water(under_water: bool) -> None:
client_channel_under_water.append(under_water)
def _on_server_channel_under_water(under_water: bool) -> None:
server_channel_under_water.append(under_water)
channel_client = await multiplexer_client.create_channel(
IP_ADDR,
_on_client_channel_under_water,
)
await asyncio.sleep(0.1)
channel_server = multiplexer_server_peer_protocol_0._channels.get(channel_client.id)
channel_server.set_pause_resume_reader_callback(_on_server_channel_under_water)
assert channel_client
assert channel_server
sent_messages: list[bytes] = []
message_count = 255
for i in range(message_count):
payload = str(i).encode()
sent_messages.append(payload)
await channel_client.write(payload)
await asyncio.sleep(0.1)
# No flow control for protocol 0
assert client_channel_under_water == []
assert server_channel_under_water == []
for i in range(message_count):
data = await channel_server.read()
assert data == sent_messages[i]
await asyncio.sleep(0.1)
# No flow control for protocol 0
assert client_channel_under_water == []
assert server_channel_under_water == []
async def test_sending_unknown_message_type(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test that new channels are created."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel_client = await multiplexer_client.create_channel(
IP_ADDR,
lambda _: None,
)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
assert channel_client
assert channel_server
channel_client._output.put_nowait(
channel_client.id,
MultiplexerMessage(channel_client.id, 255),
)
await asyncio.sleep(0.1)
assert "Receive unknown message type: 255" in caplog.text
async def test_sending_pause_for_unknown_channel(
multiplexer_client: Multiplexer,
multiplexer_server: Multiplexer,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test sending pause for unknown channel is logged."""
assert not multiplexer_client._channels
assert not multiplexer_server._channels
channel_client = await multiplexer_client.create_channel(
IP_ADDR,
lambda _: None,
)
await asyncio.sleep(0.1)
channel_server = multiplexer_server._channels.get(channel_client.id)
assert channel_client
assert channel_server
wrong_channel_id = MultiplexerChannelId(os.urandom(16))
channel_client._output.put_nowait(
channel_client.id,
MultiplexerMessage(wrong_channel_id, CHANNEL_FLOW_PAUSE),
)
await asyncio.sleep(0.1)
assert (
f"Receive pause from unknown channel: {wrong_channel_id.hex()}" in caplog.text
)
snitun-0.42.0/tests/multiplexer/test_crypto.py 0000664 0000000 0000000 00000000632 14773172534 0021600 0 ustar 00root root 0000000 0000000 """Test crypto module for transport."""
import os
from snitun.multiplexer.crypto import CryptoTransport
def test_setup_crypto_transport() -> None:
"""Test crypto transport setup."""
key = os.urandom(32)
iv = os.urandom(16)
crypto = CryptoTransport(key, iv)
for _ in range(1, 10):
test_data = os.urandom(32)
assert crypto.decrypt(crypto.encrypt(test_data)) == test_data
snitun-0.42.0/tests/multiplexer/test_message.py 0000664 0000000 0000000 00000003105 14773172534 0021702 0 ustar 00root root 0000000 0000000 from snitun.multiplexer.message import (
FlowType,
MultiplexerChannelId,
MultiplexerMessage,
)
def test_multiplexer_channel_id() -> None:
"""Test MultiplexerChannelId."""
channel_id = MultiplexerChannelId(b"testtesttesttest")
assert channel_id.bytes == b"testtesttesttest"
assert channel_id.hex() == "74657374746573747465737474657374"
assert str(channel_id) == "74657374746573747465737474657374"
def test_message_types() -> None:
"""Test FlowType."""
assert FlowType.NEW == 0x01
assert FlowType.NEW.value == 0x01
assert FlowType.DATA == 0x02
assert FlowType.DATA.value == 0x02
assert FlowType.CLOSE == 0x04
assert FlowType.CLOSE.value == 0x04
assert FlowType.PING == 0x08
assert FlowType.PING.value == 0x08
assert FlowType.PAUSE == 0x16
assert FlowType.PAUSE.value == 0x16
assert FlowType.RESUME == 0x32
assert FlowType.RESUME.value == 0x32
def test_message_repr() -> None:
"""Test MultiplexerMessage __repr__."""
msg = MultiplexerMessage(
MultiplexerChannelId(b"testtesttesttest"),
FlowType.NEW,
b"test",
b"test",
)
assert repr(msg) == (
"MultiplexerMessage(id=74657374746573747465737474657374, flow_type="
", data=b'test', extra=b'test')"
)
msg = MultiplexerMessage(
MultiplexerChannelId(b"testtesttesttest"),
255,
b"test",
b"test",
)
assert repr(msg) == (
"MultiplexerMessage(id=74657374746573747465737474657374, flow_type="
"255, data=b'test', extra=b'test')"
)
snitun-0.42.0/tests/multiplexer/test_queue.py 0000664 0000000 0000000 00000056175 14773172534 0021421 0 ustar 00root root 0000000 0000000 """Test Multiplexer queue."""
import asyncio
import os
import pytest
from snitun.multiplexer.message import (
CHANNEL_FLOW_DATA,
HEADER_SIZE,
MultiplexerChannelId,
MultiplexerMessage,
)
from snitun.multiplexer.queue import (
MultiplexerMultiChannelQueue,
MultiplexerSingleChannelQueue,
)
MOCK_MSG_SIZE = 4
def _make_mock_channel_id() -> MultiplexerChannelId:
return MultiplexerChannelId(os.urandom(16))
def _make_mock_message(
channel_id: MultiplexerChannelId,
size: int = MOCK_MSG_SIZE,
) -> MultiplexerMessage:
return MultiplexerMessage(channel_id, CHANNEL_FLOW_DATA, os.urandom(size))
async def test_get_non_existent_channels() -> None:
"""Test MultiplexerMultiChannelQueue get on non-existent channel."""
queue = MultiplexerMultiChannelQueue(100000, 10, 1000)
assert queue.empty(_make_mock_channel_id())
assert not queue.full(_make_mock_channel_id())
assert queue.size(_make_mock_channel_id()) == 0
# Make sure defaultdict does not leak
assert not queue._channels
async def test_single_channel_queue() -> None:
"""Test MultiplexerSingleChannelQueue."""
queue = MultiplexerSingleChannelQueue(100, 10, 50, lambda _: None)
channel_id = _make_mock_channel_id()
msg = _make_mock_message(channel_id)
assert queue.qsize() == 0
queue.put_nowait(msg)
assert queue.qsize() == len(msg.data) + HEADER_SIZE
assert queue.get_nowait() == msg
assert queue.qsize() == 0
queue.put_nowait(None)
assert queue.qsize() == 0
async def test_multi_channel_queue_full() -> None:
"""Test MultiplexerMultiChannelQueue getting full."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max two mock messages per channel
queue = MultiplexerMultiChannelQueue(msg_size * 2, msg_size, msg_size * 2)
channel_one_id = _make_mock_channel_id()
channel_two_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
queue.create_channel(channel_two_id, lambda _: None)
channel_one_msg = _make_mock_message(channel_one_id)
channel_two_msg = _make_mock_message(channel_two_id)
queue.put_nowait(channel_one_id, channel_one_msg)
queue.put_nowait(channel_one_id, channel_one_msg)
with pytest.raises(asyncio.QueueFull):
queue.put_nowait(channel_one_id, channel_one_msg)
queue.put_nowait(channel_two_id, channel_two_msg)
queue.put_nowait(channel_two_id, channel_two_msg)
with pytest.raises(asyncio.QueueFull):
queue.put_nowait(channel_two_id, channel_two_msg)
with pytest.raises(TimeoutError):
async with asyncio.timeout(0.1):
await queue.put(channel_one_id, channel_one_msg)
assert queue.size(channel_one_id) == msg_size * 2
add_task = asyncio.create_task(queue.put(channel_one_id, channel_one_msg))
await asyncio.sleep(0)
assert not add_task.done()
assert queue.get_nowait() == channel_one_msg
await asyncio.sleep(0)
assert add_task.done()
assert queue.get_nowait() == channel_two_msg
async def test_multi_channel_queue_force_message_on_full() -> None:
"""Test MultiplexerMultiChannelQueue getting full and forcing a message in."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max two mock messages per channel
queue = MultiplexerMultiChannelQueue(msg_size * 2, msg_size, msg_size * 2)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg = _make_mock_message(channel_one_id)
queue.put_nowait(channel_one_id, channel_one_msg)
queue.put_nowait(channel_one_id, channel_one_msg)
with pytest.raises(asyncio.QueueFull):
queue.put_nowait(channel_one_id, channel_one_msg)
queue.put_nowait_force(channel_one_id, channel_one_msg)
queue.put_nowait_force(channel_one_id, None)
assert queue.size(channel_one_id) == msg_size * 3
assert queue.get_nowait() == channel_one_msg
assert queue.get_nowait() == channel_one_msg
assert queue.get_nowait() == channel_one_msg
assert queue.get_nowait() is None
queue.delete_channel(channel_one_id)
with pytest.raises(RuntimeError, match="does not exist or already closed"):
queue.put_nowait_force(channel_one_id, channel_one_msg)
async def test_multi_channel_queue_round_robin_get() -> None:
"""Test MultiplexerMultiChannelQueue round robin get."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max two mock messages per channel
queue = MultiplexerMultiChannelQueue(msg_size * 2, msg_size, msg_size * 2)
channel_one_id = _make_mock_channel_id()
channel_two_id = _make_mock_channel_id()
channel_three_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
queue.create_channel(channel_two_id, lambda _: None)
queue.create_channel(channel_three_id, lambda _: None)
channel_one_msg = _make_mock_message(channel_one_id)
assert queue.empty(channel_one_id)
await queue.put(channel_one_id, channel_one_msg)
assert not queue.empty(channel_one_id)
assert queue.size(channel_one_id) == len(channel_one_msg.data) + HEADER_SIZE
channel_two_msg = _make_mock_message(channel_two_id)
assert queue.empty(channel_two_id)
await queue.put(channel_two_id, channel_two_msg)
assert not queue.empty(channel_two_id)
assert queue.size(channel_two_id) == len(channel_two_msg.data) + HEADER_SIZE
channel_three_msg = _make_mock_message(channel_three_id)
assert queue.empty(channel_three_id)
queue.put_nowait(channel_three_id, channel_three_msg)
assert not queue.empty(channel_three_id)
assert queue.size(channel_three_id) == len(channel_three_msg.data) + HEADER_SIZE
assert queue.get_nowait() == channel_one_msg
assert queue.empty(channel_one_id)
assert queue.size(channel_one_id) == 0
assert queue.get_nowait() == channel_two_msg
assert queue.empty(channel_two_id)
assert queue.size(channel_two_id) == 0
assert queue.get_nowait() == channel_three_msg
assert queue.empty(channel_three_id)
assert queue.size(channel_three_id) == 0
with pytest.raises(asyncio.QueueEmpty):
queue.get_nowait()
with pytest.raises(TimeoutError):
async with asyncio.timeout(0.1):
await queue.get()
queue.put_nowait(channel_two_id, channel_two_msg)
queue.put_nowait(channel_three_id, channel_three_msg)
queue.put_nowait(channel_one_id, channel_one_msg)
queue.put_nowait(channel_one_id, channel_one_msg)
queue.put_nowait(channel_three_id, channel_three_msg)
queue.put_nowait(channel_two_id, channel_two_msg)
msgs = [queue.get_nowait() for _ in range(6)]
# Queue should be fair regardless of the order of the messages
# coming in
assert msgs == [
channel_two_msg,
channel_three_msg,
channel_one_msg,
channel_two_msg,
channel_three_msg,
channel_one_msg,
]
with pytest.raises(asyncio.QueueEmpty):
queue.get_nowait()
async def test_concurrent_get() -> None:
"""Test MultiplexerMultiChannelQueue concurrent get."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max two mock messages per channel
queue = MultiplexerMultiChannelQueue(msg_size * 2, msg_size, msg_size * 2)
channel_one_id = _make_mock_channel_id()
channel_two_id = _make_mock_channel_id()
channel_three_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
queue.create_channel(channel_two_id, lambda _: None)
queue.create_channel(channel_three_id, lambda _: None)
channel_one_msg = _make_mock_message(channel_one_id)
channel_two_msg = _make_mock_message(channel_two_id)
channel_three_msg = _make_mock_message(channel_three_id)
fetch_tasks = [asyncio.create_task(queue.get()) for _ in range(3)]
await queue.put(channel_one_id, channel_one_msg)
await queue.put(channel_two_id, channel_two_msg)
await queue.put(channel_three_id, channel_three_msg)
fetched_msgs = await asyncio.gather(*fetch_tasks)
assert channel_one_msg in fetched_msgs
assert channel_two_msg in fetched_msgs
assert channel_three_msg in fetched_msgs
with pytest.raises(asyncio.QueueEmpty):
queue.get_nowait()
async def test_cancel_one_get() -> None:
"""Test the cancellation of a single `get` operation on multiplexer queue."""
queue = MultiplexerMultiChannelQueue(100000, 10, 10000)
reader = asyncio.create_task(queue.get())
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg1 = _make_mock_message(channel_one_id)
channel_one_msg2 = _make_mock_message(channel_one_id)
await asyncio.sleep(0)
queue.put_nowait(channel_one_id, channel_one_msg1)
queue.put_nowait(channel_one_id, channel_one_msg2)
reader.cancel()
with pytest.raises(asyncio.CancelledError):
await reader
assert await queue.get() == channel_one_msg1
async def test_reader_cancellation() -> None:
"""
Test behavior of the MultiplexerMultiChannelQueue when a reader task is cancelled.
Assertions:
- The cancelled reader task raises asyncio.CancelledError.
- The remaining reader tasks retrieve the messages from the queue in any order.
"""
queue = MultiplexerMultiChannelQueue(100000, 10, 10000)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg1 = _make_mock_message(channel_one_id)
channel_one_msg2 = _make_mock_message(channel_one_id)
async with asyncio.TaskGroup() as tg:
reader1 = tg.create_task(queue.get())
reader2 = tg.create_task(queue.get())
reader3 = tg.create_task(queue.get())
await asyncio.sleep(0)
queue.put_nowait(channel_one_id, channel_one_msg1)
queue.put_nowait(channel_one_id, channel_one_msg2)
reader1.cancel()
with pytest.raises(asyncio.CancelledError):
await reader1
await reader3
# Any order is fine as long as we get both messages
# since task order is not guaranteed
assert {reader2.result(), reader3.result()} == {channel_one_msg1, channel_one_msg2}
async def test_put_cancel_race() -> None:
"""Test race between putting messages and cancelling the put operation."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max one message
queue = MultiplexerMultiChannelQueue(msg_size, msg_size, msg_size)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
channel_one_msg_2 = _make_mock_message(channel_one_id)
channel_one_msg_3 = _make_mock_message(channel_one_id)
queue.put_nowait(channel_one_id, channel_one_msg_1)
assert queue.get_nowait() == channel_one_msg_1
assert queue.empty(channel_one_id)
put_1 = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_1))
put_2 = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_2))
put_3 = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_3))
await asyncio.sleep(0)
assert put_1.done()
assert not put_2.done()
assert not put_3.done()
put_3.cancel()
await asyncio.sleep(0)
assert put_3.done()
assert queue.get_nowait() == channel_one_msg_1
await asyncio.sleep(0)
assert queue.get_nowait() == channel_one_msg_2
await put_2
async def test_putters_cleaned_up_correctly_on_cancellation() -> None:
"""Test that putters are cleaned up correctly when a put operation is canceled."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max one message
queue = MultiplexerMultiChannelQueue(msg_size, msg_size, msg_size)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
channel_one_msg_2 = _make_mock_message(channel_one_id)
queue.put_nowait(channel_one_id, channel_one_msg_1)
put_task = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_2))
await asyncio.sleep(0)
# Check that the putter is correctly removed from the channel putters when
# the task is canceled.
assert len(queue._channels[channel_one_id].putters) == 1
put_task.cancel()
with pytest.raises(asyncio.CancelledError):
await put_task
assert len(queue._channels[channel_one_id].putters) == 0
async def test_getters_cleaned_up_correctly_on_cancellation() -> None:
"""Test getters are cleaned up correctly when a get operation is canceled."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max one message
queue = MultiplexerMultiChannelQueue(msg_size, msg_size, msg_size)
with pytest.raises(TimeoutError):
async with asyncio.timeout(0.1):
await queue.get()
assert len(queue._getters) == 0
async def test_cancelled_when_putter_already_removed() -> None:
"""Test put operation is correctly cancelled when the putter is already removed."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max one message
queue = MultiplexerMultiChannelQueue(msg_size, msg_size, msg_size)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
queue.put_nowait(channel_one_id, channel_one_msg_1)
put_task = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_1))
await asyncio.sleep(0)
queue.get_nowait()
put_task.cancel()
with pytest.raises(asyncio.CancelledError):
await put_task
async def test_multiple_getters_waiting_multiple_putters() -> None:
"""Test that multiple getters and putters are correctly handled."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
# Max one message
queue = MultiplexerMultiChannelQueue(msg_size, msg_size, msg_size)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
channel_one_msg_2 = _make_mock_message(channel_one_id)
t1 = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_1))
t2 = asyncio.create_task(queue.put(channel_one_id, channel_one_msg_2))
assert await queue.get() == channel_one_msg_1
assert await queue.get() == channel_one_msg_2
await t1
await t2
async def test_get_cancelled_race() -> None:
"""Test cancelling a get operation while another get operation is in progress."""
queue = MultiplexerMultiChannelQueue(10000000, 10, 10000)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
t1 = asyncio.create_task(queue.get())
t2 = asyncio.create_task(queue.get())
await asyncio.sleep(0)
t1.cancel()
await asyncio.sleep(0)
assert t1.done()
await queue.put(channel_one_id, channel_one_msg_1)
await asyncio.sleep(0)
assert await t2 == channel_one_msg_1
async def test_get_with_other_putters() -> None:
"""Test that a get operation is correctly handled when other putters are waiting."""
loop = asyncio.get_running_loop()
queue = MultiplexerMultiChannelQueue(10000000, 10, 10000)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
queue.put_nowait(channel_one_id, channel_one_msg_1)
other_putter = loop.create_future()
queue._channels[channel_one_id].putters.append(other_putter)
assert await queue.get() == channel_one_msg_1
assert other_putter.done()
assert await other_putter is None
await queue.put(channel_one_id, channel_one_msg_1)
assert queue.get_nowait() == channel_one_msg_1
async def test_get_with_other_putter_already_one() -> None:
"""Test that a get operation is correctly handled when other putters are waiting."""
loop = asyncio.get_running_loop()
queue = MultiplexerMultiChannelQueue(10000000, 10, 10000)
channel_one_id = _make_mock_channel_id()
queue.create_channel(channel_one_id, lambda _: None)
channel_one_msg_1 = _make_mock_message(channel_one_id)
queue.put_nowait(channel_one_id, channel_one_msg_1)
other_putter = loop.create_future()
other_putter.set_result(None)
queue._channels[channel_one_id].putters.append(other_putter)
assert await queue.get() == channel_one_msg_1
assert other_putter.done()
assert await other_putter is None
await queue.put(channel_one_id, channel_one_msg_1)
assert queue.get_nowait() == channel_one_msg_1
async def test_single_channel_queue_under_water() -> None:
"""Test MultiplexerSingleChannelQueue under water."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
under_water_callbacks: list[bool] = []
def on_under_water(under_water: bool) -> None:
under_water_callbacks.append(under_water)
queue = MultiplexerSingleChannelQueue(
msg_size * 10,
msg_size * 2,
msg_size * 4,
on_under_water,
)
channel_id = _make_mock_channel_id()
msg = _make_mock_message(channel_id)
assert queue.qsize() == 0
queue.put_nowait(msg)
assert queue.qsize() == len(msg.data) + HEADER_SIZE
assert not under_water_callbacks
queue.put_nowait(msg) # now 2 messages
assert not under_water_callbacks
queue.put_nowait(msg) # now 3 messages
assert not under_water_callbacks
queue.put_nowait(msg) # now 4 messages -- under water
assert under_water_callbacks == [True]
queue.put_nowait(msg) # now 5 messages -- still under water
assert under_water_callbacks == [True]
queue.get_nowait() # now 4 messages -- have not reached low watermark
assert under_water_callbacks == [True]
queue.get_nowait() # now 3 messages -- have not reached low watermark
assert under_water_callbacks == [True]
queue.get_nowait() # now 2 messages -- reached low watermark
assert under_water_callbacks == [True, False]
queue.get_nowait() # now 1 message -- still below low watermark
assert under_water_callbacks == [True, False]
queue.get_nowait() # now 0 messages -- empty
assert under_water_callbacks == [True, False]
queue.put_nowait(msg) # now 1 message -- below high watermark
assert under_water_callbacks == [True, False]
queue.put_nowait(msg) # now 2 messages -- still below high watermark
assert under_water_callbacks == [True, False]
queue.put_nowait(msg) # now 3 messages -- still below high watermark
assert under_water_callbacks == [True, False]
queue.put_nowait(msg) # now 4 messages -- reached high watermark
assert under_water_callbacks == [True, False, True]
queue.get_nowait() # now 3 messages -- below high watermark, but still above low watermark
assert under_water_callbacks == [True, False, True]
queue.get_nowait() # now 2 messages -- below high watermark and below low watermark
assert under_water_callbacks == [True, False, True, False]
async def test_multi_channel_queue_under_water() -> None:
"""Test MultiplexerMultiChannelQueue under water."""
msg_size = MOCK_MSG_SIZE + HEADER_SIZE
under_water_callbacks: list[bool] = []
def on_under_water(under_water: bool) -> None:
under_water_callbacks.append(under_water)
queue = MultiplexerMultiChannelQueue(
msg_size * 10,
msg_size * 2,
msg_size * 4,
)
channel_id = _make_mock_channel_id()
queue.create_channel(channel_id, on_under_water)
msg = _make_mock_message(channel_id)
assert queue.empty(channel_id)
queue.put_nowait(channel_id, msg)
assert not under_water_callbacks
queue.put_nowait(channel_id, msg) # now 2 messages
assert not under_water_callbacks
queue.put_nowait(channel_id, msg) # now 3 messages
assert not under_water_callbacks
queue.put_nowait(channel_id, msg) # now 4 messages -- under water
assert under_water_callbacks == [True]
queue.put_nowait(channel_id, msg) # now 5 messages -- still under water
assert under_water_callbacks == [True]
queue.get_nowait() # now 4 messages -- have not reached low watermark
assert under_water_callbacks == [True]
queue.get_nowait() # now 3 messages -- have not reached low watermark
assert under_water_callbacks == [True]
queue.get_nowait() # now 2 messages -- reached low watermark
assert under_water_callbacks == [True, False]
queue.get_nowait() # now 1 message -- still below low watermark
assert under_water_callbacks == [True, False]
queue.get_nowait() # now 0 messages -- empty
assert under_water_callbacks == [True, False]
queue.put_nowait(channel_id, msg) # now 1 message -- below high watermark
assert under_water_callbacks == [True, False]
queue.put_nowait(channel_id, msg) # now 2 messages -- still below high watermark
assert under_water_callbacks == [True, False]
queue.put_nowait(channel_id, msg) # now 3 messages -- still below high watermark
assert under_water_callbacks == [True, False]
queue.put_nowait(channel_id, msg) # now 4 messages -- reached high watermark
assert under_water_callbacks == [True, False, True]
queue.get_nowait() # now 3 messages -- below high watermark, but still above low watermark
assert under_water_callbacks == [True, False, True]
queue.get_nowait() # now 2 messages -- below high watermark and below low watermark
assert under_water_callbacks == [True, False, True, False]
async def test_put_nowait_to_non_existent_multi_channel_queue() -> None:
"""Test writing to a non-existent channel."""
queue = MultiplexerMultiChannelQueue(100000, 10, 1000)
channel_id = _make_mock_channel_id()
msg = _make_mock_message(channel_id)
with pytest.raises(RuntimeError, match=f"Channel {channel_id} does not exist"):
queue.put_nowait(channel_id, msg)
async def test_put_to_non_existent_multi_channel_queue() -> None:
"""Test writing to a non-existent channel."""
queue = MultiplexerMultiChannelQueue(100000, 10, 1000)
channel_id = _make_mock_channel_id()
msg = _make_mock_message(channel_id)
with pytest.raises(RuntimeError, match=f"Channel {channel_id} does not exist"):
await queue.put(channel_id, msg)
async def test_multiple_delete_channel_is_forgiving() -> None:
"""Test a channel can be deleted multiple times."""
queue = MultiplexerMultiChannelQueue(100000, 10, 1000)
channel_id = _make_mock_channel_id()
queue.create_channel(channel_id, lambda _: None)
queue.delete_channel(channel_id)
queue.delete_channel(channel_id)
async def test_delete_channel_when_queue_is_not_empty() -> None:
"""Test a channel can be deleted when its queue is not empty."""
queue = MultiplexerMultiChannelQueue(100000, 10, 1000)
channel_id = _make_mock_channel_id()
queue.create_channel(channel_id, lambda _: None)
queue.put_nowait(channel_id, _make_mock_message(channel_id))
queue.delete_channel(channel_id)
assert not queue.empty(channel_id)
assert queue.get_nowait() is not None
queue.delete_channel(channel_id)
assert queue.empty(channel_id)
async def test_multiple_create_channel_raises() -> None:
"""Test the same channel can only be created once."""
queue = MultiplexerMultiChannelQueue(100000, 10, 1000)
channel_id = _make_mock_channel_id()
queue.create_channel(channel_id, lambda _: None)
with pytest.raises(RuntimeError, match=f"Channel {channel_id} already exists"):
queue.create_channel(channel_id, lambda _: None)
snitun-0.42.0/tests/server/ 0000775 0000000 0000000 00000000000 14773172534 0015602 5 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/server/__init__.py 0000664 0000000 0000000 00000000033 14773172534 0017707 0 ustar 00root root 0000000 0000000 """SniTun server tests."""
snitun-0.42.0/tests/server/const_fernet.py 0000664 0000000 0000000 00000001531 14773172534 0020645 0 ustar 00root root 0000000 0000000 """Const value for Fernet tests."""
import json
from cryptography.fernet import Fernet, MultiFernet
from snitun import PROTOCOL_VERSION
FERNET_TOKENS = [
"XIKL24X0Fu83UmPLmWkXOBvvqsLq41tz2LljwafDyZw=",
"ep1FyYA6epwbFxrtEJ2dii5BGvTx5-xU1oUCrF61qMA=",
]
def create_peer_config(
valid: int,
hostname: str,
aes_key: bytes,
aes_iv: bytes,
alias: list[str] | None = None,
) -> bytes:
"""Create a fernet token."""
fernet = MultiFernet([Fernet(key) for key in FERNET_TOKENS])
return fernet.encrypt(
json.dumps(
{
"protocol_version": PROTOCOL_VERSION,
"valid": valid,
"hostname": hostname,
"alias": alias or [],
"aes_key": aes_key.hex(),
"aes_iv": aes_iv.hex(),
},
).encode(),
)
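# Illustrative sketch (not used by the tests): the receiving side of the token
# produced above. Only the Fernet/JSON round trip is shown here; the real server
# code additionally validates the ``valid`` expiry and ``protocol_version`` fields
# before accepting a peer.
def _decode_peer_config(token: bytes) -> dict:
    """Reverse create_peer_config with the same FERNET_TOKENS (example only)."""
    fernet = MultiFernet([Fernet(key) for key in FERNET_TOKENS])
    return json.loads(fernet.decrypt(token))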
snitun-0.42.0/tests/server/const_tls.py 0000664 0000000 0000000 00000031212 14773172534 0020163 0 ustar 00root root 0000000 0000000 """TLS ClientHello packets from Wireshark."""
# yapf: disable
TLS_1_0: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x01, # Version: TLS 1.0
0x00, 0x68, # Length
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0x64, # Length
0x03, 0x01, # Version: TLS 1.0
# Random
0x4e, 0x55, 0xde, 0x32, 0x80, 0x07, 0x92, 0x9f,
0x50, 0x41, 0xe4, 0xf9, 0x58, 0x32, 0xfc, 0x4f,
0x10, 0xb3, 0xde, 0x44, 0x4d, 0xa9, 0x67, 0x78,
0xea, 0xd1, 0x5f, 0x29, 0x09, 0x04, 0xc1, 0x06,
0x00, # Session ID Length
0x00, 0x28, # Cipher Suites Length
0x00, 0x39,
0x00, 0x38,
0x00, 0x35,
0x00, 0x16,
0x00, 0x13,
0x00, 0x0a,
0x00, 0x33,
0x00, 0x32,
0x00, 0x2f,
0x00, 0x05,
0x00, 0x04,
0x00, 0x15,
0x00, 0x12,
0x00, 0x09,
0x00, 0x14,
0x00, 0x11,
0x00, 0x08,
0x00, 0x06,
0x00, 0x03,
0x00, 0xff,
0x02, # Compression Methods
0x01,
0x00,
0x00, 0x12, # Extensions Length
0x00, 0x00, # Extension Type: Server Name
0x00, 0x0e, # Length
0x00, 0x0c, # Server Name Indication Length
0x00, # Server Name Type: host_name
0x00, 0x09, # Length
# "localhost"
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74,
])
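# Illustrative sketch (not the parser snitun itself uses): where the SNI host name
# sits inside a well-formed, fully buffered TLS-record ClientHello such as TLS_1_0
# above. It does no bounds checking beyond what the constants in this file need and
# does not handle SSLv2-style hellos like SSL_2_0 further below.
def _sketch_extract_sni(packet: bytes) -> str | None:
    pos = 5                      # skip record header: type, version, length
    pos += 1 + 3 + 2 + 32        # handshake type, length, client version, random
    pos += 1 + packet[pos]       # session id
    pos += 2 + int.from_bytes(packet[pos:pos + 2], "big")  # cipher suites
    pos += 1 + packet[pos]       # compression methods
    extensions_end = pos + 2 + int.from_bytes(packet[pos:pos + 2], "big")
    pos += 2
    while pos + 4 <= extensions_end:
        ext_type = int.from_bytes(packet[pos:pos + 2], "big")
        ext_len = int.from_bytes(packet[pos + 2:pos + 4], "big")
        if ext_type == 0x0000:   # server_name: list length (2), type (1), length (2)
            name_len = int.from_bytes(packet[pos + 7:pos + 9], "big")
            return packet[pos + 9:pos + 9 + name_len].decode("ascii")
        pos += 4 + ext_len
    return None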
TLS_1_2: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x01, # Version: TLS 1.0
0x00, 0x48, # Length
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0x42, # Length
0x03, 0x03, # Version: TLS 1.2
# Random
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, # Session ID Length
0x00, 0x04, # Cipher Suites Length
0x00, 0x01, # NULL-MD5
0x00, 0xff, # RENEGOTIATION INFO SCSV
0x01, # Compression Methods
0x00, # NULL
0x00, 0x17, # Extensions Length
# Extension
0x00, 0x00, # Extension Type: Server Name
0x00, 0x0e, # Length
0x00, 0x0c, # Server Name Indication Length
0x00, # Server Name Type: host_name
0x00, 0x09, # Length
# "localhost"
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74,
# Extension
0x00, 0x0f, # Extension Type: Heart Beat
0x00, 0x01, # Length
0x01, # Mode: Peer allows to send requests
])
TLS_1_2_ORDER: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x01, # Version: TLS 1.0
0x00, 0x48, # Length
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0x42, # Length
0x03, 0x03, # Version: TLS 1.2
# Random
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, # Session ID Length
0x00, 0x04, # Cipher Suites Length
0x00, 0x01, # NULL-MD5
0x00, 0xff, # RENEGOTIATION INFO SCSV
0x01, # Compression Methods
0x00, # NULL
0x00, 0x17, # Extensions Length
# Extension
0x00, 0x0f, # Extension Type: Heart Beat
0x00, 0x01, # Length
0x01, # Mode: Peer allows to send requests
# Extension
0x00, 0x00, # Extension Type: Server Name
0x00, 0x0e, # Length
0x00, 0x0c, # Server Name Indication Length
0x00, # Server Name Type: host_name
0x00, 0x09, # Length
# "localhost"
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74,
])
TLS_1_2_MORE: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x01, # Version: TLS 1.0
0x00, 0x47, # Length
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0x41, # Length
0x03, 0x03, # Version: TLS 1.2
# Random
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, # Session ID Length
0x00, 0x04, # Cipher Suites Length
0x00, 0x01, # NULL-MD5
0x00, 0xff, # RENEGOTIATION INFO SCSV
0x01, # Compression Methods
0x00, # NULL
0x00, 0x16, # Extensions Length
# Extension
0x00, 0x00, # Extension Type: Server Name
0x00, 0x0e, # Length
0x00, 0x0c, # Server Name Indication Length
0x00, # Server Name Type: host_name
0x00, 0x09, # Length
# "localhost"
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74,
# Extension
0x00, 0x23, # Extension Type: Session Ticket TLS
0x00, 0x00, # Length
])
TLS_1_0_OLD: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x01, # Version: TLS 1.0
0x00, 0xec, # Length 236
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0xe8, # Length 232
0x03, 0x01, # Version: TLS 1.0
# Random
0x4e, 0x55, 0xde, 0x32, 0x80, 0x07, 0x92, 0x9f,
0x50, 0x41, 0xe4, 0xf9, 0x58, 0x32, 0xfc, 0x4f,
0x10, 0xb3, 0xde, 0x44, 0x4d, 0xa9, 0x67, 0x78,
0xea, 0xd1, 0x5f, 0x29, 0x09, 0x04, 0xc1, 0x06,
0x00, # Session ID Length
0x00, 0x28, # Cipher Suites Length
0x00, 0x39,
0x00, 0x38,
0x00, 0x35,
0x00, 0x16,
0x00, 0x13,
0x00, 0x0a,
0x00, 0x33,
0x00, 0x32,
0x00, 0x2f,
0x00, 0x05,
0x00, 0x04,
0x00, 0x15,
0x00, 0x12,
0x00, 0x09,
0x00, 0x14,
0x00, 0x11,
0x00, 0x08,
0x00, 0x06,
0x00, 0x03,
0x00, 0xff,
0x02, # Compression Methods
0x01,
0x00,
0x00, 0x96, # Extensions Length 132 + 18 = 150
0x00, 0x15, # Extension Type: Padding
0x00, 0x80, # Length
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0x00, 0x00, # Extension Type: Server Name
0x00, 0x0e, # Length
0x00, 0x0c, # Server Name Indication Length
0x00, # Server Name Type: host_name
0x00, 0x09, # Length
# "localhost"
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74,
])
SSL_3_0: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x00, # Version: SSL 3.0
0x00, 0x7f, # Length
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0x7b, # Length
0x03, 0x00, # Version: SSL 3.0
# Random
0x53, 0x11, 0x25, 0xc2, 0x92, 0xd6, 0xca, 0xf1,
0x79, 0x90, 0xba, 0x38, 0x8f, 0xad, 0xc8, 0x13,
0xa3, 0x1b, 0x57, 0xd9, 0xf4, 0x3e, 0xd2, 0x8b,
0xb6, 0x5e, 0xe3, 0x12, 0xca, 0x81, 0x2f, 0xc5,
0x00, # Session ID Length
0x00, 0x54, # Cipher Suites Length
0xc0, 0x14,
0xc0, 0x0a,
0xc0, 0x22,
0xc0, 0x21,
0x00, 0x39,
0x00, 0x38,
0xc0, 0x0f,
0xc0, 0x05,
0x00, 0x35,
0xc0, 0x12,
0xc0, 0x08,
0xc0, 0x1c,
0xc0, 0x1b,
0x00, 0x16,
0x00, 0x13,
0xc0, 0x0d,
0xc0, 0x03,
0x00, 0x0a,
0xc0, 0x13,
0xc0, 0x09,
0xc0, 0x1f,
0xc0, 0x1e,
0x00, 0x33,
0x00, 0x32,
0xc0, 0x0e,
0xc0, 0x04,
0x00, 0x2f,
0xc0, 0x11,
0xc0, 0x07,
0xc0, 0x0c,
0xc0, 0x02,
0x00, 0x05,
0x00, 0x04,
0x00, 0x15,
0x00, 0x12,
0x00, 0x09,
0x00, 0x14,
0x00, 0x11,
0x00, 0x08,
0x00, 0x06,
0x00, 0x03,
0x00, 0xff,
0x01, # Compression Methods
0x00,
])
SSL_2_0: bytes = bytes([
0x80, 0x67, # Length (leading bit set)
0x01, # Handshake Type: Client Hello
0x03, 0x01, # Version
0x00, 0x4e, # Cipher spec length
0x00, 0x00, # Session ID length
0x00, 0x10, # Challenge length
# Cipher Suites (SSL 2.0 cipher specs, 3 bytes each)
0x00, 0x00, 0x39,
0x00, 0x00, 0x38,
0x00, 0x00, 0x35,
0x00, 0x00, 0x16,
0x00, 0x00, 0x13,
0x00, 0x00, 0x0a,
0x07, 0x00, 0xc0,
0x00, 0x00, 0x33,
0x00, 0x00, 0x32,
0x00, 0x00, 0x2f,
0x03, 0x00, 0x80,
0x00, 0x00, 0x05,
0x00, 0x00, 0x04,
0x01, 0x00, 0x80,
0x00, 0x00, 0x15,
0x00, 0x00, 0x12,
0x00, 0x00, 0x09,
0x06, 0x00, 0x40,
0x00, 0x00, 0x14,
0x00, 0x00, 0x11,
0x00, 0x00, 0x08,
0x00, 0x00, 0x06,
0x04, 0x00, 0x80,
0x00, 0x00, 0x03,
0x02, 0x00, 0x80,
0x00, 0x00, 0xff,
# Challenge (16 bytes)
0x74, 0x15, 0xdc, 0x11, 0x0b, 0xcb, 0x2b, 0x03,
0x5d, 0xb1, 0x5a, 0x2f, 0xac, 0x72, 0x45, 0x2e,
])
BAD_DATA1: bytes = bytes([
0x16, 0x03, 0x01, 0x00, 0x68, 0x01, 0x00, 0x00,
0x64, 0x03, 0x01, 0x4e, 0x4e, 0xbe, 0xc2, 0xa1,
0x21, 0xad, 0xbc, 0x28, 0x33, 0xca, 0xa1, 0xd6,
0x6e, 0x57, 0xb9, 0x1f, 0x8c, 0x19, 0x0e, 0x44,
0x16, 0x9e, 0x7d, 0x20, 0x35, 0x4b, 0x65, 0xb2,
0xc0, 0xd5, 0xa8, 0x00, 0x00, 0x28, 0x00, 0x39,
0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13,
0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f,
0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12,
0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08,
0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01,
0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x0e, 0x00,
])
BAD_DATA2: bytes = bytes([
0x16, 0x03, 0x01, 0x00,
])
TLS_1_2_BAD: bytes = bytes([
# TLS record
0x16, # Content Type: Handshake
0x03, 0x01, # Version: TLS 1.0
0x00, 0x48, # Length
# Handshake
0x01, # Handshake Type: Client Hello
0x00, 0x00, 0x42, # Length
0x03, 0x03, # Version: TLS 1.2
# Random
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, # Session ID Length
0x00, 0x04, # Cipher Suites Length
0x00, 0x01, # NULL-MD5
0x00, 0xff, # RENEGOTIATION INFO SCSV
0x01, # Compression Methods
0x00, # NULL
0x00, 0x17, # Extensions Length
# Extension
0x00, 0x00, # Extension Type: Server Name
0x00, 0x0e, # Length
0x00, 0x0c, # Server Name Indication Length
0x00, # Server Name Type: host_name
0x00, 0x09, # Length
# "localhost"
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74,
# Extension
0x00, 0x0f, # Extension Type: Heart Beat
0x00, 0x01, # Length
0x01, # Mode: Peer allows to send requests
])
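# The constants above hand-craft ClientHello records so the SNI parser can be
# exercised without a real TLS stack. As an illustration of how the server name
# is located inside such a record, the sketch below walks the same byte layout
# the comments document. It is a minimal, assumption-laden sketch for reading
# these test constants only; the real implementation lives in
# snitun.server.sni.parse_tls_sni and performs proper validation.
def _sketch_extract_sni(packet: bytes) -> str:
    """Return the host_name of a plaintext ClientHello (illustrative only)."""
    pos = 5  # skip the TLS record header (type, version, length)
    pos += 4  # skip the handshake header (type, 24-bit length)
    pos += 2 + 32  # skip client version and random
    pos += 1 + packet[pos]  # session ID (length byte + value)
    cipher_len = int.from_bytes(packet[pos:pos + 2], "big")
    pos += 2 + cipher_len  # cipher suites
    pos += 1 + packet[pos]  # compression methods
    pos += 2  # extensions length
    while pos + 4 <= len(packet):
        ext_type = int.from_bytes(packet[pos:pos + 2], "big")
        ext_len = int.from_bytes(packet[pos + 2:pos + 4], "big")
        if ext_type == 0x0000:  # server_name extension
            name_len = int.from_bytes(packet[pos + 7:pos + 9], "big")
            return packet[pos + 9:pos + 9 + name_len].decode("ascii")
        pos += 4 + ext_len
    raise ValueError("No SNI extension found")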
snitun-0.42.0/tests/server/test_all.py 0000664 0000000 0000000 00000004616 14773172534 0017772 0 ustar 00root root 0000000 0000000 """Tests for peer listener & manager."""
import asyncio
from datetime import UTC, datetime, timedelta
import hashlib
import ipaddress
import os
import snitun
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
from snitun.multiplexer.crypto import CryptoTransport
from snitun.server.listener_peer import PeerListener
from snitun.server.listener_sni import SNIProxy
from snitun.server.peer_manager import PeerManager
from ..conftest import Client
from .const_fernet import create_peer_config
from .const_tls import TLS_1_2
IP_ADDR = ipaddress.ip_address("127.0.0.1")
async def test_server_full(
peer_manager: PeerManager,
peer_listener: PeerListener,
test_client_peer: Client,
sni_proxy: SNIProxy,
test_client_ssl: Client,
) -> None:
"""Run a full flow of with a peer after that disconnect."""
peer_messages = []
peer_address = []
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
test_client_peer.writer.write(fernet_token)
await test_client_peer.writer.drain()
token = await test_client_peer.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_peer.writer.write(crypto.encrypt(token))
await test_client_peer.writer.drain()
await asyncio.sleep(0.1)
assert peer_manager.peer_available(hostname)
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
while True:
message = await channel.read()
peer_messages.append(message)
peer_address.append(channel.ip_address)
multiplexer = Multiplexer(
crypto,
test_client_peer.reader,
test_client_peer.writer,
snitun.PROTOCOL_VERSION,
mock_new_channel,
)
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert peer_messages
assert peer_messages[0] == TLS_1_2
assert peer_address
assert peer_address[0] == IP_ADDR
multiplexer.shutdown()
await multiplexer.wait()
await asyncio.sleep(0.1)
assert not peer_manager.peer_available(hostname)
snitun-0.42.0/tests/server/test_listener_peer.py 0000664 0000000 0000000 00000012020 14773172534 0022046 0 ustar 00root root 0000000 0000000 """Tests for peer listener & manager."""
import asyncio
from datetime import UTC, datetime, timedelta
import hashlib
import os
import pytest
from snitun.multiplexer.crypto import CryptoTransport
from snitun.server.listener_peer import PeerListener
from snitun.server.peer_manager import PeerManager
from ..conftest import Client
from .const_fernet import create_peer_config
async def test_init_listener(peer_manager: PeerManager) -> None:
"""Create a PeerListener instance and start/stop it."""
listener = PeerListener(peer_manager, "127.0.0.1", "8893")
await listener.start()
await asyncio.sleep(0.1)
await listener.stop()
async def test_peer_listener(
peer_manager: PeerManager,
peer_listener: PeerListener,
test_client_peer: Client,
) -> None:
"""Run a full flow of with a peer."""
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
test_client_peer.writer.write(fernet_token)
await test_client_peer.writer.drain()
token = await test_client_peer.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_peer.writer.write(crypto.encrypt(token))
await test_client_peer.writer.drain()
await asyncio.sleep(0.1)
assert peer_manager.peer_available(hostname)
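# The challenge/response steps used inline above recur throughout this module.
# A minimal client-side sketch of that handshake is given here for reference;
# it assumes the same CryptoTransport, StreamReader and StreamWriter objects
# the tests already use and is intentionally not wired into any test.
async def _sketch_answer_challenge(
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    crypto: CryptoTransport,
    fernet_token: bytes,
) -> None:
    """Send the fernet config and answer the server challenge (sketch)."""
    # 1. Present the fernet-encrypted peer configuration.
    writer.write(fernet_token)
    await writer.drain()
    # 2. The server answers with a 32 byte encrypted challenge.
    challenge = await reader.readexactly(32)
    # 3. Reply with the encrypted SHA-256 digest of the decrypted challenge.
    answer = hashlib.sha256(crypto.decrypt(challenge)).digest()
    writer.write(crypto.encrypt(answer))
    await writer.drain()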
async def test_peer_listener_invalid(
peer_manager: PeerManager,
peer_listener: PeerListener,
test_client_peer: Client,
) -> None:
"""Run a full flow of with a peer."""
valid = datetime.now(tz=UTC) - timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
test_client_peer.writer.write(fernet_token)
await test_client_peer.writer.drain()
with pytest.raises(asyncio.IncompleteReadError):
token = await test_client_peer.reader.readexactly(32)
async def test_peer_listener_disconnect(
peer_manager: PeerManager,
peer_listener: PeerListener,
test_client_peer: Client,
) -> None:
"""Run a full flow of with a peer after that disconnect."""
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
test_client_peer.writer.write(fernet_token)
await test_client_peer.writer.drain()
token = await test_client_peer.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_peer.writer.write(crypto.encrypt(token))
await test_client_peer.writer.drain()
await asyncio.sleep(0.1)
assert peer_manager.peer_available(hostname)
test_client_peer.writer.close()
await asyncio.sleep(0.1)
assert not peer_manager.peer_available(hostname)
async def test_peer_listener_timeout(
raise_timeout: None,
peer_manager: PeerManager,
peer_listener: PeerListener,
test_client_peer: Client,
) -> None:
"""Run a full flow of with a peer."""
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
test_client_peer.writer.write(fernet_token)
await test_client_peer.writer.drain()
with pytest.raises(asyncio.IncompleteReadError):
token = await test_client_peer.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_peer.writer.write(crypto.encrypt(token))
await test_client_peer.writer.drain()
await asyncio.sleep(0.1)
assert not peer_manager.peer_available(hostname)
async def test_peer_listener_expire(
peer_manager: PeerManager,
peer_listener: PeerListener,
test_client_peer: Client,
) -> None:
"""Run a full flow of with a peer."""
from snitun.server import listener_peer
listener_peer.CHECK_VALID_EXPIRE = 0.1
valid = datetime.now(tz=UTC) + timedelta(seconds=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
test_client_peer.writer.write(fernet_token)
await test_client_peer.writer.drain()
token = await test_client_peer.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_peer.writer.write(crypto.encrypt(token))
await test_client_peer.writer.drain()
await asyncio.sleep(0.1)
assert peer_manager.peer_available(hostname)
await asyncio.sleep(1)
assert not peer_manager.peer_available(hostname)
listener_peer.CHECK_VALID_EXPIRE = 3600
snitun-0.42.0/tests/server/test_listener_sni.py 0000664 0000000 0000000 00000025504 14773172534 0021717 0 ustar 00root root 0000000 0000000 """Test for SSL SNI proxy."""
from __future__ import annotations
import asyncio
import errno
import ipaddress
from typing import cast
from unittest.mock import patch
import pytest
from snitun.multiplexer.core import Multiplexer
from snitun.server.listener_sni import ProxyPeerHandler, SNIProxy
from snitun.server.peer import Peer
from snitun.server.peer_manager import PeerManager
from ..conftest import Client
from .const_tls import TLS_1_2
IP_ADDR = ipaddress.ip_address("127.0.0.1")
async def test_proxy_up_down() -> None:
"""Simple start stop of proxy."""
proxy = SNIProxy({}, "127.0.0.1", "8863")
await proxy.start()
await proxy.stop()
@pytest.mark.parametrize(
"payloads",
[
[TLS_1_2],
[TLS_1_2[:6], TLS_1_2[6:]],
[TLS_1_2[:6], TLS_1_2[6:20], TLS_1_2[20:]],
[TLS_1_2[:6], TLS_1_2[6:20], TLS_1_2[20:32], TLS_1_2[32:]],
],
)
async def test_sni_proxy_flow(
multiplexer_client: Multiplexer,
test_client_ssl: Client,
payloads: list[bytes],
) -> None:
"""Test a normal flow of connection and exchange data."""
for payload in payloads:
test_client_ssl.writer.write(payload)
await asyncio.sleep(0.1)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert multiplexer_client._channels
channel = next(iter(multiplexer_client._channels.values()))
assert channel.ip_address == IP_ADDR
client_hello = await channel.read()
assert client_hello == TLS_1_2
test_client_ssl.writer.write(b"Very secret!")
await test_client_ssl.writer.drain()
data = await channel.read()
assert data == b"Very secret!"
await channel.write(b"my answer")
data = await test_client_ssl.reader.read(1024)
assert data == b"my answer"
async def test_sni_proxy_flow_close_by_client(
multiplexer_client: Multiplexer,
test_client_ssl: Client,
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Test a normal flow of connection data and close by client."""
loop = event_loop
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert multiplexer_client._channels
channel = next(iter(multiplexer_client._channels.values()))
assert channel.ip_address == IP_ADDR
client_hello = await channel.read()
assert client_hello == TLS_1_2
test_client_ssl.writer.write(b"Very secret!")
await test_client_ssl.writer.drain()
data = await channel.read()
assert data == b"Very secret!"
ssl_client_read = loop.create_task(test_client_ssl.reader.read(2024))
await asyncio.sleep(0.1)
assert not ssl_client_read.done()
multiplexer_client.delete_channel(channel)
await asyncio.sleep(0.1)
assert ssl_client_read.done()
async def test_sni_proxy_flow_close_by_server(
multiplexer_client: Multiplexer,
test_client_ssl: Client,
) -> None:
"""Test a normal flow of connection data and close by server."""
loop = asyncio.get_running_loop()
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert multiplexer_client._channels
channel = next(iter(multiplexer_client._channels.values()))
assert channel.ip_address == IP_ADDR
client_hello = await channel.read()
assert client_hello == TLS_1_2
test_client_ssl.writer.write(b"Very secret!")
await test_client_ssl.writer.drain()
data = await channel.read()
assert data == b"Very secret!"
client_read = loop.create_task(channel.read())
await asyncio.sleep(0.1)
assert not client_read.done()
test_client_ssl.writer.close()
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
assert client_read.done()
async def test_sni_proxy_flow_peer_not(
peer: Peer,
multiplexer_client: Multiplexer,
test_client_ssl: Client,
) -> None:
"""Test a normal flow of connection with peer is not ready."""
peer._multiplexer = None # Fake peer state
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
async def test_sni_proxy_timeout(
multiplexer_client: Multiplexer,
test_client_ssl: Client,
raise_timeout: None,
) -> None:
"""Test a normal flow of connection and exchange data."""
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
async def test_sni_proxy_flow_timeout(
multiplexer_client: Multiplexer,
test_client_ssl: Client,
) -> None:
"""Test a normal flow of connection and exchange data."""
from snitun.server import listener_sni
listener_sni.TCP_SESSION_TIMEOUT = 0.2
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert multiplexer_client._channels
channel = next(iter(multiplexer_client._channels.values()))
assert channel.ip_address == IP_ADDR
client_hello = await channel.read()
assert client_hello == TLS_1_2
test_client_ssl.writer.write(b"Very secret!")
await test_client_ssl.writer.drain()
data = await channel.read()
assert data == b"Very secret!"
await channel.write(b"my answer")
data = await test_client_ssl.reader.read(1024)
assert data == b"my answer"
await asyncio.sleep(0.3)
assert not multiplexer_client._channels
async def test_proxy_peer_handler_can_pause(
multiplexer_client: Multiplexer,
peer_manager: PeerManager,
) -> None:
"""Test proxy peer handler can pause."""
proxy_peer_handler: ProxyPeerHandler | None = None
loop = asyncio.get_running_loop()
def save_proxy_peer_handler(
loop: asyncio.AbstractEventLoop,
ip_address: ipaddress.IPv4Address,
) -> ProxyPeerHandler:
nonlocal proxy_peer_handler
proxy_peer_handler = ProxyPeerHandler(loop, ip_address)
return proxy_peer_handler
with patch("snitun.server.listener_sni.ProxyPeerHandler", save_proxy_peer_handler):
proxy = SNIProxy(peer_manager, "127.0.0.1", "8863")
await proxy.start()
reader, writer = await asyncio.open_connection(host="127.0.0.1", port="8863")
test_client_ssl = Client(reader, writer)
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert isinstance(proxy_peer_handler, ProxyPeerHandler)
handler = cast(ProxyPeerHandler, proxy_peer_handler)
client_channel = handler._channel
assert client_channel._pause_resume_reader_callback is not None
assert (
client_channel._pause_resume_reader_callback
== handler._pause_resume_reader_callback
)
assert multiplexer_client._channels
server_channel = next(iter(multiplexer_client._channels.values()))
assert server_channel.ip_address == IP_ADDR
client_hello = await server_channel.read()
assert client_hello == TLS_1_2
test_client_ssl.writer.write(b"Very secret!")
await test_client_ssl.writer.drain()
data = await server_channel.read()
assert data == b"Very secret!"
# Now simulate that the remote input is under water
client_channel.on_remote_input_under_water(True)
assert handler._pause_future is not None
assert not handler._pause_future.done()
# This is an implementation detail that we might
# change in the future, but for now we need to
# to read one more message because we don't cancel
# the current read when the reader pauses as the additional
# complexity is not worth it.
test_client_ssl.writer.write(b"one more in before we pause")
await test_client_ssl.writer.drain()
data = await server_channel.read()
assert data == b"one more in before we pause"
test_client_ssl.writer.write(b"now we are paused")
await test_client_ssl.writer.drain()
read_task = loop.create_task(server_channel.read())
await asyncio.sleep(0.1)
# Make sure reader is actually paused
assert not read_task.done()
# Now simulate that the remote input is no longer under water
assert handler._pause_future is not None
assert not handler._pause_future.done()
client_channel.on_remote_input_under_water(False)
assert handler._pause_future is None
data = await read_task
assert data == b"now we are paused"
test_client_ssl.writer.close()
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
await proxy.stop()
async def test_proxy_peer_os_error_on_write(
multiplexer_client: Multiplexer,
peer_manager: PeerManager,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test proxy peer handler handles oserror."""
proxy_peer_handler: ProxyPeerHandler | None = None
class InstrumentedProxyPeerHandler(ProxyPeerHandler):
"""Instrumented Proxy Peer Handler.
This class is used to test the ProxyPeerHandler class
and save the reader and writer for testing.
"""
writer: asyncio.StreamWriter
reader: asyncio.StreamReader
async def start(
self,
multiplexer: Multiplexer,
client_hello: bytes,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
self.reader = reader
self.writer = writer
await super().start(multiplexer, client_hello, reader, writer)
def save_proxy_peer_handler(
loop: asyncio.AbstractEventLoop,
ip_address: ipaddress.IPv4Address,
) -> ProxyPeerHandler:
nonlocal proxy_peer_handler
proxy_peer_handler = InstrumentedProxyPeerHandler(loop, ip_address)
return proxy_peer_handler
with patch("snitun.server.listener_sni.ProxyPeerHandler", save_proxy_peer_handler):
proxy = SNIProxy(peer_manager, "127.0.0.1", "8863")
await proxy.start()
reader, writer = await asyncio.open_connection(host="127.0.0.1", port="8863")
test_client_ssl = Client(reader, writer)
test_client_ssl.writer.write(TLS_1_2)
await test_client_ssl.writer.drain()
await asyncio.sleep(0.1)
assert isinstance(proxy_peer_handler, ProxyPeerHandler)
assert multiplexer_client._channels
server_channel = next(iter(multiplexer_client._channels.values()))
assert server_channel.ip_address == IP_ADDR
client_hello = await server_channel.read()
assert client_hello == TLS_1_2
test_client_ssl.writer.write(b"Very secret!")
await test_client_ssl.writer.drain()
data = await server_channel.read()
assert data == b"Very secret!"
with patch.object(
proxy_peer_handler.writer,
"write",
side_effect=OSError(errno.EPIPE, "Broken Pipe"),
):
await server_channel.write(b"some data that will trigger oserror")
await asyncio.sleep(0.1)
assert not multiplexer_client._channels
assert "Broken Pipe" in caplog.text
await proxy.stop()
snitun-0.42.0/tests/server/test_peer.py 0000664 0000000 0000000 00000014637 14773172534 0020161 0 ustar 00root root 0000000 0000000 """Test a Peer object."""
import asyncio
from datetime import UTC, datetime, timedelta
import hashlib
import os
import pytest
import snitun
from snitun.exceptions import SniTunChallengeError
from snitun.multiplexer.crypto import CryptoTransport
from snitun.multiplexer.message import CHANNEL_FLOW_PING
from snitun.server.peer import Peer
from ..conftest import Client
def test_init_peer() -> None:
"""Test simple init of peer."""
valid = datetime.now(tz=UTC) + timedelta(days=1)
peer = Peer(
"localhost",
valid,
os.urandom(32),
os.urandom(16),
snitun.PROTOCOL_VERSION,
alias="localhost.custom",
)
assert peer.is_valid
assert peer.hostname == "localhost"
assert peer.multiplexer is None
assert peer.alias == "localhost.custom"
async def test_init_peer_multiplexer(
event_loop: asyncio.AbstractEventLoop,
test_client: Client,
test_server: list[Client],
) -> None:
"""Test setup multiplexer."""
loop = event_loop
client = test_server[0]
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
valid = datetime.now(tz=UTC) + timedelta(days=1)
peer = Peer("localhost", valid, aes_key, aes_iv, snitun.PROTOCOL_VERSION)
crypto = CryptoTransport(aes_key, aes_iv)
with pytest.raises(RuntimeError):
await peer.wait_disconnect()
init_task = loop.create_task(
peer.init_multiplexer_challenge(test_client.reader, test_client.writer),
)
await asyncio.sleep(0.1)
assert not init_task.done()
assert not peer.is_ready
assert not peer.is_connected
token = await client.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
client.writer.write(crypto.encrypt(token))
await client.writer.drain()
await asyncio.sleep(0.1)
assert init_task.exception() is None
assert init_task.done()
assert peer.is_ready
assert peer.is_connected
assert peer.multiplexer._throttling is None
client.writer.close()
client.close.set()
await asyncio.sleep(0.1)
assert not peer.multiplexer.is_connected
async def test_init_peer_multiplexer_crypto(
event_loop: asyncio.AbstractEventLoop,
test_client: Client,
test_server: list[Client],
) -> None:
"""Test setup multiplexer with crypto."""
loop = event_loop
client = test_server[0]
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
valid = datetime.now(tz=UTC) + timedelta(days=1)
peer = Peer("localhost", valid, aes_key, aes_iv, snitun.PROTOCOL_VERSION)
crypto = CryptoTransport(aes_key, aes_iv)
with pytest.raises(RuntimeError):
await peer.wait_disconnect()
init_task = loop.create_task(
peer.init_multiplexer_challenge(test_client.reader, test_client.writer),
)
await asyncio.sleep(0.1)
assert not init_task.done()
assert not peer.is_ready
assert not peer.is_connected
token = await client.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
client.writer.write(crypto.encrypt(token))
await client.writer.drain()
await asyncio.sleep(0.1)
assert init_task.exception() is None
assert init_task.done()
assert peer.is_ready
assert peer.is_connected
ping_task = loop.create_task(peer.multiplexer.ping())
await asyncio.sleep(0.1)
ping_data = await client.reader.read(1024)
ping = crypto.decrypt(ping_data)
assert ping[16] == CHANNEL_FLOW_PING
assert int.from_bytes(ping[17:21], "big") == 0
assert ping[21:25] == b"ping"
ping_task.cancel()
client.writer.close()
client.close.set()
await asyncio.sleep(0.1)
assert peer.multiplexer.wait().done()
async def test_init_peer_wrong_challenge(
event_loop: asyncio.AbstractEventLoop,
test_client: Client,
test_server: list[Client],
) -> None:
"""Test setup multiplexer wrong challenge."""
loop = event_loop
client = test_server[0]
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
valid = datetime.now(tz=UTC) + timedelta(days=1)
peer = Peer("localhost", valid, aes_key, aes_iv, snitun.PROTOCOL_VERSION)
crypto = CryptoTransport(aes_key, aes_iv)
with pytest.raises(RuntimeError):
await peer.wait_disconnect()
init_task = loop.create_task(
peer.init_multiplexer_challenge(test_client.reader, test_client.writer),
)
await asyncio.sleep(0.1)
assert not init_task.done()
token = await client.reader.readexactly(32)
client.writer.write(crypto.encrypt(token))
await client.writer.drain()
await asyncio.sleep(0.1)
with pytest.raises(SniTunChallengeError):
raise init_task.exception()
assert init_task.done()
client.writer.close()
client.close.set()
def test_init_peer_invalid() -> None:
"""Test simple init of peer with invalid date."""
valid = datetime.now(tz=UTC) - timedelta(days=1)
peer = Peer(
"localhost",
valid,
os.urandom(32),
os.urandom(16),
snitun.PROTOCOL_VERSION,
)
assert not peer.is_valid
assert peer.hostname == "localhost"
assert peer.multiplexer is None
async def test_init_peer_multiplexer_throttling(
event_loop: asyncio.AbstractEventLoop,
test_client: Client,
test_server: list[Client],
) -> None:
"""Test setup multiplexer."""
loop = event_loop
client = test_server[0]
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
valid = datetime.now(tz=UTC) + timedelta(days=1)
peer = Peer(
"localhost",
valid,
aes_key,
aes_iv,
snitun.PROTOCOL_VERSION,
throttling=500,
)
crypto = CryptoTransport(aes_key, aes_iv)
with pytest.raises(RuntimeError):
await peer.wait_disconnect()
init_task = loop.create_task(
peer.init_multiplexer_challenge(test_client.reader, test_client.writer),
)
await asyncio.sleep(0.1)
assert not init_task.done()
assert not peer.is_ready
assert not peer.is_connected
token = await client.reader.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
client.writer.write(crypto.encrypt(token))
await client.writer.drain()
await asyncio.sleep(0.1)
assert init_task.exception() is None
assert init_task.done()
assert peer.is_ready
assert peer.is_connected
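# A throttling rate of 500 is expected to become a per-message delay of
# 1 / 500 = 0.002 seconds on the multiplexer.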
assert peer.multiplexer._throttling == 0.002
client.writer.close()
client.close.set()
await asyncio.sleep(0.1)
assert not peer.multiplexer.is_connected
snitun-0.42.0/tests/server/test_peer_manager.py 0000664 0000000 0000000 00000021212 14773172534 0021636 0 ustar 00root root 0000000 0000000 """Test peer manager."""
import asyncio
from datetime import UTC, datetime, timedelta
import os
import pytest
from snitun.exceptions import SniTunInvalidPeer
from snitun.multiplexer.core import Multiplexer
from snitun.server.peer import Peer
from snitun.server.peer_manager import PeerManager, PeerManagerEvent
from .const_fernet import FERNET_TOKENS, create_peer_config
async def test_simple_init_peer_manager() -> None:
"""Simple init a peer manager."""
manager = PeerManager(FERNET_TOKENS)
assert manager._fernet
assert not manager._peers
assert manager._throttling is None
async def test_init_new_peer() -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
peer = manager.create_peer(fernet_token)
assert peer.hostname == hostname
assert not peer.is_ready
assert not manager.get_peer(hostname)
assert not manager.peer_available(hostname)
assert hostname not in manager._peers
assert manager.connections == 0
manager.add_peer(peer)
assert manager.get_peer(hostname)
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
async def test_init_new_peer_with_alias() -> None:
"""Init a new peer with custom domain."""
manager = PeerManager(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
alias = "localhost.custom"
fernet_token = create_peer_config(
valid.timestamp(),
hostname,
aes_key,
aes_iv,
alias=[alias],
)
peer = manager.create_peer(fernet_token)
assert peer.hostname == hostname
assert peer.alias == [alias]
assert not peer.is_ready
assert not manager.get_peer(hostname)
assert not manager.get_peer(alias)
assert not manager.peer_available(hostname)
assert not manager.peer_available(alias)
assert hostname not in manager._peers
assert alias not in manager._peers
assert manager.connections == 0
manager.add_peer(peer)
assert manager.get_peer(hostname)
assert manager.get_peer(alias)
assert not manager.peer_available(hostname)
assert not manager.peer_available(alias)
assert hostname in manager._peers
assert alias in manager._peers
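# The single peer is registered under both its hostname and its alias,
# so it counts as two connections.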
assert manager.connections == 2
async def test_init_new_peer_not_valid_time() -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS)
valid = datetime.now(tz=UTC) - timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
with pytest.raises(SniTunInvalidPeer):
manager.create_peer(fernet_token)
async def test_init_new_peer_invalid_fernet() -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS)
with pytest.raises(SniTunInvalidPeer):
manager.create_peer(os.urandom(100))
async def test_init_new_peer_with_removing() -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
peer = manager.create_peer(fernet_token)
assert peer.hostname == hostname
assert not peer.is_ready
manager.add_peer(peer)
assert manager.get_peer(hostname)
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
manager.remove_peer(peer)
assert manager.get_peer(hostname) is None
assert not manager.peer_available(hostname)
assert hostname not in manager._peers
async def test_init_new_peer_with_events() -> None:
"""Init a new peer and remove with events."""
events = []
def _events(ev_peer: Peer, type_event: PeerManagerEvent) -> None:
events.append((ev_peer, type_event))
manager = PeerManager(FERNET_TOKENS, event_callback=_events)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
peer = manager.create_peer(fernet_token)
assert peer.hostname == hostname
assert not peer.is_ready
manager.add_peer(peer)
assert manager.get_peer(hostname)
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
await asyncio.sleep(0.1)
assert events[-1][0] == peer
assert events[-1][1] == PeerManagerEvent.CONNECTED
manager.remove_peer(peer)
assert manager.get_peer(hostname) is None
assert not manager.peer_available(hostname)
assert hostname not in manager._peers
await asyncio.sleep(0.1)
assert events[-1][0] == peer
assert events[-1][1] == PeerManagerEvent.DISCONNECTED
async def test_init_new_peer_throttling() -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS, throttling=500)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
peer = manager.create_peer(fernet_token)
assert peer.hostname == hostname
assert not peer.is_ready
assert peer._throttling == 500
manager.add_peer(peer)
assert manager.get_peer(hostname)
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
async def test_init_dual_peer_with_removing() -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
peer1 = manager.create_peer(fernet_token)
peer2 = manager.create_peer(fernet_token)
assert peer1.hostname == hostname
assert peer2.hostname == hostname
assert not peer1.is_ready
assert not peer2.is_ready
manager.add_peer(peer1)
assert manager.get_peer(hostname) == peer1
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
manager.add_peer(peer2)
assert manager.get_peer(hostname) == peer2
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
manager.remove_peer(peer1)
assert manager.get_peer(hostname) == peer2
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
manager.remove_peer(peer2)
assert manager.get_peer(hostname) is None
assert not manager.peer_available(hostname)
assert hostname not in manager._peers
async def test_init_dual_peer_with_multiplexer(multiplexer_client: Multiplexer) -> None:
"""Init a new peer."""
manager = PeerManager(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
peer1 = manager.create_peer(fernet_token)
peer2 = manager.create_peer(fernet_token)
assert peer1.hostname == hostname
assert peer2.hostname == hostname
assert not peer1.is_ready
assert not peer2.is_ready
peer1._multiplexer = multiplexer_client
assert peer1.is_ready
manager.add_peer(peer1)
assert manager.get_peer(hostname) == peer1
assert manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
manager.add_peer(peer2)
assert manager.get_peer(hostname) == peer2
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
manager.remove_peer(peer1)
assert manager.get_peer(hostname) == peer2
assert not manager.peer_available(hostname)
assert hostname in manager._peers
assert manager.connections == 1
await asyncio.sleep(0.1)
assert not multiplexer_client.is_connected
manager.remove_peer(peer2)
assert manager.get_peer(hostname) is None
assert not manager.peer_available(hostname)
assert hostname not in manager._peers
snitun-0.42.0/tests/server/test_run.py 0000664 0000000 0000000 00000030132 14773172534 0020016 0 ustar 00root root 0000000 0000000 """Test runner of SniTun Server."""
from __future__ import annotations
import asyncio
from datetime import UTC, datetime, timedelta
import hashlib
import ipaddress
import os
import socket
import time
from unittest.mock import MagicMock, patch
import pytest
import snitun
from snitun.multiplexer.channel import MultiplexerChannel
from snitun.multiplexer.core import Multiplexer
from snitun.multiplexer.crypto import CryptoTransport
from snitun.server.run import SniTunServer, SniTunServerSingle, SniTunServerWorker
from .const_fernet import FERNET_TOKENS, create_peer_config
from .const_tls import TLS_1_2
IP_ADDR = ipaddress.ip_address("127.0.0.1")
async def test_snitun_runner_updown() -> None:
"""Test SniTun Server runner object."""
server = SniTunServer(
FERNET_TOKENS,
peer_host="127.0.0.1",
sni_host="127.0.0.1",
sni_port=32000,
)
await server.start()
await asyncio.sleep(0.1)
await server.stop()
async def test_snitun_single_runner_updown() -> None:
"""Test SniTun Single Server runner object."""
server = SniTunServerSingle(FERNET_TOKENS, host="127.0.0.1", port=32000)
await server.start()
await asyncio.sleep(0.1)
await server.stop()
def test_snitun_worker_runner_updown(event_loop: asyncio.AbstractEventLoop) -> None:
"""Test SniTun Worker Server runner object."""
server = SniTunServerWorker(
FERNET_TOKENS,
host="127.0.0.1",
port=32001,
worker_size=2,
)
server.start()
time.sleep(0.1)
server.stop()
async def test_snitun_single_runner() -> None:
"""Test SniTunSingle Server runner object."""
peer_messages = []
peer_address = []
server = SniTunServerSingle(FERNET_TOKENS, host="127.0.0.1", port=32000)
await server.start()
reader_peer, writer_peer = await asyncio.open_connection(
host="127.0.0.1",
port="32000",
)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
writer_peer.write(fernet_token)
await writer_peer.drain()
token = await reader_peer.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
writer_peer.write(crypto.encrypt(token))
await writer_peer.drain()
await asyncio.sleep(0.1)
assert server.peers.peer_available(hostname)
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
while True:
message = await channel.read()
peer_messages.append(message)
peer_address.append(channel.ip_address)
_, writer_ssl = await asyncio.open_connection(host="127.0.0.1", port="32000")
multiplexer = Multiplexer(
crypto,
reader_peer,
writer_peer,
snitun.PROTOCOL_VERSION,
mock_new_channel,
)
writer_ssl.write(TLS_1_2)
await writer_ssl.drain()
await asyncio.sleep(0.1)
assert peer_messages
assert peer_messages[0] == TLS_1_2
assert peer_address
assert peer_address[0] == IP_ADDR
multiplexer.shutdown()
await multiplexer.wait()
await asyncio.sleep(0.1)
assert not server.peers.peer_available(hostname)
writer_ssl.close()
await server.stop()
async def test_snitun_single_runner_timeout(raise_timeout: None) -> None:
"""Test SniTunSingle Server runner object."""
server = SniTunServerSingle(FERNET_TOKENS, host="127.0.0.1", port="32000")
await server.start()
reader_peer, writer_peer = await asyncio.open_connection(
host="127.0.0.1",
port="32000",
)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
writer_peer.write(fernet_token)
await writer_peer.drain()
with pytest.raises(ConnectionResetError):
token = await reader_peer.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
writer_peer.write(crypto.encrypt(token))
await writer_peer.drain()
await asyncio.sleep(0.1)
assert not server.peers.peer_available(hostname)
await server.stop()
async def test_snitun_single_runner_invalid_payload(raise_timeout: None) -> None:
"""Test SniTunSingle Server runner object with invalid payload."""
server = SniTunServerSingle(FERNET_TOKENS, host="127.0.0.1", port="32000")
await server.start()
reader_peer, writer_peer = await asyncio.open_connection(
host="127.0.0.1",
port="32000",
)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
crypto = CryptoTransport(aes_key, aes_iv)
writer_peer.write(b"INVALID")
await writer_peer.drain()
with pytest.raises(ConnectionResetError):
token = await reader_peer.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
writer_peer.write(crypto.encrypt(token))
await writer_peer.drain()
await asyncio.sleep(0.1)
assert not server.peers.peer_available(hostname)
await server.stop()
async def test_snitun_single_runner_throttling() -> None:
"""Test SniTunSingle Server runner object."""
peer_messages = []
peer_address = []
server = SniTunServerSingle(
FERNET_TOKENS,
host="127.0.0.1",
port="32000",
throttling=500,
)
await server.start()
reader_peer, writer_peer = await asyncio.open_connection(
host="127.0.0.1",
port="32000",
)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
writer_peer.write(fernet_token)
await writer_peer.drain()
token = await reader_peer.readexactly(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
writer_peer.write(crypto.encrypt(token))
await writer_peer.drain()
await asyncio.sleep(0.1)
assert server.peers.peer_available(hostname)
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
while True:
message = await channel.read()
peer_messages.append(message)
peer_address.append(channel.ip_address)
_, writer_ssl = await asyncio.open_connection(host="127.0.0.1", port="32000")
multiplexer = Multiplexer(
crypto,
reader_peer,
writer_peer,
snitun.PROTOCOL_VERSION,
mock_new_channel,
)
writer_ssl.write(TLS_1_2)
await writer_ssl.drain()
await asyncio.sleep(0.1)
assert peer_messages
assert peer_messages[0] == TLS_1_2
assert peer_address
assert peer_address[0] == IP_ADDR
peer = server.peers.get_peer(hostname)
assert peer._multiplexer._throttling == 0.002
multiplexer.shutdown()
await multiplexer.wait()
await asyncio.sleep(0.1)
assert not server.peers.peer_available(hostname)
writer_ssl.close()
await server.stop()
@pytest.mark.parametrize(
"payloads",
[
[TLS_1_2],
[TLS_1_2[:6], TLS_1_2[6:]],
[TLS_1_2[:6], TLS_1_2[6:20], TLS_1_2[20:]],
[TLS_1_2[:6], TLS_1_2[6:20], TLS_1_2[20:32], TLS_1_2[32:]],
],
)
def test_snitun_worker_runner(
event_loop: asyncio.AbstractEventLoop,
payloads: list[bytes],
) -> None:
"""Test SniTunWorker Server runner object."""
loop = event_loop
peer_messages = []
peer_address = []
server = SniTunServerWorker(
FERNET_TOKENS,
host="127.0.0.1",
port=32001,
worker_size=2,
)
server.start()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 32001))
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
sock.sendall(fernet_token)
token = sock.recv(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
sock.sendall(crypto.encrypt(token))
time.sleep(1)
assert any(worker.is_responsible_peer(hostname) for worker in server._workers)
assert server.peer_counter == 1
async def mock_new_channel(
multiplexer: Multiplexer,
channel: MultiplexerChannel,
) -> None:
"""Mock new channel."""
while True:
message = await channel.read()
peer_messages.append(message)
peer_address.append(channel.ip_address)
sock_ssl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_ssl.connect(("127.0.0.1", 32001))
async def _create_multiplexer() -> Multiplexer:
"""Create and return the peer multiplexer."""
reader_peer, writer_peer = await asyncio.open_connection(sock=sock)
return Multiplexer(
crypto,
reader_peer,
writer_peer,
snitun.PROTOCOL_VERSION,
mock_new_channel,
)
multiplexer = loop.run_until_complete(_create_multiplexer())
for payload in payloads:
sock_ssl.sendall(payload)
loop.run_until_complete(asyncio.sleep(0.1))
assert peer_messages
assert peer_messages[0] == TLS_1_2
assert peer_address
assert peer_address[0] == IP_ADDR
loop.call_soon_threadsafe(multiplexer.shutdown)
loop.run_until_complete(multiplexer.wait())
time.sleep(1)
assert not any(worker.is_responsible_peer(hostname) for worker in server._workers)
sock_ssl.close()
server.stop()
def test_snitun_worker_timeout(event_loop: asyncio.AbstractEventLoop) -> None:
"""Test SniTunWorker Server runner object timeout."""
from snitun.server import run
run.WORKER_STALE_MAX = 1
server = SniTunServerWorker(
FERNET_TOKENS,
host="127.0.0.1",
port=32001,
worker_size=2,
)
server.start()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 32001))
time.sleep(1.5)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
crypto = CryptoTransport(aes_key, aes_iv)
with pytest.raises(OSError):
sock.sendall(fernet_token)
token = sock.recv(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
sock.sendall(crypto.encrypt(token))
server.stop()
def test_snitun_worker_runner_invalid_payload(
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Test SniTunWorker Server runner invalid payload."""
server = SniTunServerWorker(
FERNET_TOKENS,
host="127.0.0.1",
port=32001,
worker_size=2,
)
server.start()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 32001))
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
crypto = CryptoTransport(aes_key, aes_iv)
sock.sendall(b"INVALID")
with pytest.raises(OSError):
for _ in range(3):
token = sock.recv(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
sock.sendall(crypto.encrypt(token))
server.stop()
@patch("snitun.server.run.os.kill")
def test_snitun_worker_crash(
kill: MagicMock,
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Test SniTunWorker Server runner object with crashing worker."""
server = SniTunServerWorker(
FERNET_TOKENS,
host="127.0.0.1",
port=32001,
worker_size=2,
)
server.start()
for worker in server._workers:
worker.shutdown()
break
time.sleep(1.5)
assert kill.called
server.stop()
snitun-0.42.0/tests/server/test_sni.py 0000664 0000000 0000000 00000001423 14773172534 0020004 0 ustar 00root root 0000000 0000000 """Tests for SNI parser."""
import pytest
from snitun.exceptions import ParseSNIError
from snitun.server import sni
from . import const_tls as raw
@pytest.mark.parametrize(
"test_package",
[
raw.TLS_1_0,
raw.TLS_1_0_OLD,
raw.TLS_1_2,
raw.TLS_1_2_MORE,
raw.TLS_1_2_ORDER,
raw.TLS_1_2_BAD,
],
)
def test_good_client_hello(test_package: bytes) -> None:
"""Test good TLS packages."""
assert sni.parse_tls_sni(test_package) == "localhost"
@pytest.mark.parametrize(
"test_package",
[raw.BAD_DATA1, raw.BAD_DATA2, raw.SSL_2_0, raw.SSL_3_0],
)
def test_bad_client_hello(test_package: bytes) -> None:
"""Test bad client hello."""
with pytest.raises(ParseSNIError):
sni.parse_tls_sni(test_package)
snitun-0.42.0/tests/server/test_worker.py 0000664 0000000 0000000 00000007443 14773172534 0020534 0 ustar 00root root 0000000 0000000 """Tests for the server worker."""
import asyncio
from datetime import UTC, datetime, timedelta
import hashlib
import os
import socket
import time
from snitun.multiplexer.crypto import CryptoTransport
from snitun.server.worker import ServerWorker
from .const_fernet import FERNET_TOKENS, create_peer_config
from .const_tls import TLS_1_2
def test_worker_up_down(event_loop: asyncio.AbstractEventLoop) -> None:
"""Test if worker start and stop."""
worker = ServerWorker(FERNET_TOKENS)
worker.start()
assert worker.is_alive()
assert worker.peer_size == 0
worker.shutdown()
assert worker.exitcode == 0
assert not worker.is_alive()
def test_peer_connection(
test_server_sync: list[socket.socket],
test_client_sync: socket.socket,
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Run a full flow of with a peer."""
worker = ServerWorker(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
worker.start()
crypto = CryptoTransport(aes_key, aes_iv)
worker.handover_connection(test_server_sync[-1], fernet_token, None)
token = test_client_sync.recv(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_sync.sendall(crypto.encrypt(token))
time.sleep(1)
assert worker.is_responsible_peer(hostname)
assert worker.peer_size == 1
worker.shutdown()
assert worker.peer_size == 0
def test_peer_connection_disconnect(
test_server_sync: list[socket.socket],
test_client_sync: socket.socket,
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Run a full flow of with a peer & disconnect."""
worker = ServerWorker(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = create_peer_config(valid.timestamp(), hostname, aes_key, aes_iv)
worker.start()
crypto = CryptoTransport(aes_key, aes_iv)
worker.handover_connection(test_server_sync[-1], fernet_token, None)
token = test_client_sync.recv(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_sync.sendall(crypto.encrypt(token))
time.sleep(1)
assert worker.is_responsible_peer(hostname)
assert worker.peer_size == 1
test_client_sync.shutdown(socket.SHUT_RDWR)
time.sleep(1)
assert not worker.is_responsible_peer(hostname)
assert worker.peer_size == 0
worker.shutdown()
def test_sni_connection(
test_server_sync: list[socket.socket],
test_client_sync: socket.socket,
test_client_ssl_sync: socket.socket,
event_loop: asyncio.AbstractEventLoop,
) -> None:
"""Run a full flow of with a peer."""
worker = ServerWorker(FERNET_TOKENS)
valid = datetime.now(tz=UTC) + timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
alias = ["localhost.custom"]
fernet_token = create_peer_config(
valid.timestamp(),
hostname,
aes_key,
aes_iv,
alias=alias,
)
worker.start()
crypto = CryptoTransport(aes_key, aes_iv)
worker.handover_connection(test_server_sync[0], fernet_token, None)
token = test_client_sync.recv(32)
token = hashlib.sha256(crypto.decrypt(token)).digest()
test_client_sync.sendall(crypto.encrypt(token))
time.sleep(1)
assert worker.is_responsible_peer(hostname)
for entry in alias:
assert worker.is_responsible_peer(entry)
worker.handover_connection(test_server_sync[1], TLS_1_2, hostname)
assert len(test_client_sync.recv(1048)) == 32
assert worker.peer_size == 1
worker.shutdown()
assert worker.peer_size == 0
snitun-0.42.0/tests/utils/ 0000775 0000000 0000000 00000000000 14773172534 0015434 5 ustar 00root root 0000000 0000000 snitun-0.42.0/tests/utils/__init__.py 0000664 0000000 0000000 00000000027 14773172534 0017544 0 ustar 00root root 0000000 0000000 """Tests for utils."""
snitun-0.42.0/tests/utils/test_aes.py 0000664 0000000 0000000 00000000710 14773172534 0017613 0 ustar 00root root 0000000 0000000 """Test aes generator function."""
from snitun.multiplexer.crypto import CryptoTransport
from snitun.utils import aes
def test_aes_function() -> None:
"""Test crypto with generated keys."""
key, iv = aes.generate_aes_keyset()
assert CryptoTransport(key, iv)
def test_unique_aes() -> None:
"""Test unique aes function."""
keyset_1 = aes.generate_aes_keyset()
keyset_2 = aes.generate_aes_keyset()
assert keyset_1 != keyset_2
snitun-0.42.0/tests/utils/test_aiohttp_client.py 0000664 0000000 0000000 00000001643 14773172534 0022057 0 ustar 00root root 0000000 0000000 """Tests for aiohttp snitun client."""
from unittest.mock import patch
from snitun.utils.aiohttp_client import SniTunClientAioHttp
async def test_init_client() -> None:
"""Init aiohttp client for test."""
with patch("snitun.utils.aiohttp_client.SockSite"):
client = SniTunClientAioHttp(None, None, "127.0.0.1")
assert not client.is_connected
async def test_client_stop_no_wait() -> None:
"""Test that we do not wait if wait is not passed to the stop"""
with patch("snitun.utils.aiohttp_client.SockSite"):
client = SniTunClientAioHttp(None, None, "127.0.0.1")
with patch(
"snitun.utils.aiohttp_client._async_waitfor_socket_closed",
) as waitfor_socket_closed:
waitfor_socket_closed.assert_not_called()
await client.stop()
waitfor_socket_closed.assert_not_called()
await client.stop(wait=True)
waitfor_socket_closed.assert_called()
snitun-0.42.0/tests/utils/test_asyncio.py 0000664 0000000 0000000 00000004154 14773172534 0020516 0 ustar 00root root 0000000 0000000 """Tests for asyncio utils."""
import asyncio
import pytest
from snitun.utils.asyncio import (
asyncio_timeout,
create_eager_task,
make_task_waiter_future,
)
async def test_asyncio_timeout() -> None:
"""Init aiohttp client for test."""
with pytest.raises(asyncio.TimeoutError):
async with asyncio_timeout.timeout(0.1):
task = asyncio.create_task(asyncio.sleep(10))
await task
with pytest.raises(asyncio.CancelledError):
await task
async def test_create_eager_task() -> None:
"""Test create eager task."""
task = create_eager_task(asyncio.sleep(0.01))
await task
assert task.done()
assert not task.cancelled()
assert task.result() is None
async def test_make_task_waiter_future_running_task() -> None:
"""Test make task waiter future for a running task."""
task = asyncio.create_task(asyncio.sleep(0.01))
future = make_task_waiter_future(task)
assert not future.done()
assert not future.cancelled()
assert await future is None
async def test_make_task_waiter_future_cancelled_task() -> None:
"""Test make task waiter future when the task is cancelled."""
task = asyncio.create_task(asyncio.sleep(0.01))
future = make_task_waiter_future(task)
task.cancel()
assert not future.done()
assert not future.cancelled()
assert await future is None
async def test_make_task_waiter_future_exception_task() -> None:
"""Test make task waiter future when the task raises."""
async def _raise_exception() -> None:
await asyncio.sleep(0)
raise ValueError("test")
task = asyncio.create_task(_raise_exception())
future = make_task_waiter_future(task)
assert not future.done()
assert not future.cancelled()
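# The waiter future only signals completion; the ValueError raised inside
# the task is not re-raised when the future is awaited.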
assert await future is None
async def test_make_task_waiter_future_already_done_task() -> None:
"""Test make task waiter future when the task is already done."""
task = asyncio.create_task(asyncio.sleep(0))
await task
future = make_task_waiter_future(task)
assert future.done()
assert not future.cancelled()
assert future.result() is None
snitun-0.42.0/tests/utils/test_ipaddress.py 0000664 0000000 0000000 00000001033 14773172534 0021020 0 ustar 00root root 0000000 0000000 """Test ipaddress module."""
from ipaddress import ip_address
from snitun.utils import ipaddress as ip_modul
def test_ipaddress_to_binary() -> None:
"""Test ip address to binary."""
my_ip = ip_address("192.168.1.1")
my_ip_bin = b"\xc0\xa8\x01\x01"
assert ip_modul.ip_address_to_bytes(my_ip) == my_ip_bin
def test_binary_to_ipaddress() -> None:
"""Test ip address to binary."""
my_ip = ip_address("192.168.1.1")
my_ip_bin = b"\xc0\xa8\x01\x01"
assert ip_modul.bytes_to_ip_address(my_ip_bin) == my_ip
snitun-0.42.0/tests/utils/test_server.py 0000664 0000000 0000000 00000004115 14773172534 0020354 0 ustar 00root root 0000000 0000000 """Test server utils."""
import asyncio
from datetime import timedelta
import os
import pytest
from snitun.client.client_peer import ClientPeer
from snitun.client.connector import Connector
from snitun.exceptions import SniTunConnectionError
from snitun.server.listener_peer import PeerListener
from snitun.server.peer_manager import PeerManager
from snitun.utils import server
from ..conftest import Client
from ..server.const_fernet import FERNET_TOKENS
async def test_fernet_token(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test fernet token created by server."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not peer_manager.peer_available("localhost")
valid = timedelta(days=1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = server.generate_client_token(
FERNET_TOKENS,
valid,
hostname,
aes_key,
aes_iv,
)
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert peer_manager.peer_available("localhost")
await client.stop()
await asyncio.sleep(0.1)
assert not peer_manager.peer_available("localhost")
async def test_fernet_token_date(
peer_listener: PeerListener,
peer_manager: PeerManager,
test_endpoint: list[Client],
) -> None:
"""Test fernet token created by server as invalid."""
client = ClientPeer("127.0.0.1", "8893")
connector = Connector("127.0.0.1", "8822")
assert not peer_manager.peer_available("localhost")
valid = timedelta(days=-1)
aes_key = os.urandom(32)
aes_iv = os.urandom(16)
hostname = "localhost"
fernet_token = server.generate_client_token(
FERNET_TOKENS,
valid,
hostname,
aes_key,
aes_iv,
)
with pytest.raises(SniTunConnectionError):
await client.start(connector, fernet_token, aes_key, aes_iv)
await asyncio.sleep(0.1)
assert not peer_manager.peer_available("localhost")