pax_global_header 0000666 0000000 0000000 00000000064 14401326716 0014516 g ustar 00root root 0000000 0000000 52 comment=82929239874bb680b6317dfa061ef8790da48c9d
opensnitch-1.5.8.1/ 0000775 0000000 0000000 00000000000 14401326716 0014042 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/.github/ 0000775 0000000 0000000 00000000000 14401326716 0015402 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/.github/FUNDING.yml 0000664 0000000 0000000 00000001251 14401326716 0017216 0 ustar 00root root 0000000 0000000 # These are supported funding model platforms
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: evilsocket
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
opensnitch-1.5.8.1/.github/ISSUE_TEMPLATE/ 0000775 0000000 0000000 00000000000 14401326716 0017565 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/.github/ISSUE_TEMPLATE/bug_report.md 0000664 0000000 0000000 00000003216 14401326716 0022261 0 ustar 00root root 0000000 0000000 ---
name: 🐞 Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
Please, check the FAQ and Known Problems pages before creating the bug report:
https://github.com/evilsocket/opensnitch/wiki/FAQs
https://github.com/evilsocket/opensnitch/wiki/Known-problems
**Describe the bug**
A clear and concise description of what the bug is.
Include the following information:
- OpenSnitch version.
- OS: [e.g. Debian GNU/Linux, ArchLinux, Slackware, ...]
- Version [e.g. Buster, 10.3, 20.04]
- Window Manager: [e.g. GNOME Shell, KDE, enlightenment, i3wm, ...]
- Kernel version: echo $(uname -a)
**To Reproduce**
Describe in detail as much as you can what happened.
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Post error logs:**
If it's a crash of the GUI:
- Launch it from a terminal and reproduce the issue.
- Post the errors logged to the terminal.
If the daemon doesn't start:
- Post last 15 lines of the log file `/var/log/opensnitchd.log`
- Or launch it from a terminal as root (`# /usr/bin/opensnitchd -rules-path /etc/opensnitchd/rules`) and post the errors logged to the terminal.
If the deb or rpm packages fail to install:
- Install them from a terminal (`$ sudo dpkg -i opensnitch*` / `$ sudo yum install opensnitch*`), and post the errors logged to stdout.
**Expected behavior (optional)**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem. It may help to understand the issue much better.
**Additional context**
Add any other context about the problem here.
opensnitch-1.5.8.1/.github/ISSUE_TEMPLATE/config.yml 0000664 0000000 0000000 00000000213 14401326716 0021551 0 ustar 00root root 0000000 0000000 contact_links:
- name: 🙋 Question
url: https://github.com/evilsocket/opensnitch/discussions/new
about: Ask your question here
opensnitch-1.5.8.1/.github/ISSUE_TEMPLATE/feature-request.md 0000664 0000000 0000000 00000000434 14401326716 0023231 0 ustar 00root root 0000000 0000000 ---
name: 💡 Feature request
about: Suggest an idea
title: '[Feature Request]
'
labels: feature
assignees: ''
---
### Summary:
opensnitch-1.5.8.1/.github/workflows/ 0000775 0000000 0000000 00000000000 14401326716 0017437 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/.github/workflows/debian-package.yml 0000664 0000000 0000000 00000002720 14401326716 0022776 0 ustar 00root root 0000000 0000000 name: Build status
on: [push, pull_request]
jobs:
Builddeb:
runs-on: ubuntu-latest
strategy:
matrix:
image: ["debian:bookworm", "debian:sid"]
container:
image: ${{ matrix.image }}
options: --cpus=2
steps:
- name: Dump GitHub context
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT"
- name: Check out git code
uses: actions/checkout@v2
- name: Install pre-dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
set -e
set -x
apt --quiet update
# Install stuff needed to check out the linuxcnc repo and turn it into a debian source package.
apt --yes --quiet install --no-install-suggests eatmydata
eatmydata apt --yes --quiet install --no-install-suggests git devscripts
- name: Install build dependencies
env:
DEBIAN_FRONTEND: noninteractive
run: |
set -e
set -x
eatmydata apt --yes --quiet build-dep --indep-only .
- name: Build source client
env:
DEBIAN_FRONTEND: noninteractive
run: |
set -e
set -x
# Workaround for missing source tarball
echo 1.0 > debian/source/format
yes y | eatmydata debuild -us -uc
- name: Test install debian packages
env:
DEBIAN_FRONTEND: noninteractive
run: |
set -e
set -x
eatmydata apt --yes --quiet install ../*.deb
opensnitch-1.5.8.1/.github/workflows/ebpf.yml 0000664 0000000 0000000 00000002516 14401326716 0021102 0 ustar 00root root 0000000 0000000 name: Build eBPF
on:
# Trigger this workflow only when ebpf modules changes.
push:
paths:
- 'ebpf_prog/*'
- '.github/workflows/ebpf.yml'
pull_request:
paths:
- 'ebpf_prog/*'
- '.github/workflows/ebpf.yml'
# Allow to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build:
name: Build eBPF object
runs-on: ubuntu-latest
steps:
- name: Check out git code
uses: actions/checkout@v2
- name: Get and prepare dependencies
run: |
set -e
set -x
sudo apt install eatmydata
sudo eatmydata apt install wget tar patch clang llvm libelf-dev libzip-dev flex bison libssl-dev bc rsync python3 binutils
eatmydata wget --no-verbose https://github.com/torvalds/linux/archive/v5.8.tar.gz
eatmydata tar -xf v5.8.tar.gz
- name: Build eBPF module
run: |
set -e
set -x
eatmydata patch linux-5.8/tools/lib/bpf/bpf_helpers.h < ebpf_prog/file.patch
eatmydata cp ebpf_prog/opensnitch.c ebpf_prog/Makefile linux-5.8/samples/bpf
cd linux-5.8 && yes "" | eatmydata make oldconfig
eatmydata make prepare
eatmydata make headers_install
cd samples/bpf
eatmydata make
eatmydata objdump -h opensnitch.o
eatmydata llvm-strip -g opensnitch.o
opensnitch-1.5.8.1/.github/workflows/go.yml 0000664 0000000 0000000 00000002436 14401326716 0020574 0 ustar 00root root 0000000 0000000 name: Build status
on:
# Trigger this workflow only when daemon code changes.
push:
paths:
- 'daemon/*'
- '.github/workflows/go.yml'
pull_request:
paths:
- 'daemon/*'
- '.github/workflows/go.yml'
# Allow to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build:
name: Build Go code
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.15.15
uses: actions/setup-go@v1
with:
go-version: 1.15.15
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
sudo apt --yes --quiet install --no-install-suggests eatmydata
sudo eatmydata apt install git libnetfilter-queue-dev libmnl-dev libpcap-dev protobuf-compiler
export GOPATH=~/go
export PATH=$PATH:$GOPATH/bin
eatmydata go get github.com/golang/protobuf/protoc-gen-go
eatmydata go install google.golang.org/protobuf/cmd/protoc-gen-go
eatmydata go get google.golang.org/grpc/cmd/protoc-gen-go-grpc
cd proto
eatmydata make ../daemon/ui/protocol/ui.pb.go
- name: Build
run: |
cd daemon
eatmydata go mod tidy
eatmydata go mod vendor
eatmydata go build -v .
opensnitch-1.5.8.1/.gitignore 0000664 0000000 0000000 00000000035 14401326716 0016030 0 ustar 00root root 0000000 0000000 *.sock
*.pyc
*.profile
rules
opensnitch-1.5.8.1/LICENSE 0000664 0000000 0000000 00000104515 14401326716 0015055 0 ustar 00root root 0000000 0000000 GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
.
opensnitch-1.5.8.1/Makefile 0000664 0000000 0000000 00000001543 14401326716 0015505 0 ustar 00root root 0000000 0000000 all: protocol opensnitch_daemon gui
# Install the daemon and the GUI into the system.
install:
	@$(MAKE) -C daemon install
	@$(MAKE) -C ui install

# Generate the gRPC protocol bindings.
protocol:
	@$(MAKE) -C proto

# Build the Go daemon.
opensnitch_daemon:
	@$(MAKE) -C daemon

# Build/package the Python Qt GUI.
gui:
	@$(MAKE) -C ui

clean:
	@$(MAKE) -C daemon clean
	@$(MAKE) -C proto clean
	@$(MAKE) -C ui clean

# Install the GUI locally and launch both GUI and daemon,
# talking over a unix socket, with profiling enabled.
run:
	cd ui && pip3 install --upgrade . && cd ..
	opensnitch-ui --socket unix:///tmp/osui.sock &
	./daemon/opensnitchd -rules-path /etc/opensnitchd/rules -ui-socket unix:///tmp/osui.sock -cpu-profile cpu.profile -mem-profile mem.profile

# Rebuild from scratch and run.
test:
	clear
	$(MAKE) clean
	clear
	mkdir -p rules
	$(MAKE)
	clear
	$(MAKE) run

# Rebuild, generate ad-blocking rules and run without profiling.
adblocker:
	clear
	$(MAKE) clean
	clear
	$(MAKE)
	clear
	python make_ads_rules.py
	clear
	cd ui && pip3 install --upgrade . && cd ..
	opensnitch-ui --socket unix:///tmp/osui.sock &
	./daemon/opensnitchd -rules-path /etc/opensnitchd/rules -ui-socket unix:///tmp/osui.sock
opensnitch-1.5.8.1/README.md 0000664 0000000 0000000 00000002706 14401326716 0015326 0 ustar 00root root 0000000 0000000
**OpenSnitch** is a GNU/Linux application firewall.
### Installation and configuration
Please, refer to [the documentation](https://github.com/evilsocket/opensnitch/wiki) for detailed information.
### Contributors
[See the list](https://github.com/evilsocket/opensnitch/graphs/contributors)
opensnitch-1.5.8.1/daemon/ 0000775 0000000 0000000 00000000000 14401326716 0015305 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/.gitignore 0000664 0000000 0000000 00000000023 14401326716 0017270 0 ustar 00root root 0000000 0000000 opensnitchd
vendor
opensnitch-1.5.8.1/daemon/Gopkg.toml 0000664 0000000 0000000 00000000540 14401326716 0017250 0 ustar 00root root 0000000 0000000 [[constraint]]
name = "github.com/fsnotify/fsnotify"
version = "1.4.7"
[[constraint]]
name = "github.com/google/gopacket"
version = "~1.1.14"
[[constraint]]
name = "google.golang.org/grpc"
version = "~1.11.2"
[[constraint]]
name = "github.com/evilsocket/ftrace"
version = "~1.2.0"
[prune]
go-tests = true
unused-packages = true
opensnitch-1.5.8.1/daemon/Makefile 0000664 0000000 0000000 00000000740 14401326716 0016746 0 ustar 00root root 0000000 0000000 #SRC contains all *.go *.c *.h files in daemon/ and its subfolders
# SRC contains all *.go *.c *.h files in daemon/ and its subfolders
SRC := $(shell find . -type f -name '*.go' -o -name '*.h' -o -name '*.c')

all: opensnitchd

# Install binary, systemd unit and default configuration files.
install:
	@mkdir -p /etc/opensnitchd/rules
	@cp opensnitchd /usr/local/bin/
	@cp opensnitchd.service /etc/systemd/system/
	@cp default-config.json /etc/opensnitchd/
	@cp system-fw.json /etc/opensnitchd/
	@systemctl daemon-reload

# Rebuild whenever any source file changes.
opensnitchd: $(SRC)
	@go get
	@go build -o opensnitchd .

clean:
	@rm -rf opensnitchd
opensnitch-1.5.8.1/daemon/conman/ 0000775 0000000 0000000 00000000000 14401326716 0016560 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/conman/connection.go 0000664 0000000 0000000 00000016171 14401326716 0021254 0 ustar 00root root 0000000 0000000 package conman
import (
"errors"
"fmt"
"net"
"os"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/dns"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/netfilter"
"github.com/evilsocket/opensnitch/daemon/netlink"
"github.com/evilsocket/opensnitch/daemon/netstat"
"github.com/evilsocket/opensnitch/daemon/procmon"
"github.com/evilsocket/opensnitch/daemon/procmon/ebpf"
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
"github.com/google/gopacket/layers"
)
// Connection represents an outgoing connection.
type Connection struct {
	Protocol string // transport protocol ("tcp", "udp", "udplite"), "6"-suffixed for IPv6
	SrcIP    net.IP
	SrcPort  uint
	DstIP    net.IP
	DstPort  uint
	DstHost  string // resolved domain name for DstIP, empty if unknown
	Entry    *netstat.Entry   // socket entry (uid/inode) associated with this connection
	Process  *procmon.Process // process that created the connection; nil if not found
	pkt      *netfilter.Packet // raw intercepted packet the connection was parsed from
}
var showUnknownCons = false
// Parse extracts the IP layers from a network packet to determine what
// process generated a connection.
// Returns nil when the packet can't be parsed, when there isn't enough
// information, or when it's an IPv6 packet and IPv6 is disabled.
func Parse(nfp netfilter.Packet, interceptUnknown bool) *Connection {
	showUnknownCons = interceptUnknown

	if nfp.IsIPv4() {
		con, err := NewConnection(&nfp)
		if err != nil {
			log.Debug("%s", err)
			return nil
		}
		// con may be nil here (not enough info); callers handle that.
		return con
	}

	// Not IPv4: only continue if the system has IPv6 enabled.
	if !core.IPv6Enabled {
		return nil
	}

	con, err := NewConnection6(&nfp)
	if err != nil {
		log.Debug("%s", err)
		return nil
	}
	return con
}
// newConnectionImpl fills in the transport information (ports/protocol) of a
// partially-built Connection, then tries to resolve the process that owns it:
// first via eBPF (if that's the active monitor method), then falling back to
// netlink / /proc-based lookups.
// Returns (nil, nil) when there's not enough information, (nil, err) when the
// process can't be identified, or the completed Connection on success.
func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) (cr *Connection, err error) {
	// no errors but not enough info neither
	if c.parseDirection(protoType) == false {
		return nil, nil
	}
	log.Debug("new connection %s => %d:%v -> %v:%d uid: %d", c.Protocol, c.SrcPort, c.SrcIP, c.DstIP, c.DstPort, nfp.UID)

	// Placeholder entry; UserId/INode stay -1 until resolved below.
	c.Entry = &netstat.Entry{
		Proto:   c.Protocol,
		SrcIP:   c.SrcIP,
		SrcPort: c.SrcPort,
		DstIP:   c.DstIP,
		DstPort: c.DstPort,
		UserId:  -1,
		INode:   -1,
	}

	pid := -1
	uid := -1
	if procmon.MethodIsEbpf() {
		pid, uid, err = ebpf.GetPid(c.Protocol, c.SrcPort, c.SrcIP, c.DstIP, c.DstPort)
		if err != nil {
			log.Warning("ebpf warning: %v", err)
			return nil, nil
		}
	}
	// sometimes when using eBPF the connection is not found, but falling back to legacy
	// methods helps to find it and avoid "unknown/kernel pop-ups". TODO: investigate
	if pid < 0 {
		// 0. lookup uid and inode via netlink. Can return several inodes.
		// 1. lookup uid and inode using /proc/net/(udp|tcp|udplite)
		// 2. lookup pid by inode
		// 3. if this is coming from us, just accept
		// 4. lookup process info by pid
		var inodeList []int
		uid, inodeList = netlink.GetSocketInfo(c.Protocol, c.SrcIP, c.SrcPort, c.DstIP, c.DstPort)
		if len(inodeList) == 0 {
			// netlink gave nothing: fall back to parsing /proc/net/*.
			if c.Entry = netstat.FindEntry(c.Protocol, c.SrcIP, c.SrcPort, c.DstIP, c.DstPort); c.Entry == nil {
				return nil, fmt.Errorf("Could not find netstat entry for: %s", c)
			}
			if c.Entry.INode > 0 {
				log.Debug("connection found in netstat: %v", c.Entry)
				inodeList = append([]int{c.Entry.INode}, inodeList...)
			}
		}
		if len(inodeList) == 0 {
			log.Debug("<== no inodes found, applying default action.")
		}
		// Try each candidate inode until one maps to a PID.
		for n, inode := range inodeList {
			pid = procmon.GetPIDFromINode(inode, fmt.Sprint(inode, c.SrcIP, c.SrcPort, c.DstIP, c.DstPort))
			if pid != -1 {
				log.Debug("[%d] PID found %d [%d]", n, pid, inode)
				c.Entry.INode = inode
				break
			}
		}
	}
	// Prefer the UID reported by netfilter; 0xffffffff means "not available".
	if nfp.UID != 0xffffffff {
		c.Entry.UserId = int(nfp.UID)
	} else {
		c.Entry.UserId = uid
	}

	if pid == os.Getpid() {
		// return a Process object with our PID, to be able to exclude our own connections
		// (to the UI on a local socket for example)
		c.Process = procmon.NewProcess(pid, "")
		return c, nil
	}

	if c.Process = procmon.FindProcess(pid, showUnknownCons); c.Process == nil {
		return nil, fmt.Errorf("Could not find process by its pid %d for: %s", pid, c)
	}
	return c, nil
}
// NewConnection creates a new Connection object, and returns the details of it.
// It fails when the packet doesn't carry a parsable IPv4 layer.
func NewConnection(nfp *netfilter.Packet) (c *Connection, err error) {
	layer := nfp.Packet.Layer(layers.LayerTypeIPv4)
	if layer == nil {
		return nil, errors.New("Error getting IPv4 layer")
	}
	ip, valid := layer.(*layers.IPv4)
	if !valid {
		return nil, errors.New("Error getting IPv4 layer data")
	}
	// Seed the connection with the network-level info; transport ports
	// and process details are resolved by newConnectionImpl().
	c = &Connection{
		SrcIP:   ip.SrcIP,
		DstIP:   ip.DstIP,
		DstHost: dns.HostOr(ip.DstIP, ""),
		pkt:     nfp,
	}
	return newConnectionImpl(nfp, c, "")
}
// NewConnection6 creates a IPv6 new Connection object, and returns the details of it.
// It fails when the packet doesn't carry a parsable IPv6 layer.
func NewConnection6(nfp *netfilter.Packet) (c *Connection, err error) {
	layer := nfp.Packet.Layer(layers.LayerTypeIPv6)
	if layer == nil {
		return nil, errors.New("Error getting IPv6 layer")
	}
	ip, valid := layer.(*layers.IPv6)
	if !valid {
		return nil, errors.New("Error getting IPv6 layer data")
	}
	// Seed the connection with the network-level info; "6" makes the
	// protocol name tcp6/udp6/udplite6 in newConnectionImpl().
	c = &Connection{
		SrcIP:   ip.SrcIP,
		DstIP:   ip.DstIP,
		DstHost: dns.HostOr(ip.DstIP, ""),
		pkt:     nfp,
	}
	return newConnectionImpl(nfp, c, "6")
}
// parseDirection extracts the transport protocol and the source/destination
// ports from the intercepted packet. protoType is "" for IPv4 and "6" for
// IPv6, and is appended to the protocol name ("tcp" -> "tcp6").
// Returns false when none of the supported transports (TCP, UDP, UDPLite)
// is present.
func (c *Connection) parseDirection(protoType string) bool {
	if tcpLayer := c.pkt.Packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
		tcp, valid := tcpLayer.(*layers.TCP)
		if !valid || tcp == nil {
			return false
		}
		c.Protocol = "tcp" + protoType
		c.DstPort = uint(tcp.DstPort)
		c.SrcPort = uint(tcp.SrcPort)
		// Traffic to port 53 may carry the domains being resolved.
		if tcp.DstPort == 53 {
			c.getDomains(c.pkt, c)
		}
		return true
	}
	if udpLayer := c.pkt.Packet.Layer(layers.LayerTypeUDP); udpLayer != nil {
		udp, valid := udpLayer.(*layers.UDP)
		if !valid || udp == nil {
			return false
		}
		c.Protocol = "udp" + protoType
		c.DstPort = uint(udp.DstPort)
		c.SrcPort = uint(udp.SrcPort)
		if udp.DstPort == 53 {
			c.getDomains(c.pkt, c)
		}
		return true
	}
	if udpliteLayer := c.pkt.Packet.Layer(layers.LayerTypeUDPLite); udpliteLayer != nil {
		udplite, valid := udpliteLayer.(*layers.UDPLite)
		if !valid || udplite == nil {
			return false
		}
		c.Protocol = "udplite" + protoType
		c.DstPort = uint(udplite.DstPort)
		c.SrcPort = uint(udplite.SrcPort)
		return true
	}
	return false
}
// getDomains extracts the queried domain names from a DNS packet and
// records the last question as the connection's destination host.
func (c *Connection) getDomains(nfp *netfilter.Packet, con *Connection) {
	for _, question := range dns.GetQuestions(nfp) {
		con.DstHost = question
	}
}
// To returns the destination host of a connection: the resolved domain
// name when one is known, the plain destination IP otherwise.
func (c *Connection) To() string {
	if c.DstHost != "" {
		return c.DstHost
	}
	return c.DstIP.String()
}
// String returns a human-readable representation of the connection with
// as much detail as has been resolved so far.
func (c *Connection) String() string {
	dst := c.To()
	// No socket entry resolved yet: only network-level information.
	if c.Entry == nil {
		return fmt.Sprintf("%s ->(%s)-> %s:%d", c.SrcIP, c.Protocol, dst, c.DstPort)
	}
	// Socket entry known but the owning process could not be found.
	if c.Process == nil {
		return fmt.Sprintf("%s (uid:%d) ->(%s)-> %s:%d", c.SrcIP, c.Entry.UserId, c.Protocol, dst, c.DstPort)
	}
	return fmt.Sprintf("%s (%d) -> %s:%d (proto:%s uid:%d)", c.Process.Path, c.Process.ID, dst, c.DstPort, c.Protocol, c.Entry.UserId)
}
// Serialize returns a connection serialized.
// NOTE(review): c.Entry and c.Process are dereferenced unconditionally,
// so this must only be called on fully-resolved connections — confirm
// with the callers.
func (c *Connection) Serialize() *protocol.Connection {
	return &protocol.Connection{
		Protocol:    c.Protocol,
		SrcIp:       c.SrcIP.String(),
		SrcPort:     uint32(c.SrcPort),
		DstIp:       c.DstIP.String(),
		DstHost:     c.DstHost,
		DstPort:     uint32(c.DstPort),
		UserId:      uint32(c.Entry.UserId),
		ProcessId:   uint32(c.Process.ID),
		ProcessPath: c.Process.Path,
		ProcessArgs: c.Process.Args,
		ProcessEnv:  c.Process.Env,
		ProcessCwd:  c.Process.CWD,
	}
}
opensnitch-1.5.8.1/daemon/conman/connection_test.go 0000664 0000000 0000000 00000007042 14401326716 0022310 0 ustar 00root root 0000000 0000000 package conman
import (
"fmt"
"net"
"testing"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/evilsocket/opensnitch/daemon/netfilter"
)
// Adding new packets:
// wireshark -> right click -> Copy as HexDump -> create []byte{}
// NewTCPPacket returns a captured TCP SYN packet (Ethernet hexdump),
// used as a fixture by the parseDirection tests.
func NewTCPPacket() gopacket.Packet {
	// 47676:192.168.1.100 -> 1.1.1.1:23
	testTCPPacket := []byte{0x4c, 0x6e, 0x6e, 0xd5, 0x79, 0xbf, 0x00, 0x28, 0x9d, 0x43, 0x7f, 0xd7, 0x08, 0x00, 0x45, 0x10,
		0x00, 0x3c, 0x1d, 0x07, 0x40, 0x00, 0x40, 0x06, 0x59, 0x8e, 0xc0, 0xa8, 0x01, 0x6d, 0x01, 0x01,
		0x01, 0x01, 0xba, 0x3c, 0x00, 0x17, 0x47, 0x7e, 0xf3, 0x0b, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x02,
		0xfa, 0xf0, 0x4c, 0x27, 0x00, 0x00, 0x02, 0x04, 0x05, 0xb4, 0x04, 0x02, 0x08, 0x0a, 0x91, 0xfb,
		0xb5, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x03, 0x0a}
	return gopacket.NewPacket(testTCPPacket, layers.LinkTypeEthernet, gopacket.Default)
}
// NewUDPPacket returns a captured UDP DNS query packet (Ethernet hexdump),
// used as a fixture by the parseDirection tests.
func NewUDPPacket() gopacket.Packet {
	// 29517:192.168.1.109 -> 1.0.0.1:53
	testUDPPacketDNS := []byte{
		0x4c, 0x6e, 0x6e, 0xd5, 0x79, 0xbf, 0x00, 0x28, 0x9d, 0x43, 0x7f, 0xd7, 0x08, 0x00, 0x45, 0x00,
		0x00, 0x40, 0x54, 0x1a, 0x40, 0x00, 0x3f, 0x11, 0x24, 0x7d, 0xc0, 0xa8, 0x01, 0x6d, 0x01, 0x00,
		0x00, 0x01, 0x73, 0x4d, 0x00, 0x35, 0x00, 0x2c, 0xf1, 0x17, 0x05, 0x51, 0x00, 0x20, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x70, 0x69, 0x04, 0x68, 0x6f, 0x6c, 0x65, 0x00, 0x00,
		0x01, 0x00, 0x01, 0x00, 0x00, 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
	}
	return gopacket.NewPacket(testUDPPacketDNS, layers.LinkTypeEthernet, gopacket.Default)
}
// EstablishConnection dials dst over proto and returns the resulting
// connection, printing and returning any error encountered.
func EstablishConnection(proto, dst string) (net.Conn, error) {
	conn, err := net.Dial(proto, dst)
	if err == nil {
		return conn, nil
	}
	fmt.Println(err)
	return nil, err
}
// ListenOnPort starts listening on the given proto/port pair, printing
// and returning any error encountered.
func ListenOnPort(proto, port string) (net.Listener, error) {
	listener, err := net.Listen(proto, port)
	if err == nil {
		return listener, nil
	}
	fmt.Println(err)
	return nil, err
}
// NewPacket wraps a gopacket.Packet into a netfilter.Packet with a
// fixed test UID (666) and IPv4 as the network protocol.
func NewPacket(pkt gopacket.Packet) *netfilter.Packet {
	return &netfilter.Packet{
		Packet:          pkt,
		UID:             666,
		NetworkProtocol: netfilter.IPv4,
	}
}
// NewDummyConnection builds a Connection with only the source and
// destination IPs set; all other fields are left as zero values.
func NewDummyConnection(src, dst net.IP) *Connection {
	return &Connection{
		SrcIP: src,
		DstIP: dst,
	}
}
// Test TCP parseDirection()
// TestParseTCPDirection verifies that parseDirection() extracts the
// protocol, source port and destination port from a raw TCP packet.
func TestParseTCPDirection(t *testing.T) {
	srcIP := net.IP{192, 168, 1, 100}
	dstIP := net.IP{1, 1, 1, 1}
	c := NewDummyConnection(srcIP, dstIP)
	// 47676:192.168.1.100 -> 1.1.1.1:23
	c.pkt = NewPacket(NewTCPPacket())

	// t.Error() already marks the test as failed, so the previous
	// explicit t.Fail() calls were redundant and have been removed.
	if c.parseDirection("") == false {
		t.Error("parseDirection() should not be false")
	}
	if c.SrcPort != 47676 {
		t.Error("parseDirection() SrcPort mismatch:", c)
	}
	if c.DstPort != 23 {
		t.Error("parseDirection() DstPort mismatch:", c)
	}
	if c.Protocol != "tcp" {
		t.Error("parseDirection() Protocol mismatch:", c)
	}
}
// Test UDP parseDirection()
// TestParseUDPDirection verifies that parseDirection() extracts the
// protocol, source port and destination port from a raw UDP DNS packet.
func TestParseUDPDirection(t *testing.T) {
	srcIP := net.IP{192, 168, 1, 100}
	dstIP := net.IP{1, 0, 0, 1}
	c := NewDummyConnection(srcIP, dstIP)
	// 29517:192.168.1.109 -> 1.0.0.1:53
	c.pkt = NewPacket(NewUDPPacket())

	// t.Error() already marks the test as failed, so the previous
	// explicit t.Fail() calls were redundant and have been removed.
	if c.parseDirection("") == false {
		t.Error("parseDirection() should not be false")
	}
	if c.SrcPort != 29517 {
		t.Error("parseDirection() SrcPort mismatch:", c)
	}
	if c.DstPort != 53 {
		t.Error("parseDirection() DstPort mismatch:", c)
	}
	if c.Protocol != "udp" {
		t.Error("parseDirection() Protocol mismatch:", c)
	}
}
opensnitch-1.5.8.1/daemon/core/ 0000775 0000000 0000000 00000000000 14401326716 0016235 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/core/core.go 0000664 0000000 0000000 00000002610 14401326716 0017513 0 ustar 00root root 0000000 0000000 package core
import (
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"time"
)
const (
defaultTrimSet = "\r\n\t "
)
// Trim removes leading and trailing whitespace characters
// ("\r", "\n", "\t" and spaces) from a string.
func Trim(s string) string {
	return strings.Trim(s, defaultTrimSet)
}
// Exec spawns a new process and returns its trimmed combined
// stdout+stderr output.
// Returns an error if the executable isn't found in PATH or the command
// exits with a failure; any partial output is discarded in that case.
func Exec(executable string, args []string) (string, error) {
	path, err := exec.LookPath(executable)
	if err != nil {
		return "", err
	}
	raw, err := exec.Command(path, args...).CombinedOutput()
	if err != nil {
		return "", err
	}
	return Trim(string(raw)), nil
}
// Exists checks if a path exists.
// Note: any Stat error other than "not exist" (e.g. permission denied)
// still reports the path as existing, matching the original semantics.
func Exists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
// ExpandPath replaces '~' shorthand with the user's home directory.
func ExpandPath(path string) (string, error) {
// Check if path is empty
if path != "" {
if strings.HasPrefix(path, "~") {
usr, err := user.Current()
if err != nil {
return "", err
}
// Replace only the first occurrence of ~
path = strings.Replace(path, "~", usr.HomeDir, 1)
}
return filepath.Abs(path)
}
return "", nil
}
// GetFileModTime returns the last modification time of a regular file.
// Directories and unreadable paths yield an error.
func GetFileModTime(filepath string) (time.Time, error) {
	info, err := os.Stat(filepath)
	if err != nil || info.IsDir() {
		return time.Now(), fmt.Errorf("GetFileModTime() Invalid file")
	}
	return info.ModTime(), nil
}
opensnitch-1.5.8.1/daemon/core/system.go 0000664 0000000 0000000 00000001153 14401326716 0020110 0 ustar 00root root 0000000 0000000 package core
import (
"io/ioutil"
"strings"
)
var (
// IPv6Enabled indicates if IPv6 protocol is enabled in the system
IPv6Enabled = Exists("/proc/sys/net/ipv6")
)
// GetHostname returns the name of the host where the daemon is running.
func GetHostname() string {
	raw, _ := ioutil.ReadFile("/proc/sys/kernel/hostname")
	// Strip the trailing newline the kernel appends.
	return strings.Replace(string(raw), "\n", "", -1)
}
// GetKernelVersion returns the running kernel's version string, read
// from /proc/sys/kernel/version. (The previous comment was a copy-paste
// of GetHostname's.)
func GetKernelVersion() string {
	version, _ := ioutil.ReadFile("/proc/sys/kernel/version")
	return strings.Replace(string(version), "\n", "", -1)
}
opensnitch-1.5.8.1/daemon/core/version.go 0000664 0000000 0000000 00000000310 14401326716 0020243 0 ustar 00root root 0000000 0000000 package core
// version related consts
const (
Name = "opensnitch-daemon"
Version = "1.5.8"
Author = "Simone 'evilsocket' Margaritelli"
Website = "https://github.com/evilsocket/opensnitch"
)
opensnitch-1.5.8.1/daemon/default-config.json 0000664 0000000 0000000 00000000551 14401326716 0021070 0 ustar 00root root 0000000 0000000 {
"Server":
{
"Address":"unix:///tmp/osui.sock",
"LogFile":"/var/log/opensnitchd.log"
},
"DefaultAction": "allow",
"DefaultDuration": "once",
"InterceptUnknown": false,
"ProcMonitorMethod": "ebpf",
"LogLevel": 2,
"Firewall": "iptables",
"Stats": {
"MaxEvents": 150,
"MaxStats": 25
}
}
opensnitch-1.5.8.1/daemon/dns/ 0000775 0000000 0000000 00000000000 14401326716 0016071 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/dns/parse.go 0000664 0000000 0000000 00000000777 14401326716 0017545 0 ustar 00root root 0000000 0000000 package dns
import (
"github.com/evilsocket/opensnitch/daemon/netfilter"
"github.com/google/gopacket/layers"
)
// GetQuestions retrieves the domain names a process is trying to resolve.
// Returns an empty slice when the packet carries no parsable DNS layer.
func GetQuestions(nfp *netfilter.Packet) (questions []string) {
	dnsLayer := nfp.Packet.Layer(layers.LayerTypeDNS)
	if dnsLayer == nil {
		return questions
	}
	// Check the type assertion: the previous code ignored the ok value,
	// which would have dereferenced a nil *layers.DNS on failure.
	dns, ok := dnsLayer.(*layers.DNS)
	if !ok || dns == nil {
		return questions
	}
	for _, dnsQuestion := range dns.Questions {
		questions = append(questions, string(dnsQuestion.Name))
	}
	return questions
}
opensnitch-1.5.8.1/daemon/dns/track.go 0000664 0000000 0000000 00000004037 14401326716 0017530 0 ustar 00root root 0000000 0000000 package dns
import (
"net"
"sync"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
)
var (
responses = make(map[string]string, 0)
lock = sync.RWMutex{}
)
// TrackAnswers obtains the resolved domains of a DNS query.
// If the packet is UDP DNS, the domain names are added to the list of
// resolved domains. Returns false for anything that is not a UDP packet
// coming from port 53 with a parsable DNS layer.
func TrackAnswers(packet gopacket.Packet) bool {
	udpLayer := packet.Layer(layers.LayerTypeUDP)
	if udpLayer == nil {
		return false
	}
	udp, validUDP := udpLayer.(*layers.UDP)
	if !validUDP || udp == nil {
		return false
	}
	// Only responses coming from a DNS server port are of interest.
	if udp.SrcPort != 53 {
		return false
	}
	dnsLayer := packet.Layer(layers.LayerTypeDNS)
	if dnsLayer == nil {
		return false
	}
	answers, validDNS := dnsLayer.(*layers.DNS)
	if !validDNS || answers == nil {
		return false
	}
	for _, ans := range answers.Answers {
		if ans.Name == nil {
			continue
		}
		// A answers map an IP to the name; CNAME answers map alias->name.
		if ans.IP != nil {
			Track(ans.IP.String(), string(ans.Name))
		} else if ans.CNAME != nil {
			Track(string(ans.CNAME), string(ans.Name))
		}
	}
	return true
}
// Track adds a resolved domain to the list.
// Loopback answers are ignored: they never identify a remote host.
func Track(resolved string, hostname string) {
	if resolved == "127.0.0.1" || resolved == "::1" {
		return
	}
	lock.Lock()
	defer lock.Unlock()
	responses[resolved] = hostname
	log.Debug("New DNS record: %s -> %s", resolved, hostname)
}
// Host returns if a resolved domain is in the list.
// found is true when `resolved` (an IP or a CNAME target recorded by
// Track) has an associated hostname.
func Host(resolved string) (host string, found bool) {
	lock.RLock()
	defer lock.RUnlock()
	host, found = responses[resolved]
	return
}
// HostOr checks if an IP has a domain name already resolved.
// If the domain is in the list it's returned, otherwise the IP will be returned.
func HostOr(ip net.IP, or string) string {
	if host, found := Host(ip.String()); found == true {
		// host might have been CNAME; go back until we reach the "root"
		seen := make(map[string]bool) // prevent possibility of loops
		for {
			// Each lookup walks one step back along the CNAME chain.
			orig, had := Host(host)
			if seen[orig] {
				// Already visited: the chain loops, stop here.
				break
			}
			if !had {
				// No further mapping: host is the original query name.
				break
			}
			seen[orig] = true
			host = orig
		}
		return host
	}
	return or
}
opensnitch-1.5.8.1/daemon/firewall/ 0000775 0000000 0000000 00000000000 14401326716 0017112 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/firewall/common/ 0000775 0000000 0000000 00000000000 14401326716 0020402 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/firewall/common/common.go 0000664 0000000 0000000 00000004010 14401326716 0022214 0 ustar 00root root 0000000 0000000 package common
import (
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/log"
)
type (
	// callback is invoked to reload the firewall rules.
	callback     func()
	// callbackBool reports whether our rules are currently loaded.
	callbackBool func() bool

	// stopChecker wraps the channel used to stop the rules-checker
	// goroutine, protected against concurrent access.
	stopChecker struct {
		sync.RWMutex
		ch chan bool
	}

	// Common holds common fields and functionality of both firewalls,
	// iptables and nftables.
	Common struct {
		sync.RWMutex
		QueueNum        uint16       // NFQUEUE number intercepted connections are sent to
		Running         bool         // whether the firewall hooks are active
		RulesChecker    *time.Ticker // periodic timer to verify our rules are loaded
		stopCheckerChan *stopChecker // set by NewRulesChecker()
	}
)
// exit returns the channel used to signal the rules-checker goroutine to
// stop; guarded by the read lock against a concurrent stop().
func (s *stopChecker) exit() chan bool {
	s.RLock()
	defer s.RUnlock()
	return s.ch
}
// stop signals the checker goroutine to exit, then closes the channel.
// The channel is nil'ed afterwards, so calling stop() twice is safe.
func (s *stopChecker) stop() {
	s.Lock()
	defer s.Unlock()
	if s.ch != nil {
		s.ch <- true
		close(s.ch)
		s.ch = nil
	}
}
// SetQueueNum sets the queue number used by the firewall.
// It's the queue where all intercepted connections will be sent.
// A nil qNum leaves the current value untouched.
func (c *Common) SetQueueNum(qNum *int) {
	if qNum == nil {
		return
	}
	c.Lock()
	defer c.Unlock()
	c.QueueNum = uint16(*qNum)
}
// IsRunning returns if the firewall is running or not.
// The nil-receiver check is done BEFORE taking the lock: the previous
// version called c.RLock() first, which would panic on a nil *Common,
// making its own `c != nil` check unreachable.
func (c *Common) IsRunning() bool {
	if c == nil {
		return false
	}
	c.RLock()
	defer c.RUnlock()
	return c.Running
}
// NewRulesChecker starts monitoring firewall for configuration or rules changes.
// Every 30 seconds areRulesLoaded is consulted, and reloadRules is invoked
// when our interception rules are missing.
func (c *Common) NewRulesChecker(areRulesLoaded callbackBool, reloadRules callback) {
	c.Lock()
	defer c.Unlock()
	// Buffered channel so StopCheckingRules() never blocks on send.
	c.stopCheckerChan = &stopChecker{ch: make(chan bool, 1)}
	c.RulesChecker = time.NewTicker(time.Second * 30)
	go c.startCheckingRules(areRulesLoaded, reloadRules)
}
// startCheckingRules monitors if our rules are loaded.
// If the rules to intercept traffic are not loaded, we'll try to insert them again.
// (The comment previously named the function "StartCheckingRules".)
func (c *Common) startCheckingRules(areRulesLoaded callbackBool, reloadRules callback) {
	for {
		select {
		case <-c.stopCheckerChan.exit():
			goto Exit
		case <-c.RulesChecker.C:
			// Reinsert our interception rules if something flushed them.
			if !areRulesLoaded() {
				reloadRules()
			}
		}
	}
Exit:
	// This checker is shared by the iptables and nftables backends, so
	// don't claim "iptables" in the message.
	log.Info("exit checking firewall rules")
}
// StopCheckingRules stops checking if firewall rules are loaded.
// Both fields are guarded against nil: stopCheckerChan is only set by
// NewRulesChecker(), so calling Stop before Start used to panic.
func (c *Common) StopCheckingRules() {
	if c.RulesChecker != nil {
		c.RulesChecker.Stop()
	}
	if c.stopCheckerChan != nil {
		c.stopCheckerChan.stop()
	}
}
opensnitch-1.5.8.1/daemon/firewall/config/ 0000775 0000000 0000000 00000000000 14401326716 0020357 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/firewall/config/config.go 0000664 0000000 0000000 00000011273 14401326716 0022157 0 ustar 00root root 0000000 0000000 // Package config provides functionality to load and monitor the system
// firewall rules.
// It's inherited by the different firewall packages (iptables, nftables).
//
// The firewall rules defined by the user are reloaded in these cases:
// - When the file system-fw.json changes.
// - When the firewall rules are not present when listing them.
//
package config
import (
"encoding/json"
"io/ioutil"
"sync"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/fsnotify/fsnotify"
)
// callback is the signature of the functions invoked on configuration events
// (e.g. to delete old rules before a reload).
type callback func()

// FwRule holds the fields of a rule, as read from system-fw.json.
type FwRule struct {
	sync.RWMutex
	// Description is free-form text describing the rule.
	Description string
	// Table is the table the rule belongs to (defaults to "filter" when empty).
	Table string
	// Chain is the chain the rule is attached to (INPUT, OUTPUT, ...).
	Chain string
	// Parameters holds the match options of the rule, space separated.
	Parameters string
	// Target is the action of the rule (ACCEPT, DROP, NFQUEUE, ...).
	Target string
	// TargetParameters holds the options of the Target, space separated.
	TargetParameters string
}

// rulesList wraps a single FwRule entry of the configuration.
type rulesList struct {
	sync.RWMutex
	Rule *FwRule
}

// SystemConfig holds the list of rules to be added to the system
type SystemConfig struct {
	sync.RWMutex
	SystemRules []*rulesList
}
// Config holds the functionality to re/load the firewall configuration from disk.
// This is the configuration to manage the system firewall (iptables, nftables).
type Config struct {
	sync.Mutex
	// file is the path of the configuration file on disk.
	file string
	// watcher notifies about writes/removals of the configuration file.
	watcher *fsnotify.Watcher
	// monitorExitChan stops the config-file monitor goroutine.
	monitorExitChan chan bool
	// SysConfig holds the rules parsed from disk.
	SysConfig SystemConfig
	// subscribe to this channel to receive config reload events
	ReloadConfChan chan bool
	// preloadCallback is called before reloading the configuration,
	// in order to delete old fw rules.
	preloadCallback callback
}
// NewSystemFwConfig initializes config fields: the path of the configuration
// file, the file watcher, and the channels used to signal reloads.
func (c *Config) NewSystemFwConfig(preLoadCb callback) (*Config, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Warning("Error creating firewall config watcher: %s", err)
		return nil, err
	}
	c.Lock()
	defer c.Unlock()
	c.file = "/etc/opensnitchd/system-fw.json"
	c.monitorExitChan = make(chan bool, 1)
	c.ReloadConfChan = make(chan bool, 1)
	c.preloadCallback = preLoadCb
	c.watcher = watcher
	return c, nil
}
// LoadDiskConfiguration reads and loads the firewall configuration from disk.
// On the first call (reload == false) it also spawns the goroutine that
// monitors the file; on reloads it only notifies subscribers via ReloadConfChan.
func (c *Config) LoadDiskConfiguration(reload bool) {
	c.Lock()
	defer c.Unlock()
	raw, err := ioutil.ReadFile(c.file)
	if err != nil {
		log.Error("Error reading firewall configuration from disk %s: %s", c.file, err)
		return
	}
	c.loadConfiguration(raw)
	// we need to monitor the configuration file for changes, regardless if it's
	// malformed or not.
	// Remove+Add refreshes the watch; NOTE(review): presumably because some
	// editors replace the file on save, invalidating the old watch — confirm.
	c.watcher.Remove(c.file)
	if err := c.watcher.Add(c.file); err != nil {
		log.Error("Could not watch firewall configuration: %s", err)
		return
	}
	if reload {
		// this send happens while holding c.Lock; it relies on ReloadConfChan
		// being buffered (size 1, see NewSystemFwConfig) or being drained.
		c.ReloadConfChan <- true
		return
	}
	go c.monitorConfigWorker()
}
// loadConfiguration parses the system firewall rules read from disk.
// Then the rules are added based on the configuration defined.
func (c *Config) loadConfiguration(rawConfig []byte) {
	c.SysConfig.Lock()
	defer c.SysConfig.Unlock()
	// delete old system rules, that may be different from the new ones
	c.preloadCallback()
	if err := json.Unmarshal(rawConfig, &c.SysConfig); err != nil {
		// we only log the parser error, giving the user a chance to write a valid config
		log.Error("Error parsing firewall configuration %s: %s", c.file, err)
		// don't claim success: previously "fw configuration loaded" was
		// logged even when parsing failed.
		return
	}
	log.Info("fw configuration loaded")
}
// saveConfiguration validates the given raw JSON configuration, loads it into
// memory and writes it to disk.
// It returns an error if the configuration is not valid JSON or can't be written.
func (c *Config) saveConfiguration(rawConfig string) error {
	// json.Marshal of a []byte always succeeds (it just base64-encodes the
	// bytes), so the previous check never validated anything. Unmarshal into
	// a RawMessage to verify the configuration is well-formed JSON instead.
	var validated json.RawMessage
	if err := json.Unmarshal([]byte(rawConfig), &validated); err != nil {
		log.Error("saving json firewall configuration: %s %s", err, rawConfig)
		return err
	}
	c.loadConfiguration([]byte(rawConfig))
	if err := ioutil.WriteFile(c.file, []byte(rawConfig), 0644); err != nil {
		log.Error("writing firewall configuration to disk: %s", err)
		return err
	}
	return nil
}
// StopConfigWatcher stops the configuration watcher and stops the subroutine.
// It signals both worker goroutines (file monitor and reload monitor) and
// closes their channels, then shuts down the fsnotify watcher.
func (c *Config) StopConfigWatcher() {
	c.Lock()
	defer c.Unlock()
	if c.monitorExitChan != nil {
		// buffered channel: the send doesn't block even if the monitor
		// goroutine already exited.
		c.monitorExitChan <- true
		close(c.monitorExitChan)
	}
	if c.ReloadConfChan != nil {
		c.ReloadConfChan <- false // exit
		close(c.ReloadConfChan)
	}
	if c.watcher != nil {
		c.watcher.Remove(c.file)
		c.watcher.Close()
	}
}
// monitorConfigWorker watches the configuration file and reloads it on every
// write or removal, until monitorExitChan is signaled.
func (c *Config) monitorConfigWorker() {
Loop:
	for {
		select {
		case <-c.monitorExitChan:
			break Loop
		case event := <-c.watcher.Events:
			if (event.Op&fsnotify.Write == fsnotify.Write) || (event.Op&fsnotify.Remove == fsnotify.Remove) {
				c.LoadDiskConfiguration(true)
			}
		}
	}
	log.Debug("stop monitoring firewall config file")
	c.Lock()
	c.monitorExitChan = nil
	c.Unlock()
}
// MonitorSystemFw waits for configuration reloads.
// A true value on ReloadConfChan re-applies the rules via reloadCallback;
// a false value (or the channel being closed) stops the monitor.
func (c *Config) MonitorSystemFw(reloadCallback callback) {
	for reload := range c.ReloadConfChan {
		if !reload {
			break
		}
		reloadCallback()
	}
	log.Info("iptables, stop monitoring system fw rules")
	c.Lock()
	c.ReloadConfChan = nil
	c.Unlock()
}
opensnitch-1.5.8.1/daemon/firewall/iptables/ 0000775 0000000 0000000 00000000000 14401326716 0020715 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/firewall/iptables/iptables.go 0000664 0000000 0000000 00000006604 14401326716 0023055 0 ustar 00root root 0000000 0000000 package iptables
import (
"os/exec"
"regexp"
"sync"
"github.com/evilsocket/opensnitch/daemon/firewall/common"
"github.com/evilsocket/opensnitch/daemon/firewall/config"
"github.com/evilsocket/opensnitch/daemon/log"
)
// Action is the modifier we apply to a rule.
type Action string

const (
	// Name is the name that identifies this firewall
	Name = "iptables"
	// SystemRulePrefix prefix added to each system rule
	SystemRulePrefix = "opensnitch-filter"
)

// Actions we apply to the firewall.
const (
	// ADD appends a rule to a chain (-A).
	ADD = Action("-A")
	// INSERT adds a rule at the top of a chain (-I).
	INSERT = Action("-I")
	// DELETE removes a rule from a chain (-D).
	DELETE = Action("-D")
	// FLUSH deletes all the rules of a chain (-F).
	FLUSH = Action("-F")
	// NEWCHAIN creates a new user-defined chain (-N).
	NEWCHAIN = Action("-N")
	// DELCHAIN deletes a user-defined chain (-X).
	DELCHAIN = Action("-X")
)
// SystemChains holds the fw rules defined by the user,
// indexed by "<table>-<chain name>".
type SystemChains struct {
	sync.RWMutex
	Rules map[string]config.FwRule
}

// Iptables struct holds the fields of the iptables fw
type Iptables struct {
	sync.Mutex
	config.Config
	common.Common
	// bin and bin6 are the executables used to manage IPv4/IPv6 rules.
	bin string
	bin6 string
	// regexRulesQuery matches our traffic-interception (NFQUEUE) rules.
	regexRulesQuery *regexp.Regexp
	// regexSystemRulesQuery matches the user-defined (opensnitch-filter) rules.
	regexSystemRulesQuery *regexp.Regexp
	// chains caches the user-defined chains added to the system.
	chains SystemChains
}
// Fw initializes a new Iptables object.
// It returns an error if the iptables binary is not available on the system.
func Fw() (*Iptables, error) {
	if err := IsAvailable(); err != nil {
		return nil, err
	}
	ipt := &Iptables{
		bin:  "iptables",
		bin6: "ip6tables",
		// the patterns are static and known-valid, so use MustCompile instead
		// of Compile with a discarded error, which could silently yield a nil
		// regexp and panic later.
		regexRulesQuery:       regexp.MustCompile(`NFQUEUE.*ctstate NEW,RELATED.*NFQUEUE num.*bypass`),
		regexSystemRulesQuery: regexp.MustCompile(SystemRulePrefix + ".*"),
		chains:                SystemChains{Rules: make(map[string]config.FwRule)},
	}
	return ipt, nil
}
// Name returns the firewall name ("iptables").
func (ipt *Iptables) Name() string {
	return Name
}
// Init inserts the firewall rules and starts monitoring for firewall
// changes.
// qNum is the netfilter queue number where intercepted connections are sent.
// Calling Init while already running is a no-op.
func (ipt *Iptables) Init(qNum *int) {
	if ipt.IsRunning() {
		return
	}
	ipt.SetQueueNum(qNum)
	// In order to clean up any existing firewall rule before start,
	// we need to load the fw configuration first.
	ipt.NewSystemFwConfig(ipt.preloadConfCallback)
	go ipt.MonitorSystemFw(ipt.AddSystemRules)
	ipt.LoadDiskConfiguration(false)
	// start from a clean state
	ipt.CleanRules(false)
	ipt.InsertRules()
	ipt.AddSystemRules()
	// start monitoring firewall rules to intercept network traffic
	ipt.NewRulesChecker(ipt.AreRulesLoaded, ipt.reloadRulesCallback)
	ipt.Running = true
}
// Stop deletes the firewall rules, allowing network traffic.
// Calling Stop while not running is a no-op.
func (ipt *Iptables) Stop() {
	// use the lock-protected accessor instead of reading Running directly,
	// consistent with the nftables implementation of Stop().
	if !ipt.IsRunning() {
		return
	}
	ipt.StopConfigWatcher()
	ipt.StopCheckingRules()
	// only log deletion errors when running in debug mode
	ipt.CleanRules(log.GetLogLevel() == log.DEBUG)
	ipt.Running = false
}
// IsAvailable checks if iptables is installed in the system,
// by invoking "iptables -V" and reporting any execution error.
func IsAvailable() error {
	_, err := exec.Command("iptables", "-V").CombinedOutput()
	return err
}
// InsertRules adds fw rules to intercept connections.
// If the DNS rule fails the conntrack rule is not attempted (else-if);
// if the conntrack rule fails the daemon exits via log.Fatal, since without
// it no connection can be intercepted.
func (ipt *Iptables) InsertRules() {
	if err4, err6 := ipt.QueueDNSResponses(true, true); err4 != nil || err6 != nil {
		log.Error("Error while running DNS firewall rule: %s %s", err4, err6)
	} else if err4, err6 = ipt.QueueConnections(true, true); err4 != nil || err6 != nil {
		log.Fatal("Error while running conntrack firewall rule: %s %s", err4, err6)
	}
}
// CleanRules deletes the rules we added.
// Passing enable=false to the Queue* helpers turns the insert into a delete
// (see RunRule); user-defined system rules are always removed (force=true).
func (ipt *Iptables) CleanRules(logErrors bool) {
	ipt.QueueDNSResponses(false, logErrors)
	ipt.QueueConnections(false, logErrors)
	ipt.DeleteSystemRules(true, logErrors)
}
opensnitch-1.5.8.1/daemon/firewall/iptables/monitor.go 0000664 0000000 0000000 00000003117 14401326716 0022735 0 ustar 00root root 0000000 0000000 package iptables
import (
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
// AreRulesLoaded checks if the firewall rules for intercept traffic are loaded.
// It lists the mangle OUTPUT chain (IPv4 and, when enabled, IPv6) looking for
// our NFQUEUE rule, and checks each user-defined chain for the
// opensnitch-filter rules.
func (ipt *Iptables) AreRulesLoaded() bool {
	var outMangle6 string
	outMangle, err := core.Exec("iptables", []string{"-n", "-L", "OUTPUT", "-t", "mangle"})
	if err != nil {
		return false
	}
	if core.IPv6Enabled {
		outMangle6, err = core.Exec("ip6tables", []string{"-n", "-L", "OUTPUT", "-t", "mangle"})
		if err != nil {
			return false
		}
	}
	systemRulesLoaded := true
	ipt.chains.RLock()
	if len(ipt.chains.Rules) > 0 {
		for _, rule := range ipt.chains.Rules {
			// listing errors are ignored here: only a successful listing that
			// lacks our rule marks the system rules as not loaded.
			if chainOut4, err4 := core.Exec("iptables", []string{"-n", "-L", rule.Chain, "-t", rule.Table}); err4 == nil {
				if ipt.regexSystemRulesQuery.FindString(chainOut4) == "" {
					systemRulesLoaded = false
					break
				}
			}
			if core.IPv6Enabled {
				if chainOut6, err6 := core.Exec("ip6tables", []string{"-n", "-L", rule.Chain, "-t", rule.Table}); err6 == nil {
					if ipt.regexSystemRulesQuery.FindString(chainOut6) == "" {
						systemRulesLoaded = false
						break
					}
				}
			}
		}
	}
	ipt.chains.RUnlock()
	// all checks must pass: the IPv4 interception rule, the system rules,
	// and (when IPv6 is enabled) the IPv6 interception rule.
	result := ipt.regexRulesQuery.FindString(outMangle) != "" &&
		systemRulesLoaded
	if core.IPv6Enabled {
		result = result && ipt.regexRulesQuery.FindString(outMangle6) != ""
	}
	return result
}
// reloadRulesCallback deletes and re-inserts the interception rules and the
// user-defined system rules. Invoked by the rules checker when the rules
// are no longer loaded.
func (ipt *Iptables) reloadRulesCallback() {
	log.Important("firewall rules changed, reloading")
	ipt.QueueDNSResponses(false, false)
	ipt.QueueConnections(false, false)
	ipt.InsertRules()
	ipt.AddSystemRules()
}
opensnitch-1.5.8.1/daemon/firewall/iptables/rules.go 0000664 0000000 0000000 00000004276 14401326716 0022407 0 ustar 00root root 0000000 0000000 package iptables
import (
"fmt"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/vishvananda/netlink"
)
// RunRule inserts or deletes a firewall rule.
// action is the iptables modifier (-A, -I, ...); when enable is false the
// action is forced to DELETE, so the same rule spec removes the rule.
// logError controls whether execution failures are logged.
// It returns the error (if any) of running the rule with iptables (err4)
// and ip6tables (err6).
func (ipt *Iptables) RunRule(action Action, enable bool, logError bool, rule []string) (err4, err6 error) {
	if !enable {
		// use the DELETE constant instead of the previous hard-coded "-D"
		action = DELETE
	}
	rule = append([]string{string(action)}, rule...)
	ipt.Lock()
	defer ipt.Unlock()
	if _, err4 = core.Exec(ipt.bin, rule); err4 != nil {
		if logError {
			log.Error("Error while running firewall rule, ipv4 err: %s", err4)
			log.Error("rule: %s", rule)
		}
	}
	// On some systems IPv6 is disabled
	if core.IPv6Enabled {
		if _, err6 = core.Exec(ipt.bin6, rule); err6 != nil {
			if logError {
				log.Error("Error while running firewall rule, ipv6 err: %s", err6)
				log.Error("rule: %s", rule)
			}
		}
	}
	return
}
// QueueDNSResponses redirects DNS responses to us, in order to keep a cache
// of resolved domains.
// INPUT --protocol udp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
// enable=true inserts the rule, enable=false deletes it (see RunRule).
func (ipt *Iptables) QueueDNSResponses(enable bool, logError bool) (err4, err6 error) {
	return ipt.RunRule(INSERT, enable, logError, []string{
		"INPUT",
		"--protocol", "udp",
		"--sport", "53",
		"-j", "NFQUEUE",
		"--queue-num", fmt.Sprintf("%d", ipt.QueueNum),
		// bypass: let traffic pass if no userspace program is listening
		"--queue-bypass",
	})
}
// QueueConnections inserts the firewall rule which redirects connections to us.
// They are queued until the user denies/accept them, or reaches a timeout.
// OUTPUT -t mangle -m conntrack --ctstate NEW,RELATED -j NFQUEUE --queue-num 0 --queue-bypass
// enable=true inserts the rule (and flushes conntrack), enable=false deletes it.
func (ipt *Iptables) QueueConnections(enable bool, logError bool) (error, error) {
	err4, err6 := ipt.RunRule(INSERT, enable, logError, []string{
		"OUTPUT",
		"-t", "mangle",
		"-m", "conntrack",
		"--ctstate", "NEW,RELATED",
		"-j", "NFQUEUE",
		"--queue-num", fmt.Sprintf("%d", ipt.QueueNum),
		"--queue-bypass",
	})
	if enable {
		// flush conntrack as soon as netfilter rule is set. This ensures that already-established
		// connections will go to netfilter queue.
		if err := netlink.ConntrackTableFlush(netlink.ConntrackTable); err != nil {
			log.Error("error in ConntrackTableFlush %s", err)
		}
	}
	return err4, err6
}
opensnitch-1.5.8.1/daemon/firewall/iptables/system.go 0000664 0000000 0000000 00000005237 14401326716 0022577 0 ustar 00root root 0000000 0000000 package iptables
import (
"strings"
"github.com/evilsocket/opensnitch/daemon/firewall/config"
"github.com/evilsocket/opensnitch/daemon/log"
)
// CreateSystemRule creates the custom firewall chains and adds them to the system.
// A rule already tracked in the chains cache is skipped.
func (ipt *Iptables) CreateSystemRule(rule *config.FwRule, logErrors bool) {
	if rule == nil {
		return
	}
	ipt.chains.Lock()
	defer ipt.chains.Unlock()
	chainName := SystemRulePrefix + "-" + rule.Chain
	key := rule.Table + "-" + chainName
	if _, ok := ipt.chains.Rules[key]; ok {
		return
	}
	ipt.RunRule(NEWCHAIN, true, logErrors, []string{chainName, "-t", rule.Table})
	// Insert the rule at the top of the chain
	if err4, err6 := ipt.RunRule(INSERT, true, logErrors, []string{rule.Chain, "-t", rule.Table, "-j", chainName}); err4 == nil && err6 == nil {
		ipt.chains.Rules[key] = *rule
	}
}
// DeleteSystemRules deletes the system rules.
// If force is false and the rule has not been previously added,
// it won't try to delete the rules. Otherwise it'll try to delete them.
func (ipt *Iptables) DeleteSystemRules(force, logErrors bool) {
	ipt.chains.Lock()
	defer ipt.chains.Unlock()
	for _, r := range ipt.SysConfig.SystemRules {
		if r.Rule == nil {
			continue
		}
		chain := SystemRulePrefix + "-" + r.Rule.Chain
		if _, ok := ipt.chains.Rules[r.Rule.Table+"-"+chain]; !ok && !force {
			continue
		}
		// flush our custom chain, unlink it from the parent chain
		// (enable=false makes RunRule delete), then delete the chain itself.
		ipt.RunRule(FLUSH, true, false, []string{chain, "-t", r.Rule.Table})
		ipt.RunRule(DELETE, false, logErrors, []string{r.Rule.Chain, "-t", r.Rule.Table, "-j", chain})
		ipt.RunRule(DELCHAIN, true, false, []string{chain, "-t", r.Rule.Table})
		delete(ipt.chains.Rules, r.Rule.Table+"-"+chain)
	}
}
// AddSystemRule inserts a new user-defined rule into its custom chain.
// It returns the errors (if any) of running the rule with iptables/ip6tables.
func (ipt *Iptables) AddSystemRule(rule *config.FwRule, enable bool) (err4, err6 error) {
	if rule == nil {
		return nil, nil
	}
	// take the write lock: this function may mutate rule.Table below,
	// which was a data race under the previous read lock.
	rule.Lock()
	defer rule.Unlock()
	chain := SystemRulePrefix + "-" + rule.Chain
	// default to the filter table; the assignment is persisted on the rule so
	// later deletions (DeleteSystemRules) use the same table.
	if rule.Table == "" {
		rule.Table = "filter"
	}
	r := []string{chain, "-t", rule.Table}
	if rule.Parameters != "" {
		r = append(r, strings.Split(rule.Parameters, " ")...)
	}
	r = append(r, []string{"-j", rule.Target}...)
	if rule.TargetParameters != "" {
		r = append(r, strings.Split(rule.TargetParameters, " ")...)
	}
	return ipt.RunRule(ADD, enable, true, r)
}
// AddSystemRules creates the system firewall from configuration.
// Existing system rules are deleted first, then every configured rule is
// (re)created and added.
func (ipt *Iptables) AddSystemRules() {
	ipt.DeleteSystemRules(true, false)
	for _, r := range ipt.SysConfig.SystemRules {
		ipt.CreateSystemRule(r.Rule, true)
		ipt.AddSystemRule(r.Rule, true)
	}
}
// preloadConfCallback gets called before the fw configuration is reloaded,
// removing the rules of the previous configuration.
func (ipt *Iptables) preloadConfCallback() {
	ipt.DeleteSystemRules(true, log.GetLogLevel() == log.DEBUG)
}
opensnitch-1.5.8.1/daemon/firewall/nftables/ 0000775 0000000 0000000 00000000000 14401326716 0020710 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/firewall/nftables/monitor.go 0000664 0000000 0000000 00000002327 14401326716 0022732 0 ustar 00root root 0000000 0000000 package nftables
import (
"github.com/evilsocket/opensnitch/daemon/log"
)
// AreRulesLoaded checks if the firewall rules for intercept traffic are loaded.
// There must be exactly 2 interception rules tagged with fwKey (IPv4 + IPv6)
// in the mangle output chains, and 2 in the filter input chains.
func (n *Nft) AreRulesLoaded() bool {
	n.Lock()
	defer n.Unlock()
	nRules, err := n.countRulesByKey(n.mangleTables, n.outputChains, "mangle")
	if err != nil {
		return false
	}
	if nRules != 2 {
		log.Warning("nftables mangle rules not loaded: %d", nRules)
		return false
	}
	nRules, err = n.countRulesByKey(n.filterTables, n.inputChains, "filter")
	if err != nil {
		return false
	}
	if nRules != 2 {
		// fixed typo in the log message ("nfables" -> "nftables")
		log.Warning("nftables filter rules not loaded: %d", nRules)
		return false
	}
	return true
}

// countRulesByKey returns how many rules tagged with fwKey exist in the given
// chains of the given tables. what ("mangle"/"filter") is only used for logging.
func (n *Nft) countRulesByKey(tables []*nftables.Table, chains map[*nftables.Table]*nftables.Chain, what string) (int, error) {
	nRules := 0
	for _, table := range tables {
		rules, err := n.conn.GetRule(table, chains[table])
		if err != nil {
			log.Error("nftables %s rules error: %s, %s", what, table.Name, chains[table].Name)
			return 0, err
		}
		for _, r := range rules {
			if string(r.UserData) == fwKey {
				nRules++
			}
		}
	}
	return nRules, nil
}
// reloadRulesCallback re-adds the user-defined system rules and the
// interception rules. Invoked by the rules checker when the rules are gone.
func (n *Nft) reloadRulesCallback() {
	log.Important("nftables firewall rules changed, reloading")
	n.AddSystemRules()
	n.InsertRules()
}
opensnitch-1.5.8.1/daemon/firewall/nftables/nftables.go 0000664 0000000 0000000 00000006655 14401326716 0023051 0 ustar 00root root 0000000 0000000 package nftables
import (
"sync"
"github.com/evilsocket/opensnitch/daemon/firewall/common"
"github.com/evilsocket/opensnitch/daemon/firewall/config"
"github.com/evilsocket/opensnitch/daemon/firewall/iptables"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/google/nftables"
)
const (
	// Name is the name that identifies this firewall
	Name = "nftables"
	// names of the tables where our rules live
	mangleTableName = "mangle"
	filterTableName = "filter"
	// The following chains will be under our own mangle or filter tables.
	// There shouldn't be other chains with the same name here.
	outputChain = "output"
	inputChain = "input"
	// key assigned to every fw rule we add, in order to get rules by this key.
	fwKey = "opensnitch-key"
)

// our filter and mangle tables, for IPv4 and IPv6.
var (
	filterTable = &nftables.Table{
		Family: nftables.TableFamilyIPv4,
		Name: filterTableName,
	}
	filterTable6 = &nftables.Table{
		Family: nftables.TableFamilyIPv6,
		Name: filterTableName,
	}
	mangleTable = &nftables.Table{
		Family: nftables.TableFamilyIPv4,
		Name: mangleTableName,
	}
	mangleTable6 = &nftables.Table{
		Family: nftables.TableFamilyIPv6,
		Name: mangleTableName,
	}
)
// Nft holds the fields of our nftables firewall
type Nft struct {
	sync.Mutex
	config.Config
	common.Common
	// conn is the netlink connection to the kernel nftables subsystem.
	conn *nftables.Conn
	// mangle/filter tables, IPv4 and IPv6.
	mangleTables []*nftables.Table
	filterTables []*nftables.Table
	// per-table chains where our interception rules live.
	outputChains map[*nftables.Table]*nftables.Chain
	inputChains map[*nftables.Table]*nftables.Chain
	// chains caches user-defined rules (reuses the iptables structure).
	chains iptables.SystemChains
}
// NewNft creates a new netlink connection to manage nftables rules.
// (Note: it returns the raw *nftables.Conn, not an *Nft object.)
func NewNft() *nftables.Conn {
	return &nftables.Conn{}
}
// Fw initializes a new nftables object.
// Unlike the iptables backend, no external binary is required, so this
// constructor never returns an error.
func Fw() (*Nft, error) {
	nft := &Nft{
		outputChains: make(map[*nftables.Table]*nftables.Chain),
		inputChains:  make(map[*nftables.Table]*nftables.Chain),
		chains:       iptables.SystemChains{Rules: make(map[string]config.FwRule)},
	}
	return nft, nil
}
// Name returns the name of the firewall
func (n *Nft) Name() string {
return Name
}
// Init inserts the firewall rules and starts monitoring for firewall
// changes.
// qNum is the netfilter queue number where intercepted connections are sent.
// Calling Init while already running is a no-op.
func (n *Nft) Init(qNum *int) {
	if n.IsRunning() {
		return
	}
	n.SetQueueNum(qNum)
	n.conn = NewNft()
	// In order to clean up any existing firewall rule before start,
	// we need to load the fw configuration first.
	n.NewSystemFwConfig(n.preloadConfCallback)
	go n.MonitorSystemFw(n.AddSystemRules)
	n.LoadDiskConfiguration(false)
	// start from a clean state
	n.CleanRules(false)
	n.AddSystemRules()
	n.InsertRules()
	// start monitoring firewall rules to intercept network traffic.
	n.NewRulesChecker(n.AreRulesLoaded, n.reloadRulesCallback)
	n.Running = true
}
// Stop deletes the firewall rules, allowing network traffic.
// Calling Stop while not running is a no-op.
func (n *Nft) Stop() {
	if !n.IsRunning() {
		return
	}
	n.StopConfigWatcher()
	n.StopCheckingRules()
	n.CleanRules(log.GetLogLevel() == log.DEBUG)
	n.Running = false
}
// InsertRules adds fw rules to intercept connections.
// Previous interception rules are deleted first, and the global tables and
// chains are (re)created. A conntrack rule failure is fatal for the daemon.
func (n *Nft) InsertRules() {
	n.delInterceptionRules()
	n.addGlobalTables()
	n.addGlobalChains()
	if err, _ := n.QueueDNSResponses(true, true); err != nil {
		log.Error("Error while Running DNS nftables rule: %s", err)
	} else if err, _ = n.QueueConnections(true, true); err != nil {
		log.Fatal("Error while Running conntrack nftables rule: %s", err)
	}
}
// CleanRules deletes the rules we added: first the interception rules
// (committing the change to the kernel), then the user-defined system rules.
func (n *Nft) CleanRules(logErrors bool) {
	n.delInterceptionRules()
	if err := n.conn.Flush(); err != nil && logErrors {
		log.Error("Error cleaning nftables tables: %s", err)
	}
	n.DeleteSystemRules(true, logErrors)
}
opensnitch-1.5.8.1/daemon/firewall/nftables/rules.go 0000664 0000000 0000000 00000012500 14401326716 0022367 0 ustar 00root root 0000000 0000000 package nftables
import (
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/google/nftables"
"github.com/google/nftables/binaryutil"
"github.com/google/nftables/expr"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
// addGlobalTables creates the filter and mangle tables (IPv4 and IPv6)
// and commits them to the kernel in a single flush.
func (n *Nft) addGlobalTables() error {
	filter4 := n.conn.AddTable(filterTable)
	filter6 := n.conn.AddTable(filterTable6)
	mangle4 := n.conn.AddTable(mangleTable)
	mangle6 := n.conn.AddTable(mangleTable6)
	n.mangleTables = []*nftables.Table{mangle4, mangle6}
	n.filterTables = []*nftables.Table{filter4, filter6}
	// apply changes
	return n.conn.Flush()
}
// TODO: add more parameters, make it more generic
// addChain queues the creation of a chain with the given name, type, hook
// and priority on the given table. The change is not committed until the
// caller flushes the connection.
func (n *Nft) addChain(name string, table *nftables.Table, prio *nftables.ChainPriority, ctype nftables.ChainType, hook *nftables.ChainHook) *nftables.Chain {
	// nft list chains
	return n.conn.AddChain(&nftables.Chain{
		Name: name,
		Table: table,
		Type: ctype,
		Hooknum: hook,
		Priority: prio,
		//Policy: nftables.ChainPolicyDrop
	})
}
// addGlobalChains creates the output chains in the mangle tables and the
// input chains in the filter tables, and commits them to the kernel.
// It returns the flush error, if any.
func (n *Nft) addGlobalChains() error {
	// nft list tables
	for _, table := range n.mangleTables {
		n.outputChains[table] = n.addChain(outputChain, table, nftables.ChainPriorityMangle, nftables.ChainTypeRoute, nftables.ChainHookOutput)
	}
	for _, table := range n.filterTables {
		n.inputChains[table] = n.addChain(inputChain, table, nftables.ChainPriorityFilter, nftables.ChainTypeFilter, nftables.ChainHookInput)
	}
	// apply changes
	if err := n.conn.Flush(); err != nil {
		// previously the error was logged but swallowed (nil was always
		// returned); propagate it so callers can react. The message also said
		// "mangle tables" although this function adds chains.
		log.Warning("Error adding nftables chains: %v", err)
		return err
	}
	return nil
}
// QueueDNSResponses redirects DNS responses to us, in order to keep a cache
// of resolved domains.
// nft insert rule ip filter input udp sport 53 queue num 0 bypass
// NOTE(review): unlike the iptables implementation, the enable parameter is
// not used here — calling with enable=false still inserts the rules; deletion
// is done via delInterceptionRules instead. Confirm this is intentional.
func (n *Nft) QueueDNSResponses(enable bool, logError bool) (error, error) {
	if n.conn == nil {
		return nil, nil
	}
	for _, table := range n.filterTables {
		// nft list ruleset -a
		n.conn.InsertRule(&nftables.Rule{
			Position: 0,
			Table: table,
			Chain: n.inputChains[table],
			Exprs: []expr.Any{
				// match: meta l4proto udp
				&expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1},
				&expr.Cmp{
					Op: expr.CmpOpEq,
					Register: 1,
					Data: []byte{unix.IPPROTO_UDP},
				},
				// match: udp sport 53 (source port, offset 0 of the transport header)
				&expr.Payload{
					DestRegister: 1,
					Base: expr.PayloadBaseTransportHeader,
					Offset: 0,
					Len: 2,
				},
				&expr.Cmp{
					Op: expr.CmpOpEq,
					Register: 1,
					Data: binaryutil.BigEndian.PutUint16(uint16(53)),
				},
				// action: queue num <QueueNum> bypass
				&expr.Queue{
					Num: n.QueueNum,
					Flag: expr.QueueFlagBypass,
				},
			},
			// rule key, to allow get it later by key
			UserData: []byte(fwKey),
		})
	}
	// apply changes
	if err := n.conn.Flush(); err != nil {
		return err, nil
	}
	return nil, nil
}
// QueueConnections inserts the firewall rule which redirects connections to us.
// They are queued until the user denies/accept them, or reaches a timeout.
// nft insert rule ip mangle OUTPUT ct state new queue num 0 bypass
// NOTE(review): enable only controls the conntrack flush; the rules are
// inserted unconditionally (deletion happens via delInterceptionRules).
func (n *Nft) QueueConnections(enable bool, logError bool) (error, error) {
	if n.conn == nil {
		return nil, nil
	}
	if enable {
		// flush conntrack as soon as netfilter rule is set. This ensures that already-established
		// connections will go to netfilter queue.
		if err := netlink.ConntrackTableFlush(netlink.ConntrackTable); err != nil {
			log.Error("nftables, error in ConntrackTableFlush %s", err)
		}
	}
	for _, table := range n.mangleTables {
		n.conn.InsertRule(&nftables.Rule{
			Position: 0,
			Table: table,
			Chain: n.outputChains[table],
			Exprs: []expr.Any{
				// match: ct state & (NEW|RELATED) != 0
				&expr.Ct{Register: 1, SourceRegister: false, Key: expr.CtKeySTATE},
				&expr.Bitwise{
					SourceRegister: 1,
					DestRegister: 1,
					Len: 4,
					Mask: binaryutil.NativeEndian.PutUint32(expr.CtStateBitNEW | expr.CtStateBitRELATED),
					Xor: binaryutil.NativeEndian.PutUint32(0),
				},
				&expr.Cmp{Op: expr.CmpOpNeq, Register: 1, Data: []byte{0, 0, 0, 0}},
				// action: queue num <QueueNum> bypass
				&expr.Queue{
					Num: n.QueueNum,
					Flag: expr.QueueFlagBypass,
				},
			},
			// rule key, to allow get it later by key
			UserData: []byte(fwKey),
		})
	}
	// apply changes
	if err := n.conn.Flush(); err != nil {
		return err, nil
	}
	return nil, nil
}
// delInterceptionRules deletes every rule tagged with our fwKey.
func (n *Nft) delInterceptionRules() {
	n.delRulesByKey(fwKey)
}
// delRulesByKey walks every chain and deletes the rules whose UserData
// matches the given key, committing per chain. Chains left empty after the
// deletion are removed as well.
func (n *Nft) delRulesByKey(key string) {
	chains, err := n.conn.ListChains()
	if err != nil {
		log.Warning("nftables, error listing chains: %s", err)
		return
	}
	commit := false
	for _, c := range chains {
		rules, err := n.conn.GetRule(c.Table, c)
		if err != nil {
			log.Warning("nftables, error listing rules (%s): %s", c.Table.Name, err)
			continue
		}
		commit = false
		for _, r := range rules {
			if string(r.UserData) != key {
				continue
			}
			// just passing the rule object doesn't work.
			if err := n.conn.DelRule(&nftables.Rule{
				Table: c.Table,
				Chain: c,
				Handle: r.Handle,
			}); err != nil {
				log.Warning("nftables, error adding rule to be deleted (%s/%s): %s", c.Table.Name, c.Name, err)
				continue
			}
			commit = true
		}
		if commit {
			if err := n.conn.Flush(); err != nil {
				log.Warning("nftables, error deleting interception rules (%s/%s): %s", c.Table.Name, c.Name, err)
			}
		}
		// if we deleted something and the chain is now empty, drop the chain too.
		if rules, err := n.conn.GetRule(c.Table, c); err == nil {
			if commit && len(rules) == 0 {
				n.conn.DelChain(c)
				n.conn.Flush()
			}
		}
	}
	return
}
opensnitch-1.5.8.1/daemon/firewall/nftables/system.go 0000664 0000000 0000000 00000002251 14401326716 0022563 0 ustar 00root root 0000000 0000000 package nftables
import (
"github.com/evilsocket/opensnitch/daemon/firewall/config"
"github.com/evilsocket/opensnitch/daemon/log"
)
// CreateSystemRule create the custom firewall chains and adds them to system.
// nft insert rule ip opensnitch-filter opensnitch-input udp dport 1153
// Not implemented yet for the nftables backend.
func (n *Nft) CreateSystemRule(rule *config.FwRule, logErrors bool) {
	// TODO
}
// DeleteSystemRules deletes the system rules.
// If force is false and the rule has not been previously added,
// it won't try to delete the rules. Otherwise it'll try to delete them.
// Not implemented yet for the nftables backend.
func (n *Nft) DeleteSystemRules(force, logErrors bool) {
	// TODO
}
// AddSystemRule inserts a new rule.
// Not implemented yet for the nftables backend; always returns nil, nil.
func (n *Nft) AddSystemRule(rule *config.FwRule, enable bool) (error, error) {
	// TODO
	return nil, nil
}
// AddSystemRules creates the system firewall from configuration.
// Note: the Create/Add/Delete helpers it calls are still TODO stubs in this
// backend, so this is currently a no-op beyond iterating the config.
func (n *Nft) AddSystemRules() {
	n.DeleteSystemRules(true, false)
	for _, r := range n.SysConfig.SystemRules {
		n.CreateSystemRule(r.Rule, true)
		n.AddSystemRule(r.Rule, true)
	}
}
// preloadConfCallback gets called before the fw configuration is reloaded
func (n *Nft) preloadConfCallback() {
	n.DeleteSystemRules(true, log.GetLogLevel() == log.DEBUG)
}
opensnitch-1.5.8.1/daemon/firewall/rules.go 0000664 0000000 0000000 00000003566 14401326716 0020605 0 ustar 00root root 0000000 0000000 package firewall
import (
"github.com/evilsocket/opensnitch/daemon/firewall/config"
"github.com/evilsocket/opensnitch/daemon/firewall/iptables"
"github.com/evilsocket/opensnitch/daemon/firewall/nftables"
"github.com/evilsocket/opensnitch/daemon/log"
)
// Firewall is the interface that all firewalls (iptables, nftables) must implement.
type Firewall interface {
	// Init inserts the rules and starts the monitors; takes the queue number.
	Init(*int)
	// Stop deletes the rules and stops the monitors.
	Stop()
	// Name returns the identifier of the implementation.
	Name() string
	// IsRunning reports whether the firewall has been initialized.
	IsRunning() bool
	// SetQueueNum sets the netfilter queue where connections are sent.
	SetQueueNum(num *int)
	// InsertRules adds the traffic-interception rules.
	InsertRules()
	// QueueDNSResponses/QueueConnections manage the interception rules
	// (enable, logErrors) and return the IPv4/IPv6 errors.
	QueueDNSResponses(bool, bool) (error, error)
	QueueConnections(bool, bool) (error, error)
	// CleanRules deletes the rules we added (logErrors).
	CleanRules(bool)
	// System-rule management, driven by the system-fw.json configuration.
	AddSystemRules()
	DeleteSystemRules(bool, bool)
	AddSystemRule(*config.FwRule, bool) (error, error)
	CreateSystemRule(*config.FwRule, bool)
}
var fw Firewall
// IsRunning returns if the firewall is running or not.
// It is false while Init has not selected and started an implementation.
func IsRunning() bool {
	return fw != nil && fw.IsRunning()
}
// CleanRules deletes the rules we added.
// It's a no-op when no firewall implementation has been initialized.
func CleanRules(logErrors bool) {
	if fw != nil {
		fw.CleanRules(logErrors)
	}
}
// Stop deletes the firewall rules, allowing network traffic.
// It's a no-op when no firewall implementation has been initialized.
func Stop() {
	if fw != nil {
		fw.Stop()
	}
}
// Init initializes the firewall and loads firewall rules.
// fwType selects the preferred backend ("iptables" or "nftables"); if the
// iptables binary is not usable, nftables is used as a fallback.
func Init(fwType string, qNum *int) {
	var err error
	if fwType == iptables.Name {
		fw, err = iptables.Fw()
		if err != nil {
			log.Warning("iptables not available: %s", err)
		}
	}
	// if iptables is not installed, we can add nftables rules directly to the kernel,
	// without relying on any binaries.
	if fwType == nftables.Name || err != nil {
		fw, err = nftables.Fw()
		if err != nil {
			log.Warning("nftables not available: %s", err)
		}
	}
	if err != nil {
		log.Warning("firewall error: %s, not iptables nor nftables are available or are usable. Please, report it on github.", err)
		return
	}
	if fw == nil {
		// also reached when fwType matched neither known backend.
		log.Error("firewall not initialized.")
		return
	}
	// stop any previous instance before (re)initializing.
	fw.Stop()
	fw.Init(qNum)
	log.Info("Using %s firewall", fw.Name())
}
opensnitch-1.5.8.1/daemon/go.mod 0000664 0000000 0000000 00000000746 14401326716 0016422 0 ustar 00root root 0000000 0000000 module github.com/evilsocket/opensnitch/daemon
go 1.15
require (
github.com/fsnotify/fsnotify v1.4.7
github.com/golang/protobuf v1.5.0
github.com/google/gopacket v1.1.14
github.com/google/nftables v0.1.0
github.com/iovisor/gobpf v0.2.0
github.com/vishvananda/netlink v0.0.0-20210811191823-e1a867c6b452
golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
google.golang.org/grpc v1.32.0
google.golang.org/protobuf v1.26.0
)
opensnitch-1.5.8.1/daemon/log/ 0000775 0000000 0000000 00000000000 14401326716 0016066 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/log/log.go 0000664 0000000 0000000 00000010054 14401326716 0017176 0 ustar 00root root 0000000 0000000 package log
import (
"fmt"
"os"
"strings"
"sync"
"time"
)
// Handler is the prototype of a printf-style logging function (format plus
// arguments). Not referenced in this file; presumably used by other packages.
type Handler func(format string, args ...interface{})
// https://misc.flogisoft.com/bash/tip_colors_and_formatting
const (
BOLD = "\033[1m"
DIM = "\033[2m"
RED = "\033[31m"
GREEN = "\033[32m"
BLUE = "\033[34m"
YELLOW = "\033[33m"
FG_BLACK = "\033[30m"
FG_WHITE = "\033[97m"
BG_DGRAY = "\033[100m"
BG_RED = "\033[41m"
BG_GREEN = "\033[42m"
BG_YELLOW = "\033[43m"
BG_LBLUE = "\033[104m"
RESET = "\033[0m"
)
// log level constants
const (
DEBUG = iota
INFO
IMPORTANT
WARNING
ERROR
FATAL
)
// Global logger state. Output and MinLevel are guarded by mutex.
var (
	// WithColors enables ANSI color escapes in the output.
	WithColors = true
	// Output is the destination of the log entries.
	Output = os.Stdout
	// StdoutFile is the special path that maps back to standard output.
	StdoutFile = "/dev/stdout"
	// DateFormat is the timestamp layout of each log line.
	DateFormat = "2006-01-02 15:04:05"
	// MinLevel is the minimum level a message needs to be printed.
	MinLevel = INFO
	mutex = &sync.RWMutex{}
	// labels maps each level to its 3-letter tag.
	labels = map[int]string{
		DEBUG: "DBG",
		INFO: "INF",
		IMPORTANT: "IMP",
		WARNING: "WAR",
		ERROR: "ERR",
		FATAL: "!!!",
	}
	// colors maps each level to its terminal color combination.
	colors = map[int]string{
		DEBUG: DIM + FG_BLACK + BG_DGRAY,
		INFO: FG_WHITE + BG_GREEN,
		IMPORTANT: FG_WHITE + BG_LBLUE,
		WARNING: FG_WHITE + BG_YELLOW,
		ERROR: FG_WHITE + BG_RED,
		FATAL: FG_WHITE + BG_RED + BOLD,
	}
)
// Wrap surrounds a text with the given terminal effect, when colors are
// enabled; otherwise the text is returned untouched.
func Wrap(s, effect string) string {
	if !WithColors {
		return s
	}
	return effect + s + RESET
}
// Dim dims a text (no-op when colors are disabled; same for the helpers below).
func Dim(s string) string {
	return Wrap(s, DIM)
}
// Bold bolds a text
func Bold(s string) string {
	return Wrap(s, BOLD)
}
// Red reds the text
func Red(s string) string {
	return Wrap(s, RED)
}
// Green greens the text
func Green(s string) string {
	return Wrap(s, GREEN)
}
// Blue blues the text
func Blue(s string) string {
	return Wrap(s, BLUE)
}
// Yellow yellows the text
func Yellow(s string) string {
	return Wrap(s, YELLOW)
}
// Raw prints out a text without colors, level tags or timestamp,
// ignoring the configured minimum level.
func Raw(format string, args ...interface{}) {
	mutex.Lock()
	defer mutex.Unlock()
	fmt.Fprintf(Output, format, args...)
}
// SetLogLevel sets the minimum level a message needs to be printed.
func SetLogLevel(newLevel int) {
	mutex.Lock()
	defer mutex.Unlock()
	MinLevel = newLevel
}
// GetLogLevel returns the current log level configured.
func GetLogLevel() int {
	// read-only access: use the read lock (mutex is a sync.RWMutex) so
	// concurrent readers don't serialize each other.
	mutex.RLock()
	defer mutex.RUnlock()
	return MinLevel
}
// Log prints out a text with the given color and format, when level is at
// least the configured minimum. Each line is "[timestamp] LVL message" with a
// trailing newline appended if missing.
func Log(level int, format string, args ...interface{}) {
	mutex.Lock()
	defer mutex.Unlock()
	if level >= MinLevel {
		label := labels[level]
		color := colors[level]
		// timestamps are always UTC
		when := time.Now().UTC().Format(DateFormat)
		what := fmt.Sprintf(format, args...)
		if strings.HasSuffix(what, "\n") == false {
			what += "\n"
		}
		l := Dim("[%s]")
		r := Wrap(" %s ", color) + " %s"
		fmt.Fprintf(Output, l+" "+r, when, label, what)
	}
}
// setDefaultLogOutput points the logger back at standard output.
func setDefaultLogOutput() {
	mutex.Lock()
	defer mutex.Unlock()
	Output = os.Stdout
}
// OpenFile opens a file to print out the logs.
// On failure it logs the error and falls back to stdout, returning the error.
func OpenFile(logFile string) (err error) {
	if logFile == StdoutFile {
		setDefaultLogOutput()
		return
	}
	// open into a temporary first: previously Output was assigned directly,
	// leaving it nil (and dropping concurrent log lines) when the open failed;
	// the assignment also happened without holding the mutex.
	var fd *os.File
	if fd, err = os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err != nil {
		Error("Error opening log: %s %s", logFile, err)
		//fallback to stdout
		setDefaultLogOutput()
		return err
	}
	mutex.Lock()
	Output = fd
	mutex.Unlock()
	Important("Start writing logs to %s", logFile)
	return err
}
// Close closes the current output file descriptor
func Close() {
if Output != os.Stdout {
Output.Close()
}
}
// Debug is the log level for debugging purposes
func Debug(format string, args ...interface{}) {
	Log(DEBUG, format, args...)
}
// Info is the log level for informative messages
func Info(format string, args ...interface{}) {
	Log(INFO, format, args...)
}
// Important is the log level for things that must pay attention
func Important(format string, args ...interface{}) {
	Log(IMPORTANT, format, args...)
}
// Warning is the log level for non-critical errors
func Warning(format string, args ...interface{}) {
	Log(WARNING, format, args...)
}
// Error is the log level for errors that should be corrected
func Error(format string, args ...interface{}) {
	Log(ERROR, format, args...)
}
// Fatal is the log level for errors that must be corrected before continue.
// It terminates the process with exit code 1 after logging.
func Fatal(format string, args ...interface{}) {
	Log(FATAL, format, args...)
	os.Exit(1)
}
opensnitch-1.5.8.1/daemon/main.go 0000664 0000000 0000000 00000026144 14401326716 0016567 0 ustar 00root root 0000000 0000000 package main
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
golog "log"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"syscall"
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/dns"
"github.com/evilsocket/opensnitch/daemon/firewall"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/netfilter"
"github.com/evilsocket/opensnitch/daemon/netlink"
"github.com/evilsocket/opensnitch/daemon/procmon/monitor"
"github.com/evilsocket/opensnitch/daemon/rule"
"github.com/evilsocket/opensnitch/daemon/statistics"
"github.com/evilsocket/opensnitch/daemon/ui"
)
// Package-level state shared by main(), the signal handler and the
// packet workers. Command-line flags below overwrite the defaults.
var (
	showVersion    = false
	procmonMethod  = ""
	logFile        = ""
	rulesPath      = "rules"
	noLiveReload   = false
	queueNum       = 0
	repeatQueueNum int // will be set later to queueNum + 1
	workers        = 16
	debug          = false
	warning        = false
	important      = false
	errorlog       = false

	uiSocket = ""
	uiClient = (*ui.Client)(nil)

	cpuProfile = ""
	memProfile = ""

	// ctx/cancel drive shutdown of the workers and the main loop.
	ctx    = (context.Context)(nil)
	cancel = (context.CancelFunc)(nil)

	err     = (error)(nil)
	rules   = (*rule.Loader)(nil)
	stats   = (*statistics.Statistics)(nil)
	queue   = (*netfilter.Queue)(nil)
	// repeatPktChan carries packets re-injected into the secondary queue
	// while the UI is being asked for a verdict (see acceptOrDeny).
	repeatPktChan = (<-chan netfilter.Packet)(nil)
	pktChan       = (<-chan netfilter.Packet)(nil)
	wrkChan       = (chan netfilter.Packet)(nil)
	sigChan       = (chan os.Signal)(nil)
	exitChan      = (chan bool)(nil)
)
// init registers the daemon's command-line flags.
// Fix: the defaults for -version and -no-live-reload were passed as the
// unrelated `debug` variable (false at registration time, so behavior is
// unchanged) — use the literal false so the intent is explicit.
func init() {
	flag.BoolVar(&showVersion, "version", false, "Show daemon version of this executable and exit.")
	flag.StringVar(&procmonMethod, "process-monitor-method", procmonMethod, "How to search for processes path. Options: ftrace, audit (experimental), ebpf (experimental), proc (default)")
	flag.StringVar(&uiSocket, "ui-socket", uiSocket, "Path the UI gRPC service listener (https://github.com/grpc/grpc/blob/master/doc/naming.md).")
	flag.StringVar(&rulesPath, "rules-path", rulesPath, "Path to load JSON rules from.")
	flag.IntVar(&queueNum, "queue-num", queueNum, "Netfilter queue number.")
	flag.IntVar(&workers, "workers", workers, "Number of concurrent workers.")
	flag.BoolVar(&noLiveReload, "no-live-reload", false, "Disable rules live reloading.")
	flag.StringVar(&logFile, "log-file", logFile, "Write logs to this file instead of the standard output.")
	flag.BoolVar(&debug, "debug", debug, "Enable debug level logs.")
	flag.BoolVar(&warning, "warning", warning, "Enable warning level logs.")
	flag.BoolVar(&important, "important", important, "Enable important level logs.")
	flag.BoolVar(&errorlog, "error", errorlog, "Enable error level logs.")
	flag.StringVar(&cpuProfile, "cpu-profile", cpuProfile, "Write CPU profile to this file.")
	flag.StringVar(&memProfile, "mem-profile", memProfile, "Write memory profile to this file.")
}
func overwriteLogging() bool {
return debug || warning || important || errorlog || logFile != ""
}
// setupLogging configures the log level from the command-line switches
// (most verbose wins) and points the logger at the requested file, or
// stdout when no file was given.
func setupLogging() {
	golog.SetOutput(ioutil.Discard)
	switch {
	case debug:
		log.SetLogLevel(log.DEBUG)
	case warning:
		log.SetLogLevel(log.WARNING)
	case important:
		log.SetLogLevel(log.IMPORTANT)
	case errorlog:
		log.SetLogLevel(log.ERROR)
	default:
		log.SetLogLevel(log.INFO)
	}

	target := logFile
	if target == "" {
		target = log.StdoutFile
	}
	log.Close()
	if err := log.OpenFile(target); err != nil {
		log.Error("Error opening user defined log: %s %s", target, err)
	}
}
// setupSignals installs a handler that cancels the root context on
// HUP/INT/TERM/QUIT, triggering a clean shutdown of the main loop.
func setupSignals() {
	sigChan = make(chan os.Signal, 1)
	exitChan = make(chan bool, workers+1)
	watched := []os.Signal{
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT,
	}
	signal.Notify(sigChan, watched...)
	go func() {
		s := <-sigChan
		log.Raw("\n")
		log.Important("Got signal: %v", s)
		cancel()
	}()
}
// worker consumes packets from wrkChan and applies the rule engine to
// each one, until the context is cancelled or the channel is closed.
func worker(id int) {
	log.Debug("Worker #%d started.", id)
	defer log.Debug("worker #%d exit", id)
	for {
		select {
		case <-ctx.Done():
			return
		default:
			pkt, open := <-wrkChan
			if !open {
				log.Debug("worker channel closed %d", id)
				return
			}
			onPacket(pkt)
		}
	}
}
// setupWorkers creates the shared packet channel and launches the
// configured number of worker goroutines.
func setupWorkers() {
	log.Debug("Starting %d workers ...", workers)
	wrkChan = make(chan netfilter.Packet)
	for id := 0; id < workers; id++ {
		go worker(id)
	}
}
// doCleanup tears everything down in dependency order: firewall rules,
// process monitor, UI connection and both netfilter queues, then
// finishes any requested CPU/memory profiling.
func doCleanup(queue, repeatQueue *netfilter.Queue) {
	log.Info("Cleaning up ...")
	firewall.Stop()
	monitor.End()
	uiClient.Close()
	queue.Close()
	repeatQueue.Close()

	if cpuProfile != "" {
		pprof.StopCPUProfile()
	}
	if memProfile == "" {
		return
	}
	f, err := os.Create(memProfile)
	if err != nil {
		fmt.Printf("Could not create memory profile: %s\n", err)
		return
	}
	defer f.Close()
	runtime.GC() // get up-to-date statistics
	if err := pprof.WriteHeapProfile(f); err != nil {
		fmt.Printf("Could not write memory profile: %s\n", err)
	}
}
// onPacket decides the fate of one intercepted packet: DNS answers are
// tracked and accepted, our own traffic is accepted, everything else
// goes through the rule engine.
func onPacket(packet netfilter.Packet) {
	// DNS response: parse, track and accept.
	if dns.TrackAnswers(packet.Packet) {
		packet.SetVerdictAndMark(netfilter.NF_ACCEPT, packet.Mark)
		stats.OnDNSResponse()
		return
	}

	con := conman.Parse(packet, uiClient.InterceptUnknown())
	if con == nil {
		applyDefaultAction(&packet)
		return
	}

	// never block the daemon's own connections
	if con.Process.ID == os.Getpid() {
		packet.SetVerdict(netfilter.NF_ACCEPT)
		return
	}

	matched := acceptOrDeny(&packet, con)
	stats.OnConnectionEvent(con, matched, matched == nil)
}
// applyDefaultAction accepts or drops the packet according to the
// default action configured in the UI.
func applyDefaultAction(packet *netfilter.Packet) {
	if uiClient.DefaultAction() != rule.Allow {
		packet.SetVerdict(netfilter.NF_DROP)
		return
	}
	packet.SetVerdictAndMark(netfilter.NF_ACCEPT, packet.Mark)
}
// acceptOrDeny resolves the verdict for one connection: first against the
// loaded rules, and when nothing matches, by asking the UI. While the UI
// is being asked, the packet is re-injected through a secondary netfilter
// queue so the primary queue is not blocked. Returns the rule that
// decided the verdict, or nil when the default action was applied.
func acceptOrDeny(packet *netfilter.Packet, con *conman.Connection) *rule.Rule {
	r := rules.FindFirstMatch(con)
	if r == nil {
		// no rule matched
		// Note that as soon as we set a verdict on a packet, the next packet in the netfilter queue
		// will begin to be processed even if this function hasn't yet returned

		// send a request to the UI client if
		// 1) connected and running and 2) we are not already asking
		if uiClient.Connected() == false || uiClient.GetIsAsking() == true {
			applyDefaultAction(packet)
			log.Debug("UI is not running or busy, connected: %v, running: %v", uiClient.Connected(), uiClient.GetIsAsking())
			return nil
		}

		uiClient.SetIsAsking(true)
		defer uiClient.SetIsAsking(false)

		// In order not to block packet processing, we send our packet to a different netfilter queue
		// and then immediately pull it back out of that queue
		packet.SetRequeueVerdict(uint16(repeatQueueNum))

		var o bool
		var pkt netfilter.Packet
		// don't wait for the packet longer than 1 sec
		select {
		case pkt, o = <-repeatPktChan:
			if !o {
				log.Debug("error while receiving packet from repeatPktChan")
				return nil
			}
		case <-time.After(1 * time.Second):
			log.Debug("timed out while receiving packet from repeatPktChan")
			return nil
		}

		// check if the pulled out packet is the same we put in
		if res := bytes.Compare(packet.Packet.Data(), pkt.Packet.Data()); res != 0 {
			log.Error("The packet which was requeued has changed abruptly. This should never happen. Please report this incident to the Opensnitch developers. %v %v ", packet, pkt)
			return nil
		}
		// from here on, operate on the re-queued copy of the packet
		packet = &pkt

		// Ask blocks until the user (or a timeout) answers.
		r = uiClient.Ask(con)
		if r == nil {
			log.Error("Invalid rule received, applying default action")
			applyDefaultAction(packet)
			return nil
		}
		ok := false
		pers := ""
		action := string(r.Action)
		if r.Action == rule.Allow {
			action = log.Green(action)
		} else {
			action = log.Red(action)
		}

		// check if and how the rule needs to be saved
		if r.Duration == rule.Always {
			pers = "Saved"
			// add to the loaded rules and persist on disk
			if err := rules.Add(r, true); err != nil {
				log.Error("Error while saving rule: %s", err)
			} else {
				ok = true
			}
		} else {
			pers = "Added"
			// add to the rules but do not save to disk
			if err := rules.Add(r, false); err != nil {
				log.Error("Error while adding rule: %s", err)
			} else {
				ok = true
			}
		}
		if ok {
			log.Important("%s new rule: %s if %s", pers, action, r.Operator.String())
		}
	}

	if packet == nil {
		log.Debug("Packet nil after processing rules")
		return r
	}

	// apply the matched (or freshly created) rule to the packet
	if r.Enabled == false {
		applyDefaultAction(packet)
		ruleName := log.Green(r.Name)
		log.Info("DISABLED (%s) %s %s -> %s:%d (%s)", uiClient.DefaultAction(), log.Bold(log.Green("✔")), log.Bold(con.Process.Path), log.Bold(con.To()), con.DstPort, ruleName)
	} else if r.Action == rule.Allow {
		packet.SetVerdictAndMark(netfilter.NF_ACCEPT, packet.Mark)
		ruleName := log.Green(r.Name)
		if r.Operator.Operand == rule.OpTrue {
			ruleName = log.Dim(r.Name)
		}
		log.Debug("%s %s -> %s:%d (%s)", log.Bold(log.Green("✔")), log.Bold(con.Process.Path), log.Bold(con.To()), con.DstPort, ruleName)
	} else {
		if r.Action == rule.Reject {
			// actively tear the socket down, not just drop the packet
			netlink.KillSocket(con.Protocol, con.SrcIP, con.SrcPort, con.DstIP, con.DstPort)
		}
		packet.SetVerdict(netfilter.NF_DROP)
		log.Debug("%s %s -> %s:%d (%s)", log.Bold(log.Red("✘")), log.Bold(con.Process.Path), log.Bold(con.To()), con.DstPort, log.Red(r.Name))
	}
	return r
}
// main wires the daemon together: flags, logging, rules, the two
// netfilter queues (primary + requeue), the UI client and the firewall,
// then pumps packets from the primary queue to the workers until the
// context is cancelled by a signal.
func main() {
	ctx, cancel = context.WithCancel(context.Background())
	defer cancel()

	flag.Parse()
	if showVersion {
		fmt.Println(core.Version)
		os.Exit(0)
	}

	setupLogging()

	// optional CPU profiling; stopped later in doCleanup()
	if cpuProfile != "" {
		if f, err := os.Create(cpuProfile); err != nil {
			log.Fatal("%s", err)
		} else if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("%s", err)
		}
	}

	log.Important("Starting %s v%s", core.Name, core.Version)

	// NOTE(review): := shadows the package-level rulesPath (and err) with
	// the expanded path — intentional, the expansion is only needed here.
	rulesPath, err := core.ExpandPath(rulesPath)
	if err != nil {
		log.Fatal("%s", err)
	}

	setupSignals()

	log.Info("Loading rules from %s ...", rulesPath)
	if rules, err = rule.NewLoader(!noLiveReload); err != nil {
		log.Fatal("%s", err)
	} else if err = rules.Load(rulesPath); err != nil {
		log.Fatal("%s", err)
	}
	stats = statistics.New(rules)

	// prepare the queue
	setupWorkers()
	queue, err := netfilter.NewQueue(uint16(queueNum))
	if err != nil {
		log.Warning("Is opensnitchd already running?")
		log.Fatal("Error while creating queue #%d: %s", queueNum, err)
	}
	pktChan = queue.Packets()

	// secondary queue used by acceptOrDeny() to park packets while the
	// UI is being asked for a verdict
	repeatQueueNum = queueNum + 1
	repeatQueue, rqerr := netfilter.NewQueue(uint16(repeatQueueNum))
	if rqerr != nil {
		log.Warning("Is opensnitchd already running?")
		log.Fatal("Error while creating queue #%d: %s", repeatQueueNum, rqerr)
	}
	repeatPktChan = repeatQueue.Packets()

	uiClient = ui.NewClient(uiSocket, stats, rules)
	stats.SetConfig(uiClient.GetStatsConfig())

	// queue is ready, run firewall rules
	firewall.Init(uiClient.GetFirewallType(), &queueNum)

	// command-line logging switches win over the UI-provided config
	if overwriteLogging() {
		setupLogging()
	}
	// overwrite monitor method from configuration if the user has passed
	// the option via command line.
	if procmonMethod != "" {
		if err := monitor.ReconfigureMonitorMethod(procmonMethod); err != nil {
			log.Warning("Unable to set process monitor method via parameter: %v", err)
		}
	}

	log.Info("Running on netfilter queue #%d ...", queueNum)
	for {
		select {
		case <-ctx.Done():
			goto Exit
		case pkt, ok := <-pktChan:
			if !ok {
				goto Exit
			}
			wrkChan <- pkt
		}
	}
Exit:
	close(wrkChan)
	doCleanup(queue, repeatQueue)
	os.Exit(0)
}
opensnitch-1.5.8.1/daemon/netfilter/ 0000775 0000000 0000000 00000000000 14401326716 0017301 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/netfilter/packet.go 0000664 0000000 0000000 00000002516 14401326716 0021103 0 ustar 00root root 0000000 0000000 package netfilter
import "C"
import (
"github.com/google/gopacket"
)
// packet consts
const (
	// IPv4 is the IP version nibble carried by IPv4 packets (see IsIPv4).
	IPv4 = 4
)

// Verdict holds the action to perform on a packet (NF_DROP, NF_ACCEPT, etc)
type Verdict C.uint

// VerdictContainer bundles the verdict for a packet together with an
// optional mark and an optional replacement payload.
type VerdictContainer struct {
	Verdict Verdict
	Mark    uint32 // 0 means "no mark to set"
	Packet  []byte // nil means "keep the original payload"
}

// Packet holds the data of a network packet
type Packet struct {
	Packet gopacket.Packet
	Mark   uint32
	// verdictChannel is consumed by go_callback(), which waits on it for
	// the verdict to hand back to the kernel.
	verdictChannel  chan VerdictContainer
	UID             uint32
	NetworkProtocol uint8 // IP version, taken from the payload's first nibble
}
// SetVerdict emits a verdict on a packet, with no mark and the
// original payload.
func (p *Packet) SetVerdict(v Verdict) {
	container := VerdictContainer{Verdict: v, Mark: 0, Packet: nil}
	p.verdictChannel <- container
}
// SetVerdictAndMark emits a verdict on a packet and marks it, so the
// firewall rules don't send it through the queue again.
func (p *Packet) SetVerdictAndMark(v Verdict, mark uint32) {
	container := VerdictContainer{Verdict: v, Mark: mark, Packet: nil}
	p.verdictChannel <- container
}
// SetRequeueVerdict re-injects the packet into another netfilter queue,
// encoding the target queue id in the upper 16 bits of the verdict.
func (p *Packet) SetRequeueVerdict(newQueueId uint16) {
	verdict := Verdict(uint(NF_QUEUE) | (uint(newQueueId) << 16))
	p.verdictChannel <- VerdictContainer{Verdict: verdict, Packet: nil, Mark: 0}
}
// SetVerdictWithPacket emits a verdict while replacing the packet's
// payload with the given bytes.
func (p *Packet) SetVerdictWithPacket(v Verdict, packet []byte) {
	container := VerdictContainer{Verdict: v, Mark: 0, Packet: packet}
	p.verdictChannel <- container
}
// IsIPv4 reports whether the packet carries an IPv4 header.
func (p *Packet) IsIPv4() bool {
	version := p.NetworkProtocol
	return version == IPv4
}
opensnitch-1.5.8.1/daemon/netfilter/queue.c 0000664 0000000 0000000 00000000024 14401326716 0020565 0 ustar 00root root 0000000 0000000 #include "queue.h"
opensnitch-1.5.8.1/daemon/netfilter/queue.go 0000664 0000000 0000000 00000014647 14401326716 0020770 0 ustar 00root root 0000000 0000000 package netfilter
/*
#cgo pkg-config: libnetfilter_queue
#cgo CFLAGS: -Wall -I/usr/include
#cgo LDFLAGS: -L/usr/lib64/ -ldl
#include "queue.h"
*/
import "C"
import (
"fmt"
"os"
"sync"
"time"
"unsafe"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
)
const (
	AF_INET  = 2
	AF_INET6 = 10

	// netfilter verdicts (linux/netfilter.h)
	NF_DROP   Verdict = 0
	NF_ACCEPT Verdict = 1
	NF_STOLEN Verdict = 2
	NF_QUEUE  Verdict = 3
	NF_REPEAT Verdict = 4
	NF_STOP   Verdict = 5

	// defaults for nfq_set_queue_maxlen() / nfnl_rcvbufsiz()
	NF_DEFAULT_QUEUE_SIZE  uint32 = 4096
	NF_DEFAULT_PACKET_SIZE uint32 = 4096
)

var (
	// queueIndex maps a queue's idx cookie to its packet channel, so
	// go_callback() can route packets to the right queue.
	// Guarded by queueIndexLock.
	queueIndex            = make(map[uint32]*chan Packet, 0)
	queueIndexLock        = sync.RWMutex{}
	exitChan              = make(chan bool, 1)
	gopacketDecodeOptions = gopacket.DecodeOptions{Lazy: true, NoCopy: true}
)

// VerdictContainerC is the struct that contains the mark, action, length and
// payload of a packet.
// It's defined in queue.h, and filled on go_callback()
type VerdictContainerC C.verdictContainer

// Queue holds the information of a netfilter queue.
// The handles of the connection to the kernel and the created queue.
// A channel where the intercepted packets will be received.
// The ID of the queue.
type Queue struct {
	h       *C.struct_nfq_handle
	qh      *C.struct_nfq_q_handle
	fd      C.int
	packets chan Packet
	idx     uint32 // cookie identifying this queue in queueIndex
}
// NewQueue opens a new netfilter queue to receive intercepted packets.
// The queue's idx cookie is derived from the current time so each queue
// gets a unique entry in queueIndex.
func NewQueue(queueID uint16) (q *Queue, err error) {
	q = &Queue{
		idx:     uint32(time.Now().UnixNano()),
		packets: make(chan Packet),
	}
	if err = q.create(queueID); err != nil {
		return nil, err
	}
	if err = q.setup(); err != nil {
		return nil, err
	}
	go q.run(exitChan)
	return q, nil
}
// create opens the netlink handle, rebinds the AF_INET/AF_INET6 protocol
// families and creates netfilter queue queueID. On success, the queue's
// packet channel is registered in queueIndex so go_callback() can route
// packets back to it.
func (q *Queue) create(queueID uint16) (err error) {
	var ret C.int

	if q.h, err = C.nfq_open(); err != nil {
		return fmt.Errorf("Error opening Queue handle: %v", err)
	} else if ret, err = C.nfq_unbind_pf(q.h, AF_INET); err != nil || ret < 0 {
		return fmt.Errorf("Error unbinding existing q handler from AF_INET protocol family: %v", err)
	} else if ret, err = C.nfq_unbind_pf(q.h, AF_INET6); err != nil || ret < 0 {
		return fmt.Errorf("Error unbinding existing q handler from AF_INET6 protocol family: %v", err)
	} else if ret, err := C.nfq_bind_pf(q.h, AF_INET); err != nil || ret < 0 {
		// note: := here shadows ret/err inside this branch only
		return fmt.Errorf("Error binding to AF_INET protocol family: %v", err)
	} else if ret, err := C.nfq_bind_pf(q.h, AF_INET6); err != nil || ret < 0 {
		return fmt.Errorf("Error binding to AF_INET6 protocol family: %v", err)
	} else if q.qh, err = C.CreateQueue(q.h, C.u_int16_t(queueID), C.u_int32_t(q.idx)); err != nil || q.qh == nil {
		q.destroy()
		return fmt.Errorf("Error binding to queue: %v", err)
	}

	queueIndexLock.Lock()
	queueIndex[q.idx] = &q.packets
	queueIndexLock.Unlock()
	return nil
}
// setup tunes the created queue: maximum queued packets, full-packet
// copy mode, and a receive buffer large enough for a full queue; it also
// fetches the netlink file descriptor used by the recv loop.
// Any failure destroys the queue before returning.
func (q *Queue) setup() (err error) {
	var ret C.int
	queueSize := C.u_int32_t(NF_DEFAULT_QUEUE_SIZE)
	bufferSize := C.uint(NF_DEFAULT_PACKET_SIZE)
	totSize := C.uint(NF_DEFAULT_QUEUE_SIZE * NF_DEFAULT_PACKET_SIZE)

	if ret, err = C.nfq_set_queue_maxlen(q.qh, queueSize); err != nil || ret < 0 {
		q.destroy()
		return fmt.Errorf("Unable to set max packets in queue: %v", err)
	} else if C.nfq_set_mode(q.qh, C.u_int8_t(2), bufferSize) < 0 {
		// NOTE(review): err is not set by this plain C call, so the %v
		// below prints the stale value from the previous call — confirm.
		q.destroy()
		return fmt.Errorf("Unable to set packets copy mode: %v", err)
	} else if q.fd, err = C.nfq_fd(q.h); err != nil {
		q.destroy()
		return fmt.Errorf("Unable to get queue file-descriptor. %v", err)
	} else if C.nfnl_rcvbufsiz(C.nfq_nfnlh(q.h), totSize) < 0 {
		q.destroy()
		return fmt.Errorf("Unable to increase netfilter buffer space size")
	}

	return nil
}
// run blocks reading packets from the kernel until C.Run returns, then
// signals completion on the given channel.
// Fix: the exitCh parameter was ignored and the package-level exitChan
// was used instead; the caller passes exitChan, so behavior is unchanged,
// but the signature now means what it says.
func (q *Queue) run(exitCh chan<- bool) {
	if errno := C.Run(q.h, q.fd); errno != 0 {
		fmt.Fprintf(os.Stderr, "Terminating, unable to receive packet due to errno=%d", errno)
	}
	exitCh <- true
}
// Close ensures that nfqueue resources are freed and closed.
// C.stop_reading_packets() stops the reading packets loop, which causes
// go-subroutine run() to exit.
// After exit, listening queue is destroyed and closed.
// If for some reason any of the steps stucks while closing it, we'll exit by timeout
// (see the watchdog timer in destroy()).
func (q *Queue) Close() {
	// closing the channel first unblocks any consumer ranging over it
	close(q.packets)
	C.stop_reading_packets()
	q.destroy()
	queueIndexLock.Lock()
	delete(queueIndex, q.idx)
	queueIndexLock.Unlock()
}
// destroy unbinds the protocol families and destroys the nfqueue.
// A watchdog timer is armed first: if libnetfilter_queue hangs during
// teardown (which happens in practice), the process force-closes the fd
// and exits after 5 seconds instead of blocking forever.
func (q *Queue) destroy() {
	// we'll try to exit cleanly, but sometimes nfqueue gets stuck
	time.AfterFunc(5*time.Second, func() {
		log.Warning("queue stuck, closing by timeout")
		if q != nil {
			C.close(q.fd)
			q.closeNfq()
		}
		os.Exit(0)
	})

	C.nfq_unbind_pf(q.h, AF_INET)
	C.nfq_unbind_pf(q.h, AF_INET6)
	if q.qh != nil {
		if ret := C.nfq_destroy_queue(q.qh); ret != 0 {
			log.Warning("Queue.destroy(), nfq_destroy_queue() not closed: %d", ret)
		}
	}
	q.closeNfq()
}
// closeNfq releases the netlink handle, logging a warning when
// libnetfilter reports a non-zero status.
func (q *Queue) closeNfq() {
	if q.h == nil {
		return
	}
	if ret := C.nfq_close(q.h); ret != 0 {
		log.Warning("Queue.destroy(), nfq_close() not closed: %d", ret)
	}
}
// Packets exposes the read side of this queue's packet channel.
func (q *Queue) Packets() <-chan Packet {
	return q.packets
}
// go_callback is invoked from C (nf_callback in queue.h) for every queued
// packet: it decodes the payload, routes it to the Go channel registered
// for queue idx, waits for the verdict and copies it back into vc.
// vc is pre-filled with NF_ACCEPT so an early return never blocks traffic.
// FYI: the export keyword is mandatory to specify that go_callback is defined elsewhere
//export go_callback
func go_callback(queueID C.int, data *C.uchar, length C.int, mark C.uint, idx uint32, vc *VerdictContainerC, uid uint32) {
	(*vc).verdict = C.uint(NF_ACCEPT)
	(*vc).data = nil
	(*vc).mark_set = 0
	(*vc).length = 0

	queueIndexLock.RLock()
	queueChannel, found := queueIndex[idx]
	queueIndexLock.RUnlock()
	if !found {
		fmt.Fprintf(os.Stderr, "Unexpected queue idx %d\n", idx)
		return
	}

	xdata := C.GoBytes(unsafe.Pointer(data), length)
	p := Packet{
		verdictChannel:  make(chan VerdictContainer),
		Mark:            uint32(mark),
		UID:             uid,
		NetworkProtocol: xdata[0] >> 4, // first 4 bits is the version
	}

	var packet gopacket.Packet
	if p.IsIPv4() {
		packet = gopacket.NewPacket(xdata, layers.LayerTypeIPv4, gopacketDecodeOptions)
	} else {
		packet = gopacket.NewPacket(xdata, layers.LayerTypeIPv6, gopacketDecodeOptions)
	}
	p.Packet = packet

	select {
	case *queueChannel <- p:
		// consumer took the packet; block until it emits a verdict
		select {
		case v := <-p.verdictChannel:
			if v.Packet == nil {
				(*vc).verdict = C.uint(v.Verdict)
			} else {
				// replacement payload: hand C a pointer into the Go slice;
				// it is consumed synchronously before this call returns
				(*vc).verdict = C.uint(v.Verdict)
				(*vc).data = (*C.uchar)(unsafe.Pointer(&v.Packet[0]))
				(*vc).length = C.uint(len(v.Packet))
			}
			if v.Mark != 0 {
				(*vc).mark_set = C.uint(1)
				(*vc).mark = C.uint(v.Mark)
			}
		}
	case <-time.After(1 * time.Millisecond):
		// nobody is consuming (e.g. shutdown); fall through with NF_ACCEPT
		fmt.Fprintf(os.Stderr, "Timed out while sending packet to queue channel %d\n", idx)
	}
}
opensnitch-1.5.8.1/daemon/netfilter/queue.h 0000664 0000000 0000000 00000006020 14401326716 0020574 0 ustar 00root root 0000000 0000000 #ifndef _NETFILTER_QUEUE_H
#define _NETFILTER_QUEUE_H
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* Verdict for one packet, filled in by the Go side (go_callback):
 * action, optional mark and optional replacement payload for
 * nfq_set_verdict2(). */
typedef struct {
    uint verdict;
    uint mark;
    uint mark_set;        /* 1 when `mark` must be applied */
    uint length;          /* length of `data`; 0 keeps the original payload */
    unsigned char *data;  /* replacement payload; NULL keeps the original */
} verdictContainer;

/* nfq_get_uid resolved at runtime via dlsym(); stays NULL when the
 * installed libnetfilter_queue doesn't export it. */
static void *get_uid = NULL;

/* Implemented on the Go side (queue.go, //export go_callback). */
extern void go_callback(int id, unsigned char* data, int len, uint mark, u_int32_t idx, verdictContainer *vc, uint32_t uid);

/* Set to 1 by stop_reading_packets() to make Run()'s loop exit. */
static uint8_t stop = 0;
/* Probe libnetfilter_queue at runtime for nfq_get_uid() and, when found,
 * ask the kernel to attach UID/GID info to queued packets.
 * The dlopen() handle is deliberately never closed: get_uid must stay
 * valid for the lifetime of the process.
 * Fix: corrected "libnetfiler_queue" typo in the success message. */
static inline void configure_uid_if_available(struct nfq_q_handle *qh){
    void *hndl = dlopen("libnetfilter_queue.so.1", RTLD_LAZY);
    if (!hndl) {
        hndl = dlopen("libnetfilter_queue.so", RTLD_LAZY);
        if (!hndl){
            printf("WARNING: libnetfilter_queue not available\n");
            return;
        }
    }
    if ((get_uid = dlsym(hndl, "nfq_get_uid")) == NULL){
        printf("WARNING: nfq_get_uid not available\n");
        return;
    }
    printf("OK: libnetfilter_queue supports nfq_get_uid\n");
#ifdef NFQA_CFG_F_UID_GID
    if (qh != NULL && nfq_set_queue_flags(qh, NFQA_CFG_F_UID_GID, NFQA_CFG_F_UID_GID)){
        printf("WARNING: UID not available on this kernel/libnetfilter_queue\n");
    }
#endif
}
/* netfilter queue callback: extracts the packet, hands it to the Go side
 * (go_callback) and applies the verdict Go filled into vc.
 * Returns -1 during shutdown so nfq_handle_packet() stops iterating.
 * Fix: the original if/else on vc.mark_set executed the exact same
 * nfq_set_verdict2() call in both branches — collapsed to one call.
 * (vc.mark is 0 when mark_set is 0, so passing it unconditionally is
 * what the original did too.) */
static int nf_callback(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg, struct nfq_data *nfa, void *arg){
    if (stop) {
        return -1;
    }

    uint32_t id = -1, idx = 0, mark = 0;
    struct nfqnl_msg_packet_hdr *ph = NULL;
    unsigned char *buffer = NULL;
    int size = 0;
    verdictContainer vc = {0};
    uint32_t uid = 0xffffffff;  /* "unknown uid" sentinel, matches the Go side */

    mark = nfq_get_nfmark(nfa);
    ph = nfq_get_msg_packet_hdr(nfa);
    id = ntohl(ph->packet_id);
    size = nfq_get_payload(nfa, &buffer);
    idx = (uint32_t)((uintptr_t)arg);  /* cookie set in CreateQueue() */

#ifdef NFQA_CFG_F_UID_GID
    if (get_uid)
        nfq_get_uid(nfa, &uid);
#endif

    go_callback(id, buffer, size, mark, idx, &vc, uid);

    return nfq_set_verdict2(qh, id, vc.verdict, vc.mark, vc.length, vc.data);
}
/* Create netfilter queue `queue` on handle `h`; `idx` is stored as the
 * callback cookie used to find the matching Go channel.
 * Returns NULL when the queue could not be created. */
static inline struct nfq_q_handle* CreateQueue(struct nfq_handle *h, u_int16_t queue, u_int32_t idx) {
    struct nfq_q_handle *qh = nfq_create_queue(h, queue, &nf_callback, (void*)((uintptr_t)idx));
    if (qh == NULL) {
        printf("ERROR: nfq_create_queue() queue not created\n");
        return qh;
    }
    configure_uid_if_available(qh);
    return qh;
}
/* Request that Run()'s receive loop terminate after the next packet;
 * also makes nf_callback() return -1 for any in-flight message. */
static inline void stop_reading_packets() {
    stop = 1;
}
/* Blocking receive loop: reads netlink messages from fd and feeds them
 * to nfq_handle_packet() until recv() fails or stop_reading_packets()
 * was called. Returns the errno left by the last recv(). */
static inline int Run(struct nfq_handle *h, int fd) {
    char buf[4096] __attribute__ ((aligned));
    int rcvd, opt = 1;
    /* don't let a queue overrun kill the socket with ENOBUFS */
    setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &opt, sizeof(int));

    while ((rcvd = recv(fd, buf, sizeof(buf), 0)) >= 0) {
        if (stop == 1) {
            return errno;
        }
        nfq_handle_packet(h, buf, rcvd);
    }
    return errno;
}
#endif
opensnitch-1.5.8.1/daemon/netlink/ 0000775 0000000 0000000 00000000000 14401326716 0016751 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/netlink/socket.go 0000664 0000000 0000000 00000013212 14401326716 0020567 0 ustar 00root root 0000000 0000000 package netlink
import (
"fmt"
"net"
"strconv"
"syscall"
"github.com/evilsocket/opensnitch/daemon/log"
)
// GetSocketInfo asks the kernel via netlink for a given connection.
// If the connection is found, we return the uid and the possible
// associated inodes.
// If the outgoing connection is not found but there are entries with the source
// port and same protocol, add all the inodes to the list.
//
// Some examples:
// outgoing connection as seen by netfilter || connection details dumped from kernel
//
// 47344:192.168.1.106 -> 151.101.65.140:443 || in kernel: 47344:192.168.1.106 -> 151.101.65.140:443
// 8612:192.168.1.5 -> 192.168.1.255:8612   || in kernel: 8612:192.168.1.105 -> 0.0.0.0:0
// 123:192.168.1.5 -> 217.144.138.234:123   || in kernel: 123:0.0.0.0 -> 0.0.0.0:0
// 45015:127.0.0.1 -> 239.255.255.250:1900  || in kernel: 45015:127.0.0.1 -> 0.0.0.0:0
// 50416:fe80::9fc2:ddcf:df22:aa50 -> fe80::1:53 || in kernel: 50416:254.128.0.0 -> 254.128.0.0:53
// 51413:192.168.1.106 -> 103.224.182.250:1337   || in kernel: 51413:0.0.0.0 -> 0.0.0.0:0
func GetSocketInfo(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) (uid int, inodes []int) {
	uid = -1
	family := uint8(syscall.AF_INET)
	ipproto := uint8(syscall.IPPROTO_TCP)
	protoLen := len(proto)
	// protocol names ending in "6" ("tcp6", "udp6", ...) are IPv6
	if proto[protoLen-1:protoLen] == "6" {
		family = syscall.AF_INET6
	}

	if proto[:3] == "udp" {
		ipproto = syscall.IPPROTO_UDP
		if protoLen >= 7 && proto[:7] == "udplite" {
			ipproto = syscall.IPPROTO_UDPLITE
		}
	}
	if sockList, err := SocketGet(family, ipproto, uint16(srcPort), uint16(dstPort), srcIP, dstIP); err == nil {
		for n, sock := range sockList {
			// 0xffffffff is the kernel's "no uid" sentinel
			if sock.UID != 0xffffffff {
				uid = int(sock.UID)
			}
			log.Debug("[%d/%d] outgoing connection uid: %d, %d:%v -> %v:%d || netlink response: %d:%v -> %v:%d inode: %d - loopback: %v multicast: %v unspecified: %v linklocalunicast: %v ifaceLocalMulticast: %v GlobalUni: %v ",
				n, len(sockList),
				int(sock.UID),
				srcPort, srcIP, dstIP, dstPort,
				sock.ID.SourcePort, sock.ID.Source,
				sock.ID.Destination, sock.ID.DestinationPort, sock.INode,
				sock.ID.Destination.IsLoopback(),
				sock.ID.Destination.IsMulticast(),
				sock.ID.Destination.IsUnspecified(),
				sock.ID.Destination.IsLinkLocalUnicast(),
				sock.ID.Destination.IsLinkLocalMulticast(),
				sock.ID.Destination.IsGlobalUnicast(),
			)
			// exact match: same source, same destination port, and a
			// routable/loopback destination equal to the one netfilter saw
			if sock.ID.SourcePort == uint16(srcPort) && sock.ID.Source.Equal(srcIP) &&
				(sock.ID.DestinationPort == uint16(dstPort)) &&
				((sock.ID.Destination.IsGlobalUnicast() || sock.ID.Destination.IsLoopback()) && sock.ID.Destination.Equal(dstIP)) {
				inodes = append([]int{int(sock.INode)}, inodes...)
				continue
			}
			log.Debug("GetSocketInfo() invalid: %d:%v -> %v:%d", sock.ID.SourcePort, sock.ID.Source, sock.ID.Destination, sock.ID.DestinationPort)
		}

		// handle special cases (see function description): ntp queries (123), broadcasts, incoming connections.
		if len(inodes) == 0 && len(sockList) > 0 {
			for n, sock := range sockList {
				// unspecified destination (0.0.0.0 / ::): unconnected UDP
				// sockets and similar — accept them as candidates
				if sockList[n].ID.Destination.Equal(net.IPv4zero) || sockList[n].ID.Destination.Equal(net.IPv6zero) {
					inodes = append([]int{int(sock.INode)}, inodes...)
					log.Debug("netlink socket not found, adding entry: %d:%v -> %v:%d || %d:%v -> %v:%d inode: %d state: %s",
						srcPort, srcIP, dstIP, dstPort,
						sockList[n].ID.SourcePort, sockList[n].ID.Source,
						sockList[n].ID.Destination, sockList[n].ID.DestinationPort,
						sockList[n].INode, TCPStatesMap[sock.State])
				} else if sock.ID.SourcePort == uint16(srcPort) && sock.ID.Source.Equal(srcIP) &&
					(sock.ID.DestinationPort == uint16(dstPort)) {
					// source and ports match but the destination didn't
					// (e.g. broadcast rewritten by the kernel)
					inodes = append([]int{int(sock.INode)}, inodes...)
					continue
				} else {
					log.Debug("netlink socket not found, EXCLUDING entry: %d:%v -> %v:%d || %d:%v -> %v:%d inode: %d state: %s",
						srcPort, srcIP, dstIP, dstPort,
						sockList[n].ID.SourcePort, sockList[n].ID.Source,
						sockList[n].ID.Destination, sockList[n].ID.DestinationPort,
						sockList[n].INode, TCPStatesMap[sock.State])
				}
			}
		}
	} else {
		log.Debug("netlink socket error: %v - %d:%v -> %v:%d", err, srcPort, srcIP, dstIP, dstPort)
	}

	return uid, inodes
}
// GetSocketInfoByInode dumps the kernel sockets table (TCP and UDP, over
// IPv4 and IPv6) and returns the socket matching the given inode.
func GetSocketInfoByInode(inodeStr string) (*Socket, error) {
	inode, err := strconv.ParseUint(inodeStr, 10, 32)
	if err != nil {
		return nil, err
	}

	type inetStruct struct{ family, proto uint8 }
	socketTypes := []inetStruct{
		{syscall.AF_INET, syscall.IPPROTO_TCP},
		{syscall.AF_INET, syscall.IPPROTO_UDP},
		{syscall.AF_INET6, syscall.IPPROTO_TCP},
		{syscall.AF_INET6, syscall.IPPROTO_UDP},
	}

	for _, t := range socketTypes {
		socketList, dumpErr := SocketsDump(t.family, t.proto)
		if dumpErr != nil {
			return nil, dumpErr
		}
		for _, s := range socketList {
			if s.INode == uint32(inode) {
				return s, nil
			}
		}
	}
	return nil, fmt.Errorf("Inode not found")
}
// KillSocket asks the kernel to destroy every socket matching the given
// connection properties. Failures are logged at debug level only.
func KillSocket(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) {
	family := uint8(syscall.AF_INET)
	ipproto := uint8(syscall.IPPROTO_TCP)
	n := len(proto)
	// "tcp6"/"udp6"/... select the IPv6 family
	if proto[n-1:] == "6" {
		family = syscall.AF_INET6
	}
	if proto[:3] == "udp" {
		ipproto = syscall.IPPROTO_UDP
		if n >= 7 && proto[:7] == "udplite" {
			ipproto = syscall.IPPROTO_UDPLITE
		}
	}

	sockList, err := SocketGet(family, ipproto, uint16(srcPort), uint16(dstPort), srcIP, dstIP)
	if err != nil {
		return
	}
	for _, s := range sockList {
		if killErr := socketKill(family, ipproto, s.ID); killErr != nil {
			log.Debug("Unable to kill socket: %d, %d, %v", srcPort, dstPort, killErr)
		}
	}
}
opensnitch-1.5.8.1/daemon/netlink/socket_linux.go 0000664 0000000 0000000 00000014546 14401326716 0022021 0 ustar 00root root 0000000 0000000 package netlink
import (
"encoding/binary"
"errors"
"fmt"
"net"
"syscall"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/vishvananda/netlink/nl"
)
// This is a modification of https://github.com/vishvananda/netlink socket_linux.go - Apache2.0 license
// which adds support for query UDP, UDPLITE and IPv6 sockets to SocketGet()
const (
	// SOCK_DESTROY is the inet_diag netlink command used to kill a socket.
	SOCK_DESTROY = 21

	// sizes (in bytes) of the serialized netlink structures below.
	sizeofSocketID      = 0x30
	sizeofSocketRequest = sizeofSocketID + 0x8
	sizeofSocket        = sizeofSocketID + 0x18
)

var (
	native       = nl.NativeEndian()
	networkOrder = binary.BigEndian
	// TCP_ALL is the state bitmask requesting sockets in every TCP state.
	TCP_ALL = uint32(0xfff)
)

// https://elixir.bootlin.com/linux/latest/source/include/net/tcp_states.h
const (
	TCP_INVALID = iota
	TCP_ESTABLISHED
	TCP_SYN_SENT
	TCP_SYN_RECV
	TCP_FIN_WAIT1
	TCP_FIN_WAIT2
	TCP_TIME_WAIT
	TCP_CLOSE
	TCP_CLOSE_WAIT
	TCP_LAST_ACK
	TCP_LISTEN
	TCP_CLOSING
	TCP_NEW_SYN_REC
	TCP_MAX_STATES
)
// TCPStatesMap maps each TCP_* state constant to its printable name,
// used in debug logs.
// Fix: added the missing entry for TCP_NEW_SYN_REC (state 12); lookups
// for that state previously produced an empty string.
var TCPStatesMap = map[uint8]string{
	TCP_INVALID:     "invalid",
	TCP_ESTABLISHED: "established",
	TCP_SYN_SENT:    "syn_sent",
	TCP_SYN_RECV:    "syn_recv",
	TCP_FIN_WAIT1:   "fin_wait1",
	TCP_FIN_WAIT2:   "fin_wait2",
	TCP_TIME_WAIT:   "time_wait",
	TCP_CLOSE:       "close",
	TCP_CLOSE_WAIT:  "close_wait",
	TCP_LAST_ACK:    "last_ack",
	TCP_LISTEN:      "listen",
	TCP_CLOSING:     "closing",
	TCP_NEW_SYN_REC: "new_syn_recv",
}
// SocketID holds the socket information of a request/response to the kernel
type SocketID struct {
	SourcePort      uint16
	DestinationPort uint16
	Source          net.IP
	Destination     net.IP
	Interface       uint32
	// Cookie is the kernel's socket cookie; TCPDIAG_NOCOOKIE when unknown.
	Cookie [2]uint32
}

// Socket represents a netlink socket.
type Socket struct {
	Family  uint8
	State   uint8 // one of the TCP_* states above
	Timer   uint8
	Retrans uint8
	ID      SocketID
	Expires uint32
	RQueue  uint32
	WQueue  uint32
	UID     uint32
	INode   uint32
}

// SocketRequest holds the request/response of a connection to the kernel
type SocketRequest struct {
	Family   uint8
	Protocol uint8
	Ext      uint8
	pad      uint8
	States   uint32 // bitmask of TCP states to dump (e.g. TCP_ALL)
	ID       SocketID
}
// writeBuffer is a cursor over a byte slice used to serialize netlink
// structures sequentially.
type writeBuffer struct {
	Bytes []byte
	pos   int
}

// Write stores one byte at the cursor and advances it.
func (b *writeBuffer) Write(c byte) {
	at := b.pos
	b.pos = at + 1
	b.Bytes[at] = c
}

// Next reserves the next n bytes and returns them as a writable window.
func (b *writeBuffer) Next(n int) []byte {
	start := b.pos
	b.pos += n
	return b.Bytes[start:b.pos]
}
// Serialize convert SocketRequest struct to bytes.
// The layout must match the kernel's struct inet_diag_req_v2:
// family/protocol/ext/pad (one byte each), states (4 bytes, host order),
// then the sockid: ports in network order, two 16-byte addresses
// (IPv4 uses the first 4 bytes, rest zeroed), interface and the two
// cookie words in host order.
func (r *SocketRequest) Serialize() []byte {
	b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)}
	b.Write(r.Family)
	b.Write(r.Protocol)
	b.Write(r.Ext)
	b.Write(r.pad)
	native.PutUint32(b.Next(4), r.States)
	// ports travel in network byte order
	networkOrder.PutUint16(b.Next(2), r.ID.SourcePort)
	networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort)
	if r.Family == syscall.AF_INET6 {
		copy(b.Next(16), r.ID.Source)
		copy(b.Next(16), r.ID.Destination)
	} else {
		// IPv4: 4 significant bytes, skip the remaining 12 (left zeroed)
		copy(b.Next(4), r.ID.Source.To4())
		b.Next(12)
		copy(b.Next(4), r.ID.Destination.To4())
		b.Next(12)
	}
	native.PutUint32(b.Next(4), r.ID.Interface)
	native.PutUint32(b.Next(4), r.ID.Cookie[0])
	native.PutUint32(b.Next(4), r.ID.Cookie[1])
	return b.Bytes
}
// Len reports the serialized size of a SocketRequest in bytes.
func (r *SocketRequest) Len() int {
	return sizeofSocketRequest
}
// readBuffer is a cursor over a byte slice used to deserialize netlink
// structures sequentially.
type readBuffer struct {
	Bytes []byte
	pos   int
}

// Read returns the byte at the cursor and advances it.
func (b *readBuffer) Read() byte {
	at := b.pos
	b.pos = at + 1
	return b.Bytes[at]
}

// Next consumes the next n bytes and returns them as a window.
func (b *readBuffer) Next(n int) []byte {
	start := b.pos
	b.pos += n
	return b.Bytes[start:b.pos]
}
// deserialize fills the Socket from the kernel's inet_diag_msg bytes.
// The field order mirrors Serialize() above: header bytes, ports in
// network order, two 16-byte addresses (IPv4 in the first 4 bytes),
// then interface, cookies, queue counters, uid and inode in host order.
// Returns an error when b is shorter than a full inet_diag_msg.
func (s *Socket) deserialize(b []byte) error {
	if len(b) < sizeofSocket {
		return fmt.Errorf("socket data short read (%d); want %d", len(b), sizeofSocket)
	}
	rb := readBuffer{Bytes: b}
	s.Family = rb.Read()
	s.State = rb.Read()
	s.Timer = rb.Read()
	s.Retrans = rb.Read()
	s.ID.SourcePort = networkOrder.Uint16(rb.Next(2))
	s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2))
	if s.Family == syscall.AF_INET6 {
		s.ID.Source = net.IP(rb.Next(16))
		s.ID.Destination = net.IP(rb.Next(16))
	} else {
		// IPv4: 4 significant bytes, skip the 12 bytes of padding
		s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read())
		rb.Next(12)
		s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read())
		rb.Next(12)
	}
	s.ID.Interface = native.Uint32(rb.Next(4))
	s.ID.Cookie[0] = native.Uint32(rb.Next(4))
	s.ID.Cookie[1] = native.Uint32(rb.Next(4))
	s.Expires = native.Uint32(rb.Next(4))
	s.RQueue = native.Uint32(rb.Next(4))
	s.WQueue = native.Uint32(rb.Next(4))
	s.UID = native.Uint32(rb.Next(4))
	s.INode = native.Uint32(rb.Next(4))
	return nil
}
// socketKill asks the kernel, via a SOCK_DESTROY netlink request, to
// terminate the connection identified by sockID for the given address
// family and protocol.
func socketKill(family, proto uint8, sockID SocketID) error {
	req := nl.NewNetlinkRequest(SOCK_DESTROY, syscall.NLM_F_REQUEST|syscall.NLM_F_ACK)
	req.AddData(&SocketRequest{
		Family:   family,
		Protocol: proto,
		ID:       sockID,
	})
	_, err := req.Execute(syscall.NETLINK_INET_DIAG, 0)
	return err
}
// SocketGet returns the list of active connections in the kernel
// filtered by several fields. Currently it returns connections
// filtered by source port and protocol.
func SocketGet(family uint8, proto uint8, srcPort, dstPort uint16, local, remote net.IP) ([]*Socket, error) {
	request := &SocketRequest{
		Family:   family,
		Protocol: proto,
		States:   TCP_ALL,
		ID: SocketID{
			SourcePort: srcPort,
			Cookie:     [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE},
		},
	}
	return netlinkRequest(request, family, proto, srcPort, dstPort, local, remote)
}
// SocketsDump returns the list of all connections from the kernel
func SocketsDump(family uint8, proto uint8) ([]*Socket, error) {
	request := &SocketRequest{
		Family:   family,
		Protocol: proto,
		States:   TCP_ALL,
	}
	// no filtering fields are passed down: they are only used for logging
	return netlinkRequest(request, 0, 0, 0, 0, nil, nil)
}
// netlinkRequest dumps the kernel sockets table (SOCK_DIAG_BY_FAMILY) for
// the given request and deserializes every returned message.
// Sockets with inode 0 are discarded. Each parsed socket is prepended, so
// the returned list is in reverse arrival order (last kernel answer first).
// The family/proto/port/IP parameters are used only to enrich error logs.
func netlinkRequest(sockReq *SocketRequest, family uint8, proto uint8, srcPort, dstPort uint16, local, remote net.IP) ([]*Socket, error) {
	req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, syscall.NLM_F_DUMP)
	req.AddData(sockReq)
	msgs, err := req.Execute(syscall.NETLINK_INET_DIAG, 0)
	if err != nil {
		return nil, err
	}
	if len(msgs) == 0 {
		return nil, errors.New("Warning, no message nor error from netlink, or no connections found")
	}
	var sock []*Socket
	for n, m := range msgs {
		s := &Socket{}
		if err = s.deserialize(m); err != nil {
			// log both what we asked for and what came back malformed
			log.Error("[%d] netlink socket error: %s, %d:%v -> %v:%d - %d:%v -> %v:%d",
				n, TCPStatesMap[s.State],
				srcPort, local, remote, dstPort,
				s.ID.SourcePort, s.ID.Source, s.ID.Destination, s.ID.DestinationPort)
			continue
		}
		if s.INode == 0 {
			continue
		}
		// prepend: newest kernel answers end up at the head of the list
		sock = append([]*Socket{s}, sock...)
	}
	return sock, err
}
opensnitch-1.5.8.1/daemon/netlink/socket_test.go 0000664 0000000 0000000 00000005646 14401326716 0021642 0 ustar 00root root 0000000 0000000 package netlink
import (
"fmt"
"net"
"os"
"strconv"
"strings"
"testing"
)
// Connection bundles the two ends of a local loopback test connection:
// the dialing socket (OutConn), the listening socket (Listener), and the
// parsed source/destination addresses and ports of the outgoing side.
type Connection struct {
	SrcIP    net.IP
	DstIP    net.IP
	Protocol string
	SrcPort  uint
	DstPort  uint
	OutConn  net.Conn
	Listener net.Listener
}
// EstablishConnection dials dst with the given protocol and returns the
// resulting connection; failures are printed to stdout and returned.
func EstablishConnection(proto, dst string) (net.Conn, error) {
	conn, err := net.Dial(proto, dst)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return conn, nil
}
// ListenOnPort starts listening on the given local "host:port" address;
// failures are printed to stdout and returned.
func ListenOnPort(proto, port string) (net.Listener, error) {
	// TODO: UDP -> ListenUDP() or ListenPacket()
	listener, err := net.Listen(proto, port)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return listener, nil
}
// setupConnection opens a local listener on 127.0.0.1:55555, dials it, and
// sends the resulting Connection through connChan. On any failure nil is
// sent, and a listener that was already opened is closed so the port is not
// leaked (the old code ignored the listen error and leaked the listener
// when the dial failed).
func setupConnection(proto string, connChan chan *Connection) {
	listnr, err := ListenOnPort(proto, "127.0.0.1:55555")
	if err != nil {
		connChan <- nil
		return
	}
	conn, err := EstablishConnection(proto, "127.0.0.1:55555")
	if err != nil {
		listnr.Close()
		connChan <- nil
		return
	}
	laddr := strings.Split(conn.LocalAddr().String(), ":")
	daddr := strings.Split(conn.RemoteAddr().String(), ":")
	sport, _ := strconv.Atoi(laddr[1])
	dport, _ := strconv.Atoi(daddr[1])
	lconn := &Connection{
		SrcPort: uint(sport),
		DstPort: uint(dport),
		SrcIP:   net.ParseIP(laddr[0]),
		DstIP:   net.ParseIP(daddr[0]),
		// use the requested protocol (it was hard-coded to "tcp" before)
		Protocol: proto,
		Listener: listnr,
		OutConn:  conn,
	}
	connChan <- lconn
}
// TestNetlinkQueries tests queries to the kernel to get the inode of a connection.
// When using ProcFS as monitor method, we need that value to get the PID of an application.
// We also need it if for any reason auditd or ebpf doesn't return the PID of the application.
// TODO: test all the cases described in the GetSocketInfo() description.
func TestNetlinkTCPQueries(t *testing.T) {
	// netlink tests disabled by default, they cause random failures on restricted
	// environments.
	if os.Getenv("NETLINK_TESTS") == "" {
		t.Skip("Skipping netlink tests. Use NETLINK_TESTS=1 to launch these tests.")
	}
	connChan := make(chan *Connection)
	go setupConnection("tcp", connChan)
	conn := <-connChan
	if conn == nil {
		// Fatal, not Error: conn is dereferenced below and would panic.
		t.Fatal("TestParseTCPConnection, conn nil")
	}
	defer conn.Listener.Close()
	var inodes []int
	uid := -1
	t.Run("Test GetSocketInfo", func(t *testing.T) {
		uid, inodes = GetSocketInfo("tcp", conn.SrcIP, conn.SrcPort, conn.DstIP, conn.DstPort)
		if len(inodes) == 0 {
			t.Error("inodes empty")
		}
		if uid != os.Getuid() {
			t.Error("GetSocketInfo UID error:", uid, os.Getuid())
		}
	})
	t.Run("Test GetSocketInfoByInode", func(t *testing.T) {
		if len(inodes) == 0 {
			// indexing inodes[0] would panic; the failure was already
			// reported by the previous subtest.
			t.Skip("no inodes returned by GetSocketInfo")
		}
		socket, err := GetSocketInfoByInode(fmt.Sprint(inodes[0]))
		if err != nil {
			t.Fatal("GetSocketInfoByInode error:", err)
		}
		if socket == nil {
			// Fatal: socket is dereferenced right below.
			t.Fatal("GetSocketInfoByInode inode not found")
		}
		if socket.ID.SourcePort != uint16(conn.SrcPort) {
			t.Error("GetSocketInfoByInode srcPort error:", socket)
		}
		if socket.ID.DestinationPort != uint16(conn.DstPort) {
			t.Error("GetSocketInfoByInode dstPort error:", socket)
		}
		if socket.UID != uint32(os.Getuid()) {
			t.Error("GetSocketInfoByInode UID error:", socket, os.Getuid())
		}
	})
}
opensnitch-1.5.8.1/daemon/netstat/ 0000775 0000000 0000000 00000000000 14401326716 0016767 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/netstat/entry.go 0000664 0000000 0000000 00000001404 14401326716 0020456 0 ustar 00root root 0000000 0000000 package netstat
import (
"net"
)
// Entry holds the information of a /proc/net/* entry.
// For example, /proc/net/tcp:
// sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
// 0: 0100007F:13AD 00000000:0000 0A 00000000:00000000 00:00000000 00000000 1000 0 18083222
type Entry struct {
	// Proto is the /proc/net table this entry came from (tcp, udp, tcp6, ...)
	Proto   string
	SrcIP   net.IP
	SrcPort uint
	DstIP   net.IP
	DstPort uint
	// UserId is the owning uid; -1 when the entry was not found
	UserId int
	// INode is the socket inode; -1 when the entry was not found
	INode int
}
// NewEntry creates a new entry with values from /proc/net/
func NewEntry(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint, userId int, iNode int) Entry {
	e := Entry{}
	e.Proto = proto
	e.SrcIP = srcIP
	e.SrcPort = srcPort
	e.DstIP = dstIP
	e.DstPort = dstPort
	e.UserId = userId
	e.INode = iNode
	return e
}
opensnitch-1.5.8.1/daemon/netstat/find.go 0000664 0000000 0000000 00000002462 14401326716 0020242 0 ustar 00root root 0000000 0000000 package netstat
import (
"net"
"strings"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
// FindEntry looks for the connection in the list of known connections in ProcFS.
// If nothing matches under the given protocol and IPv6 is enabled, the IPv6
// variant of the table is searched as well. When no entry is found, a
// placeholder with UserId and INode set to -1 is returned.
func FindEntry(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) *Entry {
	if entry := findEntryForProtocol(proto, srcIP, srcPort, dstIP, dstPort); entry != nil {
		return entry
	}
	const ipv6Suffix = "6"
	if core.IPv6Enabled && !strings.HasSuffix(proto, ipv6Suffix) {
		otherProto := proto + ipv6Suffix
		log.Debug("Searching for %s netstat entry instead of %s", otherProto, proto)
		if entry := findEntryForProtocol(otherProto, srcIP, srcPort, dstIP, dstPort); entry != nil {
			return entry
		}
	}
	notFound := &Entry{
		Proto:   proto,
		SrcIP:   srcIP,
		SrcPort: srcPort,
		DstIP:   dstIP,
		DstPort: dstPort,
		UserId:  -1,
		INode:   -1,
	}
	return notFound
}
// findEntryForProtocol parses /proc/net/<proto> and returns the entry whose
// source and destination endpoints match, or nil when nothing matches or
// the table could not be parsed.
func findEntryForProtocol(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) *Entry {
	entries, err := Parse(proto)
	if err != nil {
		log.Warning("Error while searching for %s netstat entry: %s", proto, err)
		return nil
	}
	for n := range entries {
		candidate := entries[n]
		if candidate.SrcPort != srcPort || candidate.DstPort != dstPort {
			continue
		}
		if srcIP.Equal(candidate.SrcIP) && dstIP.Equal(candidate.DstIP) {
			return &candidate
		}
	}
	return nil
}
opensnitch-1.5.8.1/daemon/netstat/parse.go 0000664 0000000 0000000 00000005254 14401326716 0020436 0 ustar 00root root 0000000 0000000 package netstat
import (
"bufio"
"encoding/binary"
"fmt"
"net"
"os"
"regexp"
"strconv"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
var (
	// parser matches one data line of a /proc/net/{tcp,udp,...}[6] table.
	// Capture groups: 1=local address (hex, 8 chars for IPv4 / 32 for IPv6),
	// 2=local port (hex), 3=remote address (hex), 4=remote port (hex),
	// 5=uid (decimal), 6=inode (decimal).
	parser = regexp.MustCompile(`(?i)` +
		`\d+:\s+` + // sl
		`([a-f0-9]{8,32}):([a-f0-9]{4})\s+` + // local_address
		`([a-f0-9]{8,32}):([a-f0-9]{4})\s+` + // rem_address
		`[a-f0-9]{2}\s+` + // st
		`[a-f0-9]{8}:[a-f0-9]{8}\s+` + // tx_queue rx_queue
		`[a-f0-9]{2}:[a-f0-9]{8}\s+` + // tr tm->when
		`[a-f0-9]{8}\s+` + // retrnsmt
		`(\d+)\s+` + // uid
		`\d+\s+` + // timeout
		`(\d+)\s+` + // inode
		`.+`) // stuff we don't care about
)
// decToInt converts a base-10 string to an int, aborting the daemon via
// log.Fatal if the value cannot be parsed.
func decToInt(n string) int {
	value, err := strconv.ParseInt(n, 10, 64)
	if err != nil {
		log.Fatal("Error while parsing %s to int: %s", n, err)
	}
	return int(value)
}
// hexToInt converts a base-16 string to a uint, aborting the daemon via
// log.Fatal if the value cannot be parsed.
func hexToInt(h string) uint {
	value, err := strconv.ParseUint(h, 16, 64)
	if err != nil {
		log.Fatal("Error while parsing %s to int: %s", h, err)
	}
	return uint(value)
}
// hexToInt2 parses a /proc/net hex address into one or two unsigned ints.
// Strings longer than 16 hex chars (IPv6 addresses) are returned as two
// 64-bit halves; otherwise the second return value is 0.
func hexToInt2(h string) (uint, uint) {
	if len(h) > 16 {
		d, err := strconv.ParseUint(h[:16], 16, 64)
		if err != nil {
			// report the half that actually failed (the old code logged
			// the second half here)
			log.Fatal("Error while parsing %s to int: %s", h[:16], err)
		}
		d2, err := strconv.ParseUint(h[16:], 16, 64)
		if err != nil {
			log.Fatal("Error while parsing %s to int: %s", h[16:], err)
		}
		return uint(d), uint(d2)
	}
	d, err := strconv.ParseUint(h, 16, 64)
	if err != nil {
		// h is at most 16 chars here: log the whole string (the old code
		// sliced h[16:], which panics for inputs shorter than 16 chars)
		log.Fatal("Error while parsing %s to int: %s", h, err)
	}
	return uint(d), 0
}
// hexToIP converts a /proc/net hex-encoded address into a net.IP.
// 8 hex chars decode to an IPv4 address, 32 hex chars to IPv6.
// NOTE(review): the branch is chosen on m != 0, so an IPv6 address whose
// last 16 hex digits are all zero would be decoded as IPv4 — confirm this
// cannot occur for real /proc/net entries.
func hexToIP(h string) net.IP {
	n, m := hexToInt2(h)
	var ip net.IP
	if m != 0 {
		ip = make(net.IP, 16)
		// TODO: Check if this depends on machine endianness?
		// NOTE(review): n>>32 assumes uint is 64 bits wide; on a 32-bit
		// build uint is 32 bits and this shift yields 0 — verify.
		binary.LittleEndian.PutUint32(ip, uint32(n>>32))
		binary.LittleEndian.PutUint32(ip[4:], uint32(n))
		binary.LittleEndian.PutUint32(ip[8:], uint32(m>>32))
		binary.LittleEndian.PutUint32(ip[12:], uint32(m))
	} else {
		ip = make(net.IP, 4)
		binary.LittleEndian.PutUint32(ip, uint32(n))
	}
	return ip
}
// Parse scans and retrieves the opened connections, from /proc/net/ files.
// It returns the entries parsed so far plus any read error reported by the
// scanner (the old code silently dropped scanner errors).
func Parse(proto string) ([]Entry, error) {
	filename := fmt.Sprintf("/proc/net/%s", proto)
	fd, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	entries := make([]Entry, 0)
	scanner := bufio.NewScanner(fd)
	for lineno := 0; scanner.Scan(); lineno++ {
		// skip column names
		if lineno == 0 {
			continue
		}
		line := core.Trim(scanner.Text())
		m := parser.FindStringSubmatch(line)
		if m == nil {
			log.Warning("Could not parse netstat line from %s: %s", filename, line)
			continue
		}
		entries = append(entries, NewEntry(
			proto,
			hexToIP(m[1]),
			hexToInt(m[2]),
			hexToIP(m[3]),
			hexToInt(m[4]),
			decToInt(m[5]),
			decToInt(m[6]),
		))
	}
	// surface truncated reads instead of swallowing them
	if err := scanner.Err(); err != nil {
		return entries, err
	}
	return entries, nil
}
opensnitch-1.5.8.1/daemon/opensnitch.spec 0000664 0000000 0000000 00000005312 14401326716 0020334 0 ustar 00root root 0000000 0000000 Name: opensnitch
Version: 1.5.8
Release: 1%{?dist}
Summary: OpenSnitch is a GNU/Linux application firewall
License: GPLv3+
URL: https://github.com/evilsocket/%{name}
Source0: https://github.com/evilsocket/%{name}/releases/download/v%{version}/%{name}_%{version}.orig.tar.gz
#BuildArch: x86_64
#BuildRequires: godep
Requires(post): info
Requires(preun): info
%description
Whenever a program makes a connection, it'll prompt the user to allow or deny
it.
The user can decide if block the outgoing connection based on properties of
the connection: by port, by uid, by dst ip, by program or a combination
of them.
These rules can last forever, until the app restart or just one time.
The GUI allows the user to view live outgoing connections, as well as search
by process, user, host or port.
%prep
rm -rf %{buildroot}
%setup
%build
mkdir -p go/src/github.com/evilsocket
ln -s $(pwd) go/src/github.com/evilsocket/opensnitch
export GOPATH=$(pwd)/go
cd go/src/github.com/evilsocket/opensnitch/
make protocol
cd go/src/github.com/evilsocket/opensnitch/daemon/
go mod vendor
go build -o opensnitchd .
%install
mkdir -p %{buildroot}/usr/bin/ %{buildroot}/usr/lib/systemd/system/ %{buildroot}/etc/opensnitchd/rules %{buildroot}/etc/logrotate.d
sed -i 's/\/usr\/local/\/usr/' daemon/opensnitchd.service
install -m 755 daemon/opensnitchd %{buildroot}/usr/bin/opensnitchd
install -m 644 daemon/opensnitchd.service %{buildroot}/usr/lib/systemd/system/opensnitch.service
install -m 644 debian/opensnitch.logrotate %{buildroot}/etc/logrotate.d/opensnitch
B=""
if [ -f /etc/opensnitchd/default-config.json ]; then
B="-b"
fi
install -m 644 -b $B daemon/default-config.json %{buildroot}/etc/opensnitchd/default-config.json
B=""
if [ -f /etc/opensnitchd/system-fw.json ]; then
B="-b"
fi
install -m 644 -b $B daemon/system-fw.json %{buildroot}/etc/opensnitchd/system-fw.json
install -m 644 ebpf_prog/opensnitch.o %{buildroot}/etc/opensnitchd/opensnitch.o
# upgrade, uninstall
%preun
systemctl stop opensnitch.service || true
%post
if [ $1 -eq 1 ]; then
systemctl enable opensnitch.service
fi
systemctl start opensnitch.service
# uninstall,upgrade
%postun
if [ $1 -eq 0 ]; then
systemctl disable opensnitch.service
fi
if [ $1 -eq 0 -a -f /etc/logrotate.d/opensnitch ]; then
rm /etc/logrotate.d/opensnitch
fi
# postun is the last step after reinstalling
if [ $1 -eq 1 ]; then
systemctl start opensnitch.service
fi
%clean
rm -rf %{buildroot}
%files
%{_bindir}/opensnitchd
/usr/lib/systemd/system/opensnitch.service
%{_sysconfdir}/opensnitchd/default-config.json
%{_sysconfdir}/opensnitchd/system-fw.json
%{_sysconfdir}/opensnitchd/opensnitch.o
%{_sysconfdir}/logrotate.d/opensnitch
opensnitch-1.5.8.1/daemon/opensnitchd.service 0000664 0000000 0000000 00000000673 14401326716 0021213 0 ustar 00root root 0000000 0000000 [Unit]
Description=OpenSnitch is a GNU/Linux port of the Little Snitch application firewall.
Documentation=https://github.com/evilsocket/opensnitch/wiki
Wants=network.target
After=network.target
[Service]
Type=simple
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /etc/opensnitchd/rules
ExecStart=/usr/local/bin/opensnitchd -rules-path /etc/opensnitchd/rules
Restart=always
RestartSec=30
[Install]
WantedBy=multi-user.target
opensnitch-1.5.8.1/daemon/procmon/ 0000775 0000000 0000000 00000000000 14401326716 0016762 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/procmon/activepids.go 0000664 0000000 0000000 00000004604 14401326716 0021450 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/log"
)
// value associates a cached Process with the start time of the kernel task
// that currently owns its PID.
type value struct {
	Process *Process
	//Starttime uniquely identifies a process, it is the 22nd value in /proc/<pid>/stat
	//if another process starts with the same PID, its Starttime will be unique
	Starttime uint64
}
var (
	// activePids caches the processes known to have network activity,
	// keyed by PID; guarded by activePidsLock.
	activePids     = make(map[uint64]value)
	activePidsLock = sync.RWMutex{}
)
//MonitorActivePids checks that each process in activePids
//is still running and if not running (or another process with the same pid is running),
//removes the pid from activePids
func MonitorActivePids() {
	for {
		time.Sleep(time.Second)
		activePidsLock.Lock()
		for k, v := range activePids {
			data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", k))
			if err != nil {
				//file does not exists, pid has quit
				delete(activePids, k)
				pidsCache.delete(int(k))
				continue
			}
			stat := string(data)
			// The command name (field 2) is wrapped in parentheses and may
			// itself contain spaces, so naively splitting the whole line on
			// spaces (as the old code did) mis-locates field 22. Skip past
			// the last ')' first; after it, fields[0] is field 3 (state)
			// and the starttime (field 22) sits at index 19.
			end := strings.LastIndexByte(stat, ')')
			var fields []string
			if end >= 0 {
				fields = strings.Fields(stat[end+1:])
			}
			if len(fields) < 20 {
				log.Error("Could not find Starttime. This should never happen. Please report this incident to the Opensnitch developers: %s", stat)
				delete(activePids, k)
				pidsCache.delete(int(k))
				continue
			}
			startTime, err := strconv.ParseUint(fields[19], 10, 64)
			if err != nil {
				log.Error("Could not find or convert Starttime. This should never happen. Please report this incident to the Opensnitch developers: %v", err)
				delete(activePids, k)
				pidsCache.delete(int(k))
				continue
			}
			if startTime != v.Starttime {
				//extremely unlikely: the original process has quit and another process
				//was started with the same PID - all this in less than 1 second
				log.Error("Same PID but different Starttime. Please report this incident to the Opensnitch developers.")
				delete(activePids, k)
				pidsCache.delete(int(k))
				continue
			}
		}
		activePidsLock.Unlock()
	}
}
// findProcessInActivePidsCache returns the cached Process for the given
// pid, or nil when the pid is not tracked.
func findProcessInActivePidsCache(pid uint64) *Process {
	// a read lock is sufficient: the map is only inspected here
	// (the old code took the exclusive write lock for this pure read)
	activePidsLock.RLock()
	defer activePidsLock.RUnlock()
	if value, ok := activePids[pid]; ok {
		return value.Process
	}
	return nil
}
// addToActivePidsCache records proc in activePids together with its
// Starttime (field 22 of /proc/<pid>/stat), which uniquely identifies this
// incarnation of the PID. If the stat file is gone or unparseable the
// process is silently skipped (it most likely already exited).
func addToActivePidsCache(pid uint64, proc *Process) {
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		//most likely the process has quit by now
		return
	}
	stat := string(data)
	// the command name (field 2) may contain spaces; skip past its closing
	// ')' before splitting, then starttime (field 22) is at index 19
	end := strings.LastIndexByte(stat, ')')
	var fields []string
	if end >= 0 {
		fields = strings.Fields(stat[end+1:])
	}
	if len(fields) < 20 {
		log.Error("Could not find Starttime. This should never happen. Please report this incident to the Opensnitch developers: %s", stat)
		return
	}
	startTime, err2 := strconv.ParseUint(fields[19], 10, 64)
	if err2 != nil {
		// log the actual parse error (the old code logged `err`, which is
		// always nil on this path)
		log.Error("Could not find or convert Starttime. This should never happen. Please report this incident to the Opensnitch developers: %v", err2)
		return
	}
	activePidsLock.Lock()
	activePids[pid] = value{
		Process:   proc,
		Starttime: startTime,
	}
	activePidsLock.Unlock()
}
opensnitch-1.5.8.1/daemon/procmon/activepids_test.go 0000664 0000000 0000000 00000006033 14401326716 0022505 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"math/rand"
"os"
"os/exec"
"syscall"
"testing"
"time"
)
//TestMonitorActivePids starts helper processes, adds them to activePids
//and then kills them and checks if monitorActivePids() removed the killed processes
//from activePids
func TestMonitorActivePids(t *testing.T) {
	if os.Getenv("helperBinaryMode") == "on" {
		//we are in the "helper binary" mode, we were started with helperCmd.Start() (see below)
		//do nothing, just wait to be killed
		time.Sleep(time.Second * 10)
		os.Exit(1) //will never get here; but keep it here just in case
	}
	//we are in a normal "go test" mode
	tmpDir := "/tmp/ostest_" + randString()
	os.Mkdir(tmpDir, 0777)
	fmt.Println("tmp dir", tmpDir)
	defer os.RemoveAll(tmpDir)
	go MonitorActivePids()
	//build a "helper binary" with "go test -c -o /tmp/path" and put it into a tmp dir
	// (the helper is this very test binary, re-run with helperBinaryMode=on)
	helperBinaryPath := tmpDir + "/helper1"
	goExecutable, _ := exec.LookPath("go")
	cmd := exec.Command(goExecutable, "test", "-c", "-o", helperBinaryPath)
	if err := cmd.Run(); err != nil {
		t.Error("Error running go test -c", err)
	}
	var numberOfHelpers = 5
	var helperProcs []*Process
	//start helper binaries
	for i := 0; i < numberOfHelpers; i++ {
		var helperCmd *exec.Cmd
		helperCmd = &exec.Cmd{
			Path: helperBinaryPath,
			Args: []string{helperBinaryPath},
			Env:  []string{"helperBinaryMode=on"},
		}
		if err := helperCmd.Start(); err != nil {
			t.Error("Error starting helper binary", err)
		}
		go func() {
			helperCmd.Wait() //must Wait(), otherwise the helper process becomes a zombie when kill()ed
		}()
		pid := helperCmd.Process.Pid
		proc := NewProcess(pid, helperBinaryPath)
		helperProcs = append(helperProcs, proc)
		addToActivePidsCache(uint64(pid), proc)
	}
	//sleep to make sure all processes started before we proceed
	time.Sleep(time.Second * 1)
	//make sure all PIDS are in the cache
	for i := 0; i < numberOfHelpers; i++ {
		proc := helperProcs[i]
		pid := proc.ID
		foundProc := findProcessInActivePidsCache(uint64(pid))
		if foundProc == nil {
			t.Error("PID not found among active processes", pid)
		}
		if proc.Path != foundProc.Path || proc.ID != foundProc.ID {
			t.Error("PID or path doesn't match with the found process")
		}
	}
	//kill all helpers except for one
	for i := 0; i < numberOfHelpers-1; i++ {
		if err := syscall.Kill(helperProcs[i].ID, syscall.SIGTERM); err != nil {
			t.Error("error in syscall.Kill", err)
		}
	}
	//give the cache time to remove killed processes
	// (MonitorActivePids polls /proc once per second)
	time.Sleep(time.Second * 1)
	//make sure only the alive process is in the cache
	foundProc := findProcessInActivePidsCache(uint64(helperProcs[numberOfHelpers-1].ID))
	if foundProc == nil {
		t.Error("last alive PID is not found among active processes", foundProc)
	}
	if len(activePids) != 1 {
		t.Error("more than 1 active PIDs left in cache")
	}
}
// randString returns a 10-character random string of ASCII letters,
// reseeding the default source from the current time.
func randString() string {
	rand.Seed(time.Now().UnixNano())
	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	out := make([]rune, 10)
	for i := range out {
		out[i] = rune(letters[rand.Intn(len(letters))])
	}
	return string(out)
}
opensnitch-1.5.8.1/daemon/procmon/audit/ 0000775 0000000 0000000 00000000000 14401326716 0020070 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/procmon/audit/client.go 0000664 0000000 0000000 00000022076 14401326716 0021704 0 ustar 00root root 0000000 0000000 // Package audit reads auditd events from the builtin af_unix plugin, and parses
// the messages in order to proactively monitor pids which make connections.
// Once a connection is made and redirected to us via NFQUEUE, we
// lookup the connection inode in /proc, and add the corresponding PID with all
// the information of the process to a list of known PIDs.
//
// TODO: Prompt the user to allow/deny a connection/program as soon as it's
// started.
//
// Requisities:
// - install auditd and audispd-plugins
// - enable af_unix plugin /etc/audisp/plugins.d/af_unix.conf (active = yes)
// - auditctl -a always,exit -F arch=b64 -S socket,connect,execve -k opensnitchd
// - increase /etc/audisp/audispd.conf q_depth if there're dropped events
// - set write_logs to no if you don't need/want audit logs to be stored in the disk.
//
// read messages from the pipe to verify that it's working:
// socat unix-connect:/var/run/audispd_events stdio
//
// Audit event fields:
// https://github.com/linux-audit/audit-documentation/blob/master/specs/fields/field-dictionary.csv
// Record types:
// https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Security_Guide/sec-Audit_Record_Types.html
//
// Documentation:
// https://github.com/linux-audit/audit-documentation
package audit
import (
"bufio"
"fmt"
"io"
"net"
"os"
"runtime"
"sort"
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
// Event represents an audit event, which in our case can be an event of type
// socket, execve, socketpair or connect.
// Field names in the trailing comments refer to the corresponding auditd
// record keys they are parsed from (see populateEvent).
type Event struct {
	Timestamp   string // audit(xxxxxxx:nnnn)
	Serial      string
	ProcName    string // comm
	ProcPath    string // exe
	ProcCmdLine string // proctitle
	ProcDir     string // cwd
	ProcMode    string // mode
	TTY         string
	Pid         int
	UID         int
	Gid         int
	PPid        int
	EUid        int
	EGid        int
	OUid        int
	OGid        int
	UserName    string // auid
	DstHost     net.IP
	DstPort     int
	NetFamily   string // inet, inet6, local
	Success     string
	INode       int
	Dev         string
	Syscall     int
	Exit        int
	EventType   string
	RawEvent    string
	// LastSeen is refreshed on every matching event; entries older than
	// MaxEventAge are evicted by cleanOldEvents()
	LastSeen time.Time
}
// MaxEventAge is the maximum minutes an audit process can live without network activity.
const (
MaxEventAge = int(10)
)
var (
// Lock holds a mutex
Lock sync.RWMutex
ourPid = os.Getpid()
// cache of events
events []*Event
eventsCleaner *time.Ticker
eventsCleanerChan = (chan bool)(nil)
// TODO: EventChan is an output channel where incoming auditd events will be written.
// If a client opens it.
EventChan = (chan Event)(nil)
eventsExitChan = (chan bool)(nil)
auditConn net.Conn
// TODO: we may need arm arch
rule64 = []string{"exit,always", "-F", "arch=b64", "-F", fmt.Sprint("ppid!=", ourPid), "-F", fmt.Sprint("pid!=", ourPid), "-S", "socket,connect", "-k", "opensnitch"}
rule32 = []string{"exit,always", "-F", "arch=b32", "-F", fmt.Sprint("ppid!=", ourPid), "-F", fmt.Sprint("pid!=", ourPid), "-S", "socketcall", "-F", "a0=1", "-k", "opensnitch"}
audispdPath = "/var/run/audispd_events"
)
// OPENSNITCH_RULES_KEY is the mark we place on every event we are interested in.
const (
OpensnitchRulesKey = "key=\"opensnitch\""
)
// GetEvents returns a snapshot of the list of processes which have opened
// a connection.
// A copy is taken under the read lock so callers can iterate it without
// racing against concurrent AddEvent/cleanOldEvents mutations (the old
// code handed out the live slice with no locking).
func GetEvents() []*Event {
	Lock.RLock()
	defer Lock.RUnlock()
	evs := make([]*Event, len(events))
	copy(evs, events)
	return evs
}
// GetEventByPid returns the cached event for the given pid, or nil when
// the pid has no recorded network activity.
func GetEventByPid(pid int) *Event {
	Lock.RLock()
	defer Lock.RUnlock()
	for _, ev := range events {
		if ev.Pid == pid {
			return ev
		}
	}
	return nil
}
// sortEvents sorts received events by time and elapsed time since latest network activity.
// newest PIDs will be placed on top of the list.
// Since both the UnixNano comparison and the elapsed-time comparison of the
// old code reduce to ordering by LastSeen, the timestamps are compared
// directly, avoiding a time.Now() call on every comparison.
// Callers hold Lock (AddEvent does) while invoking this.
func sortEvents() {
	sort.Slice(events, func(i, j int) bool {
		return events[i].LastSeen.After(events[j].LastSeen)
	})
}
// cleanOldEvents deletes the PIDs which do not exist or that are too old to
// live.
// We start searching from the oldest to the newest.
// If the last network activity of a PID has been greater than MaxEventAge,
// then it'll be deleted.
func cleanOldEvents() {
	Lock.Lock()
	defer Lock.Unlock()
	// one timestamp for the whole sweep; no need to re-read the clock on
	// every iteration as the old code did
	now := time.Now()
	for n := len(events) - 1; n >= 0; n-- {
		elapsedTime := now.Sub(events[n].LastSeen)
		if int(elapsedTime.Minutes()) >= MaxEventAge {
			events = append(events[:n], events[n+1:]...)
			continue
		}
		// drop entries whose process no longer exists
		if core.Exists(fmt.Sprint("/proc/", events[n].Pid)) == false {
			events = append(events[:n], events[n+1:]...)
		}
	}
}
// deleteEvent removes from the cache the first event whose Pid or PPid
// matches the given pid.
// NOTE(review): the slice is scanned here without holding Lock, while
// deleteEventByIndex takes it only for the removal itself; a concurrent
// AddEvent could shift indexes between the scan and the delete — confirm
// callers' locking expectations.
func deleteEvent(pid int) {
	for n := range events {
		if events[n].Pid == pid || events[n].PPid == pid {
			deleteEventByIndex(n)
			break
		}
	}
}
// deleteEventByIndex removes the event at the given position.
// It acquires the write lock itself, so callers must not already hold Lock
// or this will deadlock.
func deleteEventByIndex(index int) {
	Lock.Lock()
	events = append(events[:index], events[index+1:]...)
	Lock.Unlock()
}
// AddEvent adds new event to the list of PIDs which have generate network
// activity.
// If the PID is already in the list, the LastSeen field is updated, to keep
// it alive.
func AddEvent(aevent *Event) {
	if aevent == nil {
		return
	}
	Lock.Lock()
	defer Lock.Unlock()
	for n := 0; n < len(events); n++ {
		if events[n].Pid == aevent.Pid && events[n].Syscall == aevent.Syscall {
			// replace the cached event only when the new one carries a
			// command line (or an identical one), so a richer cached entry
			// is not overwritten by a sparser record of the same set
			if aevent.ProcCmdLine != "" || (aevent.ProcCmdLine == events[n].ProcCmdLine) {
				events[n] = aevent
			}
			events[n].LastSeen = time.Now()
			sortEvents()
			return
		}
	}
	// unknown PID/syscall pair: prepend so the newest event is found first
	aevent.LastSeen = time.Now()
	events = append([]*Event{aevent}, events...)
}
// startEventsCleaner will review if the events in the cache need to be cleaned
// every 5 minutes, until a value arrives on eventsCleanerChan.
func startEventsCleaner() {
	for {
		select {
		case <-eventsCleanerChan:
			log.Debug("audit: cleanerRoutine stopped")
			return
		case <-eventsCleaner.C:
			cleanOldEvents()
		}
	}
}
// addRules installs the 64-bit and 32-bit audit rules that tag the
// socket/connect syscalls we care about with the "opensnitch" key.
// It returns true only if both auditctl invocations succeeded.
func addRules() bool {
	r64 := append([]string{"-A"}, rule64...)
	r32 := append([]string{"-A"}, rule32...)
	_, err64 := core.Exec("auditctl", r64)
	_, err32 := core.Exec("auditctl", r32)
	if err64 == nil && err32 == nil {
		return true
	}
	// label both errors consistently with deleteRules (the old message
	// labelled err64 as plain "err")
	log.Error("Error adding audit rules, err32=%v, err64=%v", err32, err64)
	return false
}
// configureSyscalls adjusts the syscall numbers we match against when the
// daemon is built for 32-bit x86; the package defaults are amd64 numbers.
func configureSyscalls() {
	// XXX: what about a i386 process running on a x86_64 system?
	if runtime.GOARCH == "386" {
		syscallSOCKET = "1"
		syscallCONNECT = "3"
		syscallSOCKETPAIR = "8"
	}
}
// deleteRules removes every audit rule tagged with the "opensnitch" key.
// auditctl is invoked twice, mirroring the two rules added by addRules;
// it returns true only when both invocations succeeded.
func deleteRules() bool {
	run := func() error {
		_, err := core.Exec("auditctl", []string{"-D", "-k", "opensnitch"})
		return err
	}
	err64 := run()
	err32 := run()
	if err64 == nil && err32 == nil {
		return true
	}
	log.Error("Error deleting audit rules, err32=%v, err64=%v", err32, err64)
	return false
}
// checkRules is a placeholder: it should verify that the opensnitch audit
// rules are still loaded in the kernel; for now it always reports success.
func checkRules() bool {
	// TODO
	return true
}
// checkStatus is a placeholder: it should verify that auditd is up and
// delivering events; for now it always reports success.
func checkStatus() bool {
	// TODO
	return true
}
// Reader reads events from audisd af_unix pipe plugin.
// If the auditd daemon is stopped or restarted, the reader handle
// is closed, so we need to restablish the connection.
// On EOF the connection is re-established; on any other read error the
// loop exits (the previous code used a bare "break", which only left the
// select statement and so spun forever on the same failed reader).
func Reader(r io.Reader, eventChan chan<- Event) {
	if r == nil {
		log.Error("Error reading auditd events. Is auditd running? is af_unix plugin enabled?")
		return
	}
	reader := bufio.NewReader(r)
	go startEventsCleaner()
	for {
		select {
		case <-eventsExitChan:
			goto Exit
		default:
			buf, _, err := reader.ReadLine()
			if err != nil {
				if err == io.EOF {
					log.Error("AuditReader: auditd stopped, reconnecting in 30s %s", err)
					if newReader, err := reconnect(); err == nil {
						reader = bufio.NewReader(newReader)
						log.Important("Auditd reconnected, continue reading")
					}
					continue
				}
				log.Warning("AuditReader: auditd error %s", err)
				goto Exit
			}
			parseEvent(string(buf[0:len(buf)]), eventChan)
		}
	}
Exit:
	log.Debug("audit.Reader() closed")
}
// StartChannel creates a channel to receive events from Audit.
// Launch audit.Reader() in a goroutine:
// go audit.Reader(c, (chan<- audit.Event)(audit.EventChan))
func StartChannel() {
	// unbuffered, exactly as before: make(chan Event, 0) == make(chan Event)
	EventChan = make(chan Event)
}
// reconnect removes our audit rules, waits 30 seconds for auditd to come
// back, then re-establishes the connection (connect re-adds the rules).
func reconnect() (net.Conn, error) {
	deleteRules()
	time.Sleep(30 * time.Second)
	return connect()
}
// connect installs the audit rules and opens the audisp af_unix events
// socket.
func connect() (net.Conn, error) {
	addRules()
	// TODO: make the unix socket path configurable
	return net.Dial("unix", audispdPath)
}
// Stop stops listening for events from auditd and delete the auditd rules.
// NOTE(review): the sends on eventsExitChan/eventsCleanerChan block until
// the Reader / startEventsCleaner goroutines receive them — confirm both
// are always running whenever Stop() is called, or this could hang.
func Stop() {
	if auditConn != nil {
		if err := auditConn.Close(); err != nil {
			log.Warning("audit.Stop() error closing socket: %v", err)
		}
	}
	if eventsCleaner != nil {
		eventsCleaner.Stop()
	}
	if eventsExitChan != nil {
		eventsExitChan <- true
		close(eventsExitChan)
	}
	if eventsCleanerChan != nil {
		eventsCleanerChan <- true
		close(eventsCleanerChan)
	}
	deleteRules()
	if EventChan != nil {
		close(EventChan)
	}
}
// Start makes a new connection to the audisp af_unix socket.
// The connection is stored in the package-level auditConn so that Stop()
// can close it later: the old code used ":=", which declared a new local
// variable shadowing the package one and left Stop() with a nil handle.
func Start() (net.Conn, error) {
	var err error
	auditConn, err = connect()
	if err != nil {
		log.Error("auditd Start() connection error %v", err)
		deleteRules()
		return nil, err
	}
	configureSyscalls()
	eventsCleaner = time.NewTicker(time.Minute * 5)
	eventsCleanerChan = make(chan bool)
	eventsExitChan = make(chan bool)
	return auditConn, err
}
opensnitch-1.5.8.1/daemon/procmon/audit/parse.go 0000664 0000000 0000000 00000017246 14401326716 0021543 0 ustar 00root root 0000000 0000000 package audit
import (
"encoding/hex"
"fmt"
"net"
"regexp"
"strconv"
"strings"
)
var (
newEvent = false
netEvent = &Event{}
// RegExp for parse audit messages
// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security_guide/sec-understanding_audit_log_files
auditRE, _ = regexp.Compile(`([a-zA-Z0-9\-_]+)=([a-zA-Z0-9:'\-\/\"\.\,_\(\)]+)`)
rawEvent = make(map[string]string)
)
// amd64 syscalls definition
// if the platform is not amd64, it's redefined on Start()
var (
syscallSOCKET = "41"
syscallCONNECT = "42"
syscallSOCKETPAIR = "53"
syscallEXECVE = "59"
syscallSOCKETCALL = "102"
)
// /usr/include/x86_64-linux-gnu/bits/socket_type.h
const (
sockSTREAM = "1"
sockDGRAM = "2"
sockRAW = "3"
sockSEQPACKET = "5"
sockPACKET = "10"
// /usr/include/x86_64-linux-gnu/bits/socket.h
pfUNSPEC = "0"
pfLOCAL = "1" // PF_UNIX
pfINET = "2"
pfINET6 = "10"
// /etc/protocols
protoIP = "0"
protoTCP = "6"
protoUDP = "17"
)
// https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Security_Guide/sec-Audit_Record_Types.html
const (
AuditTypePROCTITLE = "type=PROCTITLE"
AuditTypeCWD = "type=CWD"
AuditTypePATH = "type=PATH"
AuditTypeEXECVE = "type=EXECVE"
AuditTypeSOCKADDR = "type=SOCKADDR"
AuditTypeSOCKETCALL = "type=SOCKETCALL"
AuditTypeEOE = "type=EOE"
)
var (
syscallSOCKETstr = fmt.Sprint("syscall=", syscallSOCKET)
syscallCONNECTstr = fmt.Sprint("syscall=", syscallCONNECT)
syscallSOCKETPAIRstr = fmt.Sprint("syscall=", syscallSOCKETPAIR)
syscallEXECVEstr = fmt.Sprint("syscall=", syscallEXECVE)
syscallSOCKETCALLstr = fmt.Sprint("syscall=", syscallSOCKETCALL)
)
// parseNetLine parses a SOCKADDR message type of the form:
// saddr string: inet6 host:2001:4860:4860::8888 serv:53
// It returns the address family and, for inet/inet6 families, the
// destination host and port (port is 0 when absent, -1 when unparseable).
// The leftover debug code that hex-decoded raw IPv4 saddr values and
// printed them to stdout has been removed, and all slicing is now
// length-guarded so short/garbled records cannot panic.
func parseNetLine(line string, decode bool) (family string, dstHost net.IP, dstPort int) {
	if decode == true {
		line = decodeString(line)
	}
	pieces := strings.Split(line, " ")
	family = pieces[0]
	// only inet/inet6 records carry host/port information
	if len(family) < 4 || family[:4] != "inet" {
		return family, dstHost, 0
	}
	if len(pieces) > 1 && len(pieces[1]) >= 5 && pieces[1][:5] == "host:" {
		dstHost = net.ParseIP(strings.Split(pieces[1], "host:")[1])
	}
	if len(pieces) > 2 && len(pieces[2]) >= 5 && pieces[2][:5] == "serv:" {
		_dstPort, err := strconv.Atoi(strings.Split(line, "serv:")[1])
		if err != nil {
			dstPort = -1
		} else {
			dstPort = _dstPort
		}
	}
	return family, dstHost, dstPort
}
// decodeString will try to decode a string encoded in hexadecimal.
// If the string can not be decoded, the original string will be returned.
// In that case, usually it means that it's a non-encoded string.
func decodeString(s string) string {
	if decoded, err := hex.DecodeString(s); err == nil {
		return string(decoded)
	}
	return s
}
// extractFields parses an audit raw message and stores every key=value
// pair found in it into newEvent.
// When nothing can be extracted the map is simply left unmodified: the old
// code assigned nil to the pointer parameter, but that only rebinds the
// local copy and is invisible to the caller, so it was dead code.
func extractFields(rawMessage string, newEvent *map[string]string) {
	Lock.Lock()
	defer Lock.Unlock()
	if auditRE == nil {
		return
	}
	fieldList := auditRE.FindAllStringSubmatch(rawMessage, -1)
	if fieldList == nil {
		return
	}
	for _, field := range fieldList {
		(*newEvent)[field[1]] = field[2]
	}
}
// populateEvent populates our Event from a raw parsed message.
// Each case maps one auditd record key to the corresponding Event field;
// hex-encoded string values (exe, comm, cwd) are decoded and unquoted.
// Events for the "local" family are discarded by returning nil.
func populateEvent(aevent *Event, eventFields *map[string]string) *Event {
	if aevent == nil {
		return nil
	}
	Lock.Lock()
	defer Lock.Unlock()
	for k, v := range *eventFields {
		switch k {
		//case "a0":
		//case "a1":
		//case "a2":
		case "fam":
			if v == "local" {
				// unix sockets generate no network activity we care about
				return nil
			}
			aevent.NetFamily = v
		case "lport":
			aevent.DstPort, _ = strconv.Atoi(v)
		// TODO
		/*case "addr":
			fmt.Println("addr: ", v)
		case "daddr":
			fmt.Println("daddr: ", v)
		case "laddr":
			aevent.DstHost = net.ParseIP(v)
		case "saddr":
			parseNetLine(v, true)
			fmt.Println("saddr:", v)
		*/
		case "exe":
			aevent.ProcPath = strings.Trim(decodeString(v), "\"")
		case "comm":
			aevent.ProcName = strings.Trim(decodeString(v), "\"")
		// proctitle may be truncated to 128 characters, so don't rely on it, parse /proc/<pid>/ instead
		//case "proctitle":
		//	aevent.ProcCmdLine = strings.Trim(decodeString(v), "\"")
		case "tty":
			aevent.TTY = v
		case "pid":
			aevent.Pid, _ = strconv.Atoi(v)
		case "ppid":
			aevent.PPid, _ = strconv.Atoi(v)
		case "uid":
			aevent.UID, _ = strconv.Atoi(v)
		case "gid":
			aevent.Gid, _ = strconv.Atoi(v)
		case "success":
			aevent.Success = v
		case "cwd":
			aevent.ProcDir = strings.Trim(decodeString(v), "\"")
		case "inode":
			aevent.INode, _ = strconv.Atoi(v)
		case "dev":
			aevent.Dev = v
		case "mode":
			aevent.ProcMode = v
		case "ouid":
			aevent.OUid, _ = strconv.Atoi(v)
		case "ogid":
			aevent.OGid, _ = strconv.Atoi(v)
		case "syscall":
			aevent.Syscall, _ = strconv.Atoi(v)
		case "exit":
			aevent.Exit, _ = strconv.Atoi(v)
		case "type":
			aevent.EventType = v
		case "msg":
			// NOTE(review): assumes v looks like audit(<ts>:<serial>) —
			// v[6:] skips "audit(" and the trailing ')' is dropped from the
			// serial; confirm this holds for every record type received.
			parts := strings.Split(v[6:], ":")
			aevent.Timestamp = parts[0]
			aevent.Serial = parts[1][:len(parts[1])-1]
		}
	}
	return aevent
}
// parseEvent parses an auditd event, discards the unwanted ones, and adds
// the ones we're interested in to an array.
// We're only interested in the socket,socketpair,connect and execve syscalls.
// Events from us are excluded.
//
// When we receive an event, we parse and add it to the list as soon as we
// can. If the next messages of the set have additional information, we
// update the event. The record types below are checked in the same order
// as the original if/else chain, so behavior is unchanged.
func parseEvent(rawMessage string, eventChan chan<- Event) {
	if newEvent == false && !strings.Contains(rawMessage, OpensnitchRulesKey) {
		return
	}

	aEvent := make(map[string]string)

	// First message of a set: one of the syscalls we monitor starts a
	// brand new event.
	if strings.Contains(rawMessage, syscallSOCKETstr) ||
		strings.Contains(rawMessage, syscallCONNECTstr) ||
		strings.Contains(rawMessage, syscallSOCKETPAIRstr) ||
		strings.Contains(rawMessage, syscallEXECVEstr) ||
		strings.Contains(rawMessage, syscallSOCKETCALLstr) {
		extractFields(rawMessage, &aEvent)
		if aEvent == nil {
			return
		}
		newEvent = true
		netEvent = &Event{}
		netEvent = populateEvent(netEvent, &aEvent)
		AddEvent(netEvent)
		return
	}
	// The remaining record types only enrich an event under construction.
	if newEvent == false {
		return
	}
	// Complementary messages that add information to the current event.
	for _, msgType := range []string{AuditTypePROCTITLE, AuditTypeCWD, AuditTypeEXECVE, AuditTypePATH, AuditTypeSOCKADDR} {
		if !strings.Contains(rawMessage, msgType) {
			continue
		}
		extractFields(rawMessage, &aEvent)
		if aEvent == nil {
			return
		}
		netEvent = populateEvent(netEvent, &aEvent)
		AddEvent(netEvent)
		// the SOCKADDR record carries the connection details, so the
		// event is ready to be delivered to the listener.
		if msgType == AuditTypeSOCKADDR && EventChan != nil {
			eventChan <- *netEvent
		}
		return
	}
	// End of event: mark the set as complete and deliver it.
	if strings.Contains(rawMessage, AuditTypeEOE) {
		newEvent = false
		AddEvent(netEvent)
		if EventChan != nil {
			eventChan <- *netEvent
		}
	}
}
opensnitch-1.5.8.1/daemon/procmon/cache.go 0000664 0000000 0000000 00000016423 14401326716 0020362 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"os"
"sort"
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/core"
)
// InodeItem represents an item of the InodesCache.
type InodeItem struct {
	sync.RWMutex
	Pid      int    // pid of the process that owns the socket inode
	FdPath   string // descriptor path used to verify the pid is still alive
	LastSeen int64  // UnixNano timestamp of the last lookup hit
}

// ProcItem represents an item of the pidsCache
type ProcItem struct {
	sync.RWMutex
	Pid         int      // cached process id
	FdPath      string   // /proc/<pid>/fd/ base path of the process
	Descriptors []string // cached list of open descriptors of the process
	LastSeen    int64    // UnixNano timestamp of the last lookup hit
}

// CacheProcs holds the cache of processes that have established connections.
type CacheProcs struct {
	sync.RWMutex
	items []*ProcItem // most recently seen pids are kept at the front
}

// CacheInodes holds the cache of Inodes.
// The key is formed as follow:
// inode+srcip+srcport+dstip+dstport
type CacheInodes struct {
	sync.RWMutex
	items map[string]*InodeItem
}
var (
	// cache of inodes, which help to not iterate over all the pidsCache and
	// descriptors of /proc/<pid>/fd/
	// 15-50us vs 50-80ms
	// we hit this cache when:
	// - we've blocked a connection and the process retries it several times until it gives up,
	// - or when a process timeouts connecting to an IP/domain and it retries it again,
	// - or when a process resolves a domain and then connects to the IP.
	inodesCache = NewCacheOfInodes()
	// maximum inactivity in cache, in minutes. Really rare, usually
	// entries last less than a minute.
	maxTTL = 3

	// 2nd cache of already known running pids, which also saves time by
	// iterating only over a few pids' descriptors, (30us-20ms vs. 50-80ms)
	// since it's more likely that most of the connections will be made by the
	// same (running) processes.
	// The cache is ordered by time, placing in the first places those PIDs with
	// active connections.
	pidsCache CacheProcs
	// NOTE(review): pidsDescriptorsCache is not referenced anywhere in this
	// file — confirm whether it is still needed.
	pidsDescriptorsCache = make(map[int][]string)

	// drives the periodic cleanup performed by CacheCleanerTask()
	cacheTicker = time.NewTicker(2 * time.Minute)
)
// CacheCleanerTask periodically evicts expired inodes from the cache.
// It blocks forever, so it must be run in its own goroutine.
func CacheCleanerTask() {
	for range cacheTicker.C {
		inodesCache.cleanup()
	}
}
// NewCacheOfInodes returns a new, empty cache for inodes.
func NewCacheOfInodes() *CacheInodes {
	cache := &CacheInodes{}
	cache.items = make(map[string]*InodeItem)
	return cache
}
//******************************************************************************
// items of the caches.

// updateTime refreshes the last-seen timestamp of this inode item.
func (i *InodeItem) updateTime() {
	i.Lock()
	defer i.Unlock()
	i.LastSeen = time.Now().UnixNano()
}

// getTime returns the last-seen timestamp of this inode item.
func (i *InodeItem) getTime() int64 {
	i.RLock()
	t := i.LastSeen
	i.RUnlock()
	return t
}

// updateTime refreshes the last-seen timestamp of this process item.
func (p *ProcItem) updateTime() {
	p.Lock()
	defer p.Unlock()
	p.LastSeen = time.Now().UnixNano()
}

// updateDescriptors replaces the cached descriptor list of this process.
func (p *ProcItem) updateDescriptors(descriptors []string) {
	p.Lock()
	defer p.Unlock()
	p.Descriptors = descriptors
}
//******************************************************************************
// cache of processes
// add caches the given pid with its descriptors directory and current
// descriptor list. If the pid is already cached, only its last-seen
// timestamp is refreshed.
func (c *CacheProcs) add(fdPath string, fdList []string, pid int) {
	c.Lock()
	defer c.Unlock()

	for _, cached := range c.items {
		if cached == nil {
			continue
		}
		if cached.Pid == pid {
			cached.updateTime()
			return
		}
	}
	newItem := &ProcItem{
		Pid:         pid,
		FdPath:      fdPath,
		Descriptors: fdList,
		LastSeen:    time.Now().UnixNano(),
	}
	// prepend: new pids are assumed to be the most active ones
	c.setItems([]*ProcItem{newItem}, c.items)
}
// sort moves the most recently seen PIDs to the front of the cache, so
// lookups for active processes hit early.
// If the given pid is already at the top, the sort is skipped.
func (c *CacheProcs) sort(pid int) {
	item := c.getItem(0)
	if item != nil && item.Pid == pid {
		return
	}
	// sort.Slice reorders the slice, so a write lock is required here;
	// the original RLock allowed concurrent mutation of c.items.
	c.Lock()
	defer c.Unlock()
	sort.Slice(c.items, func(i, j int) bool {
		// strictly "greater than": the original "t > u || t == u" (>=) is
		// not a valid less function for sort.Slice.
		return c.items[i].LastSeen > c.items[j].LastSeen
	})
}
// delete removes the given pid from the process cache, and also evicts
// every inode owned by it from the inodes cache.
func (c *CacheProcs) delete(pid int) {
	c.Lock()
	defer c.Unlock()

	for n, procItem := range c.items {
		if procItem.Pid == pid {
			c.deleteItem(n)
			inodesCache.delete(pid)
			break
		}
	}
}
// deleteItem removes the item at the given position; out-of-bounds
// positions are ignored.
// It does not lock: callers in this file (delete, getPid) already hold
// the write lock.
func (c *CacheProcs) deleteItem(pos int) {
	nItems := len(c.items)
	if pos < nItems {
		c.setItems(c.items[:pos], c.items[pos+1:])
	}
}
// setItems rebuilds the items slice as newItems followed by oldItems
// (i.e. it prepends newItems). Callers must hold the write lock.
func (c *CacheProcs) setItems(newItems []*ProcItem, oldItems []*ProcItem) {
	c.items = append(newItems, oldItems...)
}

// getItem returns the item at the given position, or nil when the
// position is out of bounds.
func (c *CacheProcs) getItem(index int) *ProcItem {
	c.RLock()
	defer c.RUnlock()

	if index < len(c.items) {
		return c.items[index]
	}
	return nil
}
// getItems returns the cached process items.
// The read lock guards the read of the slice header, consistent with the
// other accessors (the original took no lock at all).
// NOTE(review): the returned slice still shares storage with the cache;
// callers must not mutate it.
func (c *CacheProcs) getItems() []*ProcItem {
	c.RLock()
	defer c.RUnlock()
	return c.items
}
// countItems returns the number of cached process items.
func (c *CacheProcs) countItems() (count int) {
	c.RLock()
	count = len(c.items)
	c.RUnlock()
	return count
}
// getPid loops over the processes that have generated connections, looking
// for the one whose descriptors contain the given inode. It returns the
// pid and its position in the cache, or (-1, -1) when not found.
// For each cached process it first checks the cached descriptor list, and
// on a miss it re-reads the descriptors from /proc and retries once.
func (c *CacheProcs) getPid(inode int, inodeKey string, expect string) (int, int) {
	c.Lock()
	defer c.Unlock()

	for n, procItem := range c.items {
		if procItem == nil {
			continue
		}
		if idxDesc, _ := getPidDescriptorsFromCache(procItem.FdPath, inodeKey, expect, &procItem.Descriptors, procItem.Pid); idxDesc != -1 {
			procItem.updateTime()
			return procItem.Pid, n
		}
		// the cached descriptors are stale; re-read them from /proc.
		// A nil result means the process is gone, so drop it.
		descriptors := lookupPidDescriptors(procItem.FdPath, procItem.Pid)
		if descriptors == nil {
			// NOTE(review): deleteItem(n) shrinks the slice being ranged
			// over, so the element that shifts into position n is skipped
			// this pass — confirm this is acceptable.
			c.deleteItem(n)
			continue
		}

		procItem.updateDescriptors(descriptors)
		if idxDesc, _ := getPidDescriptorsFromCache(procItem.FdPath, inodeKey, expect, &descriptors, procItem.Pid); idxDesc != -1 {
			procItem.updateTime()
			return procItem.Pid, n
		}
	}
	return -1, -1
}
//******************************************************************************
// cache of inodes
// add caches an inode entry under the given key. When no descriptor link
// is supplied, /proc/<pid>/exe is used instead so getPid() still has a
// valid path to stat.
func (i *CacheInodes) add(key, descLink string, pid int) {
	i.Lock()
	defer i.Unlock()

	if len(descLink) == 0 {
		descLink = fmt.Sprint("/proc/", pid, "/exe")
	}
	entry := &InodeItem{
		FdPath:   descLink,
		Pid:      pid,
		LastSeen: time.Now().UnixNano(),
	}
	i.items[key] = entry
}
// delete evicts from the cache every inode owned by the given pid.
// (deleting from a map while ranging over it is safe in Go)
func (i *CacheInodes) delete(pid int) {
	i.Lock()
	defer i.Unlock()

	for key, entry := range i.items {
		if entry.Pid == pid {
			delete(i.items, key)
		}
	}
}
// getPid returns the cached pid for the given inode key, after verifying
// that the cached descriptor path still exists. When the process is gone,
// the stale entries are evicted from both caches and -1 is returned.
func (i *CacheInodes) getPid(inodeKey string) int {
	if item, ok := i.isInCache(inodeKey); ok {
		// sometimes the process may have disappeared at this point
		if _, err := os.Lstat(item.FdPath); err == nil {
			item.updateTime()
			return item.Pid
		}
		pidsCache.delete(item.Pid)
		i.delItem(inodeKey)
	}
	return -1
}
// delItem removes a single entry from the inodes cache.
func (i *CacheInodes) delItem(inodeKey string) {
	i.Lock()
	delete(i.items, inodeKey)
	i.Unlock()
}

// getItem returns the entry for the given key (nil when absent).
func (i *CacheInodes) getItem(inodeKey string) *InodeItem {
	i.RLock()
	defer i.RUnlock()
	return i.items[inodeKey]
}

// getItems returns the whole items map.
func (i *CacheInodes) getItems() map[string]*InodeItem {
	i.RLock()
	defer i.RUnlock()
	return i.items
}

// isInCache looks up the given key and reports whether it is cached.
func (i *CacheInodes) isInCache(inodeKey string) (*InodeItem, bool) {
	i.RLock()
	defer i.RUnlock()

	item, found := i.items[inodeKey]
	return item, found
}
// cleanup evicts entries whose descriptor path no longer exists, or which
// have been idle for more than maxTTL minutes. It is invoked periodically
// by CacheCleanerTask().
func (i *CacheInodes) cleanup() {
	now := time.Now()
	i.Lock()
	defer i.Unlock()
	for k := range i.items {
		if i.items[k] == nil {
			continue
		}
		lastSeen := now.Sub(
			time.Unix(0, i.items[k].getTime()),
		)
		if core.Exists(i.items[k].FdPath) == false || int(lastSeen.Minutes()) > maxTTL {
			delete(i.items, k)
		}
	}
}
// getPidDescriptorsFromCache scans the given descriptor list looking for
// the one whose symlink matches the expected socket link ("expect").
// On a hit it returns the index found and the (possibly reordered) list;
// on a miss it returns -1.
func getPidDescriptorsFromCache(fdPath, inodeKey, expect string, descriptors *[]string, pid int) (int, *[]string) {
	for fdIdx := 0; fdIdx < len(*descriptors); fdIdx++ {
		descLink := fmt.Sprint(fdPath, (*descriptors)[fdIdx])
		if link, err := os.Readlink(descLink); err == nil && link == expect {
			if fdIdx > 0 {
				// reordering helps to reduce look up times by a factor of 10.
				// (move the hit to the front of the list)
				fd := (*descriptors)[fdIdx]
				*descriptors = append((*descriptors)[:fdIdx], (*descriptors)[fdIdx+1:]...)
				*descriptors = append([]string{fd}, *descriptors...)
			}
			// refresh the inodes cache entry only if the key is already known
			if _, ok := inodesCache.isInCache(inodeKey); ok {
				inodesCache.add(inodeKey, descLink, pid)
			}
			return fdIdx, descriptors
		}
	}
	return -1, descriptors
}
opensnitch-1.5.8.1/daemon/procmon/cache_test.go 0000664 0000000 0000000 00000006605 14401326716 0021422 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"testing"
"time"
)
// TestCacheProcs exercises the pidsCache and inodesCache basic operations:
// add (insert + update), delete, bounds-checked item deletion and lookups.
func TestCacheProcs(t *testing.T) {
	fdList := []string{"0", "1", "2"}
	pidsCache.add(fmt.Sprint("/proc/", myPid, "/fd/"), fdList, myPid)
	t.Log("Pids in cache: ", pidsCache.countItems())

	t.Run("Test addProcEntry", func(t *testing.T) {
		if pidsCache.countItems() != 1 {
			t.Error("pidsCache should be 1")
		}
	})

	oldPid := pidsCache.getItem(0)
	// adding the same pid again must only refresh its LastSeen timestamp
	pidsCache.add(fmt.Sprint("/proc/", myPid, "/fd/"), fdList, myPid)
	t.Run("Test addProcEntry update", func(t *testing.T) {
		if pidsCache.countItems() != 1 {
			t.Error("pidsCache should still be 1!", pidsCache)
		}
		// NOTE(review): getItem() returns a pointer, so oldPid aliases the
		// cached item and oldPid.LastSeen already reflects the update —
		// the two times are always equal and this check is vacuous.
		oldTime := time.Unix(0, oldPid.LastSeen)
		newTime := time.Unix(0, pidsCache.getItem(0).LastSeen)
		if oldTime.Equal(newTime) == false {
			t.Error("pidsCache, time not updated: ", oldTime, newTime)
		}
	})

	pidsCache.add("/proc/2/fd", fdList, 2)
	pidsCache.delete(2)
	t.Run("Test deleteProcEntry", func(t *testing.T) {
		if pidsCache.countItems() != 1 {
			t.Error("pidsCache should be 1:", pidsCache.countItems())
		}
	})

	// fd "0" of this test process is expected to resolve to /dev/null
	// when the tests run detached from a tty.
	pid, _ := pidsCache.getPid(0, "", "/dev/null")
	t.Run("Test getPidFromCache", func(t *testing.T) {
		if pid != myPid {
			t.Error("pid not found in cache", pidsCache.countItems())
		}
	})

	// should not crash, and the number of items should still be 1
	pidsCache.deleteItem(1)
	t.Run("Test deleteItem check bounds", func(t *testing.T) {
		if pidsCache.countItems() != 1 {
			t.Error("deleteItem check bounds error", pidsCache.countItems())
		}
	})

	pidsCache.deleteItem(0)
	t.Run("Test deleteItem", func(t *testing.T) {
		if pidsCache.countItems() != 0 {
			t.Error("deleteItem error", pidsCache.countItems())
		}
	})
	t.Log("items in cache:", pidsCache.countItems())

	// the key of an inodeCache entry is formed as: inodeNumer + srcIP + srcPort + dstIP + dstPort
	inodeKey := "000000000127.0.0.144444127.0.0.153"
	// add() expects a path to the inode fd (/proc/<pid>/fd/12345), but as getPid() will check the path in order to retrieve the pid,
	// we just set it to "" and it'll use /proc/<pid>/exe
	inodesCache.add(inodeKey, "", myPid)
	t.Run("Test addInodeEntry", func(t *testing.T) {
		if _, found := inodesCache.items[inodeKey]; !found {
			t.Error("inodesCache, inode not added:", len(inodesCache.items), inodesCache.items)
		}
	})

	pid = inodesCache.getPid(inodeKey)
	t.Run("Test getPidByInodeFromCache", func(t *testing.T) {
		if pid != myPid {
			t.Error("inode not found in cache", pid, inodeKey, len(inodesCache.items), inodesCache.items)
		}
	})

	// should delete all inodes of a pid
	inodesCache.delete(myPid)
	t.Run("Test deleteInodeEntry", func(t *testing.T) {
		if _, found := inodesCache.items[inodeKey]; found {
			t.Error("inodesCache, key found in cache but it should not exist", inodeKey, len(inodesCache.items), inodesCache.items)
		}
	})
}
// Test getPidDescriptorsFromCache descriptors (inodes) reordering.
// When an inode (descriptor) is found, if it's pushed to the top of the list,
// the next time we look for it will cost -10x.
// Without reordering, the inode 0 will always be found on the 10th position,
// taking an average of 100us instead of 30.
// Benchmark results with reordering: ~5600ns/op, without: ~56000ns/op.
func BenchmarkGetPid(b *testing.B) {
	fdList := []string{"10", "9", "8", "7", "6", "5", "4", "3", "2", "1", "0"}
	pidsCache.add(fmt.Sprint("/proc/", myPid, "/fd/"), fdList, myPid)
	// exclude the cache setup above from the measured loop
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pidsCache.getPid(0, "", "/dev/null")
	}
}
opensnitch-1.5.8.1/daemon/procmon/details.go 0000664 0000000 0000000 00000011244 14401326716 0020740 0 ustar 00root root 0000000 0000000 package procmon
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/dns"
"github.com/evilsocket/opensnitch/daemon/netlink"
)
var socketsRegex, _ = regexp.Compile(`socket:\[([0-9]+)\]`)
// GetInfo collects information of a process: path, cwd, cmdline,
// environment, open descriptors, IO stats and status files.
// It fails only when /proc/<pid>/exe can not be read; every other reader
// is best effort and leaves its field untouched on error.
func (p *Process) GetInfo() error {
	if err := p.readPath(); err != nil {
		return err
	}
	p.readCwd()
	p.readCmdline()
	p.readEnv()
	p.readDescriptors()
	p.readIOStats()
	p.readStatus()
	// strip the " (deleted)" suffix last, after the raw path has been used
	p.cleanPath()
	return nil
}
// setCwd overrides the process' current working directory.
func (p *Process) setCwd(cwd string) {
	p.CWD = cwd
}

// readComm reads the short command name from /proc/<pid>/comm.
func (p *Process) readComm() error {
	raw, err := ioutil.ReadFile(fmt.Sprint("/proc/", p.ID, "/comm"))
	if err != nil {
		return err
	}
	p.Comm = core.Trim(string(raw))
	return nil
}
// readCwd resolves the process' current working directory from the
// /proc/<pid>/cwd symlink.
func (p *Process) readCwd() error {
	target, err := os.Readlink(fmt.Sprint("/proc/", p.ID, "/cwd"))
	if err != nil {
		return err
	}
	p.CWD = target
	return nil
}
// readEnv reads and parses the environment variables of a process from
// /proc/<pid>/environ (NUL-separated KEY=VALUE entries).
func (p *Process) readEnv() {
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/environ", p.ID))
	if err != nil {
		return
	}
	for _, entry := range strings.Split(string(data), "\x00") {
		kv := strings.SplitN(core.Trim(entry), "=", 2)
		if len(kv) != 2 {
			continue
		}
		p.Env[core.Trim(kv[0])] = core.Trim(kv[1])
	}
}
// readPath resolves the process binary path from the /proc/<pid>/exe
// symlink. An Lstat failure (process gone, or no permission) is returned
// as an error; a Readlink failure leaves p.Path untouched.
func (p *Process) readPath() error {
	linkName := fmt.Sprint("/proc/", p.ID, "/exe")
	if _, err := os.Lstat(linkName); err != nil {
		return err
	}
	link, err := os.Readlink(linkName)
	if err == nil {
		p.Path = link
	}
	return nil
}
// readCmdline reads /proc/<pid>/cmdline (NUL-separated arguments) and
// fills p.Args with the non-empty, trimmed fields.
func (p *Process) readCmdline() {
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", p.ID))
	if err != nil || len(data) == 0 {
		return
	}
	// arguments are NUL-separated; turn them into a space-separated line
	for i := range data {
		if data[i] == 0x00 {
			data[i] = byte(' ')
		}
	}

	p.Args = make([]string, 0)
	for _, field := range strings.Split(string(data), " ") {
		field = core.Trim(field)
		if field != "" {
			p.Args = append(p.Args, field)
		}
	}
}
// readDescriptors lists /proc/<pid>/fd/ and rebuilds p.Descriptors with
// one entry per open descriptor. For descriptors pointing to sockets, it
// enriches the symlink text with the connection endpoints and TCP state
// obtained via netlink.
func (p *Process) readDescriptors() {
	f, err := os.Open(fmt.Sprint("/proc/", p.ID, "/fd/"))
	if err != nil {
		return
	}
	// NOTE(review): the Readdir error is ignored; on failure fDesc is
	// empty and p.Descriptors ends up nil — confirm that is intended.
	fDesc, err := f.Readdir(-1)
	f.Close()
	p.Descriptors = nil

	for _, fd := range fDesc {
		tempFd := &procDescriptors{
			Name: fd.Name(),
		}
		if link, err := os.Readlink(fmt.Sprint("/proc/", p.ID, "/fd/", fd.Name())); err == nil {
			tempFd.SymLink = link
			socket := socketsRegex.FindStringSubmatch(link)
			if len(socket) > 0 {
				socketInfo, err := netlink.GetSocketInfoByInode(socket[1])
				if err == nil {
					tempFd.SymLink = fmt.Sprintf("socket:[%s] - %d:%s -> %s:%d, state: %s", fd.Name(),
						socketInfo.ID.SourcePort,
						socketInfo.ID.Source.String(),
						dns.HostOr(socketInfo.ID.Destination, socketInfo.ID.Destination.String()),
						socketInfo.ID.DestinationPort,
						netlink.TCPStatesMap[socketInfo.State])
				}
			}
			// NOTE(review): this stats the symlink *target* (link), not the
			// descriptor entry itself (descLink) — confirm which was meant.
			if linkInfo, err := os.Lstat(link); err == nil {
				tempFd.Size = linkInfo.Size()
				tempFd.ModTime = linkInfo.ModTime()
			}
		}
		p.Descriptors = append(p.Descriptors, tempFd)
	}
}
// readIOStats parses /proc/<pid>/io ("key: value" lines) into p.IOStats.
// Parse errors on individual values are silently ignored, leaving the
// corresponding counter at zero.
func (p *Process) readIOStats() {
	f, err := os.Open(fmt.Sprint("/proc/", p.ID, "/io"))
	if err != nil {
		return
	}
	defer f.Close()
	p.IOStats = &procIOstats{}

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), " ")
		switch s[0] {
		case "rchar:":
			p.IOStats.RChar, _ = strconv.ParseInt(s[1], 10, 64)
		case "wchar:":
			p.IOStats.WChar, _ = strconv.ParseInt(s[1], 10, 64)
		case "syscr:":
			p.IOStats.SyscallRead, _ = strconv.ParseInt(s[1], 10, 64)
		case "syscw:":
			p.IOStats.SyscallWrite, _ = strconv.ParseInt(s[1], 10, 64)
		case "read_bytes:":
			p.IOStats.ReadBytes, _ = strconv.ParseInt(s[1], 10, 64)
		case "write_bytes:":
			p.IOStats.WriteBytes, _ = strconv.ParseInt(s[1], 10, 64)
		}
	}
}
// readStatus loads the status, stat, stack, maps and statm files of the
// process from /proc. Each file is read best effort; a failed read leaves
// the corresponding field untouched.
func (p *Process) readStatus() {
	readProcFile := func(name string) (string, bool) {
		data, err := ioutil.ReadFile(fmt.Sprint("/proc/", p.ID, "/", name))
		if err != nil {
			return "", false
		}
		return string(data), true
	}

	if v, ok := readProcFile("status"); ok {
		p.Status = v
	}
	if v, ok := readProcFile("stat"); ok {
		p.Stat = v
	}
	if v, ok := readProcFile("stack"); ok {
		p.Stack = v
	}
	if v, ok := readProcFile("maps"); ok {
		p.Maps = v
	}
	if v, ok := readProcFile("statm"); ok {
		p.Statm = &procStatm{}
		fmt.Sscanf(v, "%d %d %d %d %d %d %d", &p.Statm.Size, &p.Statm.Resident, &p.Statm.Shared, &p.Statm.Text, &p.Statm.Lib, &p.Statm.Data, &p.Statm.Dt)
	}
}
// cleanPath strips the " (deleted)" suffix that /proc/<pid>/exe carries
// when the binary has been removed from disk.
func (p *Process) cleanPath() {
	const deletedSuffix = " (deleted)"
	if strings.HasSuffix(p.Path, deletedSuffix) {
		p.Path = strings.TrimSuffix(p.Path, deletedSuffix)
	}
}
opensnitch-1.5.8.1/daemon/procmon/ebpf/ 0000775 0000000 0000000 00000000000 14401326716 0017676 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/procmon/ebpf/cache.go 0000664 0000000 0000000 00000004000 14401326716 0021262 0 ustar 00root root 0000000 0000000 package ebpf
import (
"sync"
"time"
)
// ebpfCacheItem is one cached connection -> (pid, uid) resolution.
type ebpfCacheItem struct {
	Key      []byte // raw ebpf map key of the connection
	LastSeen int64  // UnixNano timestamp of the last hit
	UID      int    // uid that owns the connection
	Pid      int    // pid that owns the connection
	Hits     uint   // number of times this entry has been looked up
}

// ebpfCacheType is the in-memory cache of resolved ebpf connections.
type ebpfCacheType struct {
	Items map[string]*ebpfCacheItem
	sync.RWMutex
}

var (
	maxTTL          = 20 // Seconds an entry may stay idle in the cache
	maxCacheItems   = 5000
	ebpfCache       *ebpfCacheType
	ebpfCacheTicker *time.Ticker
)
// NewEbpfCacheItem creates a new cache item for the given connection key,
// starting with one hit and the current time as last-seen.
func NewEbpfCacheItem(key []byte, pid, uid int) *ebpfCacheItem {
	item := &ebpfCacheItem{
		Key:      key,
		Hits:     1,
		Pid:      pid,
		UID:      uid,
		LastSeen: time.Now().UnixNano(),
	}
	return item
}

// isValid reports whether this item's last-seen time is within maxTTL seconds.
func (i *ebpfCacheItem) isValid() bool {
	elapsed := time.Since(time.Unix(0, i.LastSeen))
	return int(elapsed.Seconds()) < maxTTL
}
// NewEbpfCache creates a new cache store and (re)starts the cleanup ticker.
func NewEbpfCache() *ebpfCacheType {
	ebpfCacheTicker = time.NewTicker(1 * time.Minute)
	cache := &ebpfCacheType{}
	cache.Items = make(map[string]*ebpfCacheItem)
	return cache
}

// addNewItem caches a freshly resolved (pid, uid) under the given key.
func (e *ebpfCacheType) addNewItem(key string, itemKey []byte, pid, uid int) {
	e.Lock()
	e.Items[key] = NewEbpfCacheItem(itemKey, pid, uid)
	e.Unlock()
}
// isInCache returns the item stored under key, when present and still
// valid. Expired entries are evicted on access, and when the cache has
// grown past maxCacheItems the old entries are purged afterwards.
func (e *ebpfCacheType) isInCache(key string) (item *ebpfCacheItem, found bool) {
	// Len() takes its own read lock, so it must be called before Lock()
	leng := e.Len()
	e.Lock()
	item, found = e.Items[key]
	if found {
		if item.isValid() {
			e.update(key, item)
		} else {
			found = false
			delete(e.Items, key)
		}
	}
	e.Unlock()

	if leng > maxCacheItems {
		e.DeleteOldItems()
	}
	return
}
// update refreshes the hit counter and last-seen time of an item.
// The caller (isInCache) must hold the write lock.
func (e *ebpfCacheType) update(key string, item *ebpfCacheItem) {
	item.Hits++
	item.LastSeen = time.Now().UnixNano()
	e.Items[key] = item
}

// Len returns the number of cached items.
func (e *ebpfCacheType) Len() int {
	e.RLock()
	defer e.RUnlock()
	return len(e.Items)
}
// DeleteOldItems evicts expired entries, and when the cache has grown past
// maxCacheItems it also evicts valid entries until it is back under the cap.
func (e *ebpfCacheType) DeleteOldItems() {
	length := e.Len()
	e.Lock()
	defer e.Unlock()

	for k, item := range e.Items {
		if length > maxCacheItems || !item.isValid() {
			delete(e.Items, k)
			// keep the counter in sync with the deletions: without this
			// the condition stayed true for every entry and an over-cap
			// cache was wiped completely instead of trimmed to the cap.
			length--
		}
	}
}
// clear empties the cache and stops the cleanup ticker.
// It is safe to call on a nil receiver.
func (e *ebpfCacheType) clear() {
	if e == nil {
		return
	}
	// hold the write lock while mutating the map; the original iterated
	// and deleted without any locking, racing with concurrent lookups.
	e.Lock()
	for k := range e.Items {
		delete(e.Items, k)
	}
	e.Unlock()

	if ebpfCacheTicker != nil {
		ebpfCacheTicker.Stop()
	}
}
opensnitch-1.5.8.1/daemon/procmon/ebpf/debug.go 0000664 0000000 0000000 00000005205 14401326716 0021315 0 ustar 00root root 0000000 0000000 package ebpf
import (
"fmt"
"os/exec"
"strconv"
"syscall"
"unsafe"
"github.com/evilsocket/opensnitch/daemon/log"
daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
elf "github.com/iovisor/gobpf/elf"
)
// dumpMap prints the whole contents of a bpf map to stdout.
// Used only for debugging.
// Key/value buffer sizes match the C-side structs: 12/36-byte keys for
// IPv4/IPv6 and 24-byte values (see the layout comments in find.go).
func dumpMap(bpfmap *elf.Map, isIPv6 bool) {
	var lookupKey []byte
	var nextKey []byte
	var value []byte

	if !isIPv6 {
		lookupKey = make([]byte, 12)
		nextKey = make([]byte, 12)
		value = make([]byte, 24)
	} else {
		lookupKey = make([]byte, 36)
		nextKey = make([]byte, 36)
		value = make([]byte, 24)
	}
	firstrun := true
	i := 0
	for {
		i++
		ok, err := m.LookupNextElement(bpfmap, unsafe.Pointer(&lookupKey[0]),
			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
		if err != nil {
			log.Error("eBPF LookupNextElement error: %v", err)
			return
		}
		if firstrun {
			// on first run lookupKey is a dummy, nothing to delete
			firstrun = false
			copy(lookupKey, nextKey)
			continue
		}
		fmt.Println("key, value", lookupKey, value)

		if !ok { //reached end of map
			break
		}
		copy(lookupKey, nextKey)
	}
}
//PrintEverything prints all the stats. used only for debugging.
// It dumps the bpf maps to files via bpftool, then prints the
// already-established connections and the current netlink socket dumps
// for tcp/tcp6/udp/udp6.
func PrintEverything() {
	bash, _ := exec.LookPath("bash")
	//get the number of the first map
	out, err := exec.Command(bash, "-c", "bpftool map show | head -n 1 | cut -d ':' -f1").Output()
	if err != nil {
		fmt.Println("bpftool map dump name tcpMap ", err)
	}
	// NOTE(review): if bpftool produced no output, out[:len(out)-1] below
	// panics (slice bound -1) — confirm bpftool is guaranteed present.
	i, _ := strconv.Atoi(string(out[:len(out)-1]))
	fmt.Println("i is", i)

	//dump all maps for analysis
	for j := i; j < i+14; j++ {
		_, _ = exec.Command(bash, "-c", "bpftool map dump id "+strconv.Itoa(j)+" > dump"+strconv.Itoa(j)).Output()
	}
	alreadyEstablished.RLock()
	for sock1, v := range alreadyEstablished.TCP {
		fmt.Println(*sock1, v)
	}
	fmt.Println("---------------------")
	for sock1, v := range alreadyEstablished.TCPv6 {
		fmt.Println(*sock1, v)
	}
	alreadyEstablished.RUnlock()
	fmt.Println("---------------------")
	sockets, _ := daemonNetlink.SocketsDump(syscall.AF_INET, syscall.IPPROTO_TCP)
	for idx := range sockets {
		fmt.Println("socket tcp: ", sockets[idx])
	}
	fmt.Println("---------------------")
	sockets, _ = daemonNetlink.SocketsDump(syscall.AF_INET6, syscall.IPPROTO_TCP)
	for idx := range sockets {
		fmt.Println("socket tcp6: ", sockets[idx])
	}
	fmt.Println("---------------------")
	sockets, _ = daemonNetlink.SocketsDump(syscall.AF_INET, syscall.IPPROTO_UDP)
	for idx := range sockets {
		fmt.Println("socket udp: ", sockets[idx])
	}
	fmt.Println("---------------------")
	sockets, _ = daemonNetlink.SocketsDump(syscall.AF_INET6, syscall.IPPROTO_UDP)
	for idx := range sockets {
		fmt.Println("socket udp6: ", sockets[idx])
	}
}
opensnitch-1.5.8.1/daemon/procmon/ebpf/ebpf.go 0000664 0000000 0000000 00000011676 14401326716 0021154 0 ustar 00root root 0000000 0000000 package ebpf
import (
"encoding/binary"
"fmt"
"net"
"sync"
"syscall"
"unsafe"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
"github.com/evilsocket/opensnitch/daemon/procmon"
elf "github.com/iovisor/gobpf/elf"
)
//ebpfMapsForProto contains pointers to ebpf maps for a given protocol
//(tcp/udp/v6): the per-protocol connection counter and the connection map.
type ebpfMapsForProto struct {
	counterMap *elf.Map
	bpfmap     *elf.Map
}

//Not in use, ~4usec faster lookup compared to m.LookupElement()

//mimics union bpf_attr's anonymous struct used by BPF_MAP_*_ELEM commands
//from /include/uapi/linux/bpf.h
type bpf_lookup_elem_t struct {
	map_fd uint64 //even though in bpf.h its type is __u32, we must make it 8 bytes long
	//because "key" is of type __aligned_u64, i.e. "key" must be aligned on an 8-byte boundary
	key   uintptr
	value uintptr
}

// alreadyEstablishedConns holds the TCP/TCPv6 connections that existed
// before opensnitch started, mapped to their owning pid.
type alreadyEstablishedConns struct {
	TCP   map[*daemonNetlink.Socket]int
	TCPv6 map[*daemonNetlink.Socket]int
	sync.RWMutex
}

var (
	m       *elf.Module       // the loaded ebpf module (opensnitch.o)
	lock    = sync.RWMutex{}  // guards stop and hostByteOrder
	mapSize = uint(12000)
	ebpfMaps map[string]*ebpfMapsForProto
	//connections which were established at the time when opensnitch started
	alreadyEstablished = alreadyEstablishedConns{
		TCP:   make(map[*daemonNetlink.Socket]int),
		TCPv6: make(map[*daemonNetlink.Socket]int),
	}

	//stop == true is a signal for all goroutines to stop
	stop = false

	// list of local addresses of this machine
	localAddresses []net.IP

	// byte order of this host, detected at Start()
	hostByteOrder binary.ByteOrder
)
//Start installs ebpf kprobes.
// Initialization order: mount debugfs, load /etc/opensnitchd/opensnitch.o,
// enable kprobes (retrying once after a dirty shutdown), zero the
// connection counters, detect the host byte order, wire up the per-proto
// map table, snapshot already-established connections and finally launch
// the monitoring goroutines.
func Start() error {
	if err := mountDebugFS(); err != nil {
		log.Error("ebpf.Start -> mount debugfs error. Report on github please: %s", err)
		return err
	}
	m = elf.NewModule("/etc/opensnitchd/opensnitch.o")
	if err := m.Load(nil); err != nil {
		log.Error("eBPF Failed to load /etc/opensnitchd/opensnitch.o: %v", err)
		return err
	}

	// if previous shutdown was unclean, then we must remove the dangling kprobe
	// and install it again (close the module and load it again)
	if err := m.EnableKprobes(0); err != nil {
		m.Close()
		if err := m.Load(nil); err != nil {
			log.Error("eBPF failed to load /etc/opensnitchd/opensnitch.o (2): %v", err)
			return err
		}
		if err := m.EnableKprobes(0); err != nil {
			log.Error("eBPF error when enabling kprobes: %v", err)
			return err
		}
	}

	// init all connection counters to 0
	zeroKey := make([]byte, 4)
	zeroValue := make([]byte, 8)
	for _, name := range []string{"tcpcounter", "tcpv6counter", "udpcounter", "udpv6counter"} {
		err := m.UpdateElement(m.Map(name), unsafe.Pointer(&zeroKey[0]), unsafe.Pointer(&zeroValue[0]), 0)
		if err != nil {
			log.Error("eBPF could not init counters to zero: %v", err)
			return err
		}
	}
	ebpfCache = NewEbpfCache()

	lock.Lock()
	//determine host byte order by writing a known 16-bit value and
	//inspecting the resulting byte layout
	buf := [2]byte{}
	*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)
	switch buf {
	case [2]byte{0xCD, 0xAB}:
		hostByteOrder = binary.LittleEndian
	case [2]byte{0xAB, 0xCD}:
		hostByteOrder = binary.BigEndian
	default:
		log.Error("Could not determine host byte order.")
	}
	lock.Unlock()

	ebpfMaps = map[string]*ebpfMapsForProto{
		"tcp": {
			counterMap: m.Map("tcpcounter"),
			bpfmap:     m.Map("tcpMap")},
		"tcp6": {
			counterMap: m.Map("tcpv6counter"),
			bpfmap:     m.Map("tcpv6Map")},
		"udp": {
			counterMap: m.Map("udpcounter"),
			bpfmap:     m.Map("udpMap")},
		"udp6": {
			counterMap: m.Map("udpv6counter"),
			bpfmap:     m.Map("udpv6Map")},
	}

	saveEstablishedConnections(uint8(syscall.AF_INET))
	if core.IPv6Enabled {
		saveEstablishedConnections(uint8(syscall.AF_INET6))
	}

	go monitorCache()
	go monitorMaps()
	go monitorLocalAddresses()
	go monitorAlreadyEstablished()
	return nil
}
// saveEstablishedConnections dumps the TCP sockets of the given family
// (AF_INET or AF_INET6) via netlink, resolves their owning pid by inode
// and stores them in alreadyEstablished.TCP.
// NOTE(review): both families are stored in the TCP map here; the TCPv6
// map is filled elsewhere — confirm this is intentional.
func saveEstablishedConnections(commDomain uint8) error {
	// save already established connections
	socketListTCP, err := daemonNetlink.SocketsDump(commDomain, uint8(syscall.IPPROTO_TCP))
	if err != nil {
		log.Debug("eBPF could not dump TCP (%d) sockets via netlink: %v", commDomain, err)
		return err
	}

	for _, sock := range socketListTCP {
		inode := int((*sock).INode)
		pid := procmon.GetPIDFromINode(inode, fmt.Sprint(inode,
			(*sock).ID.Source, (*sock).ID.SourcePort, (*sock).ID.Destination, (*sock).ID.DestinationPort))
		alreadyEstablished.Lock()
		alreadyEstablished.TCP[sock] = pid
		alreadyEstablished.Unlock()
	}
	return nil
}
// Stop stops monitoring connections using kprobes.
// It signals all goroutines to exit via the stop flag, closes the ebpf
// module (detaching the kprobes) and empties the cache.
func Stop() {
	lock.Lock()
	stop = true
	lock.Unlock()
	if m != nil {
		m.Close()
	}
	ebpfCache.clear()
}
// isStopped reports whether Stop() has been requested.
func isStopped() bool {
	lock.RLock()
	stopped := stop
	lock.RUnlock()
	return stopped
}
//make bpf() syscall with bpf_lookup prepared by the caller.
//Returns the raw first return value of the syscall (0 on success).
// NOTE(review): 321 is the bpf(2) syscall number on x86_64 only; other
// architectures use different numbers — confirm if non-amd64 is supported.
func makeBpfSyscall(bpf_lookup *bpf_lookup_elem_t) uintptr {
	BPF_MAP_LOOKUP_ELEM := 1 //cmd number
	syscall_BPF := 321       //syscall number
	sizeOfStruct := 24       //sizeof bpf_lookup_elem_t struct

	r1, _, _ := syscall.Syscall(uintptr(syscall_BPF), uintptr(BPF_MAP_LOOKUP_ELEM),
		uintptr(unsafe.Pointer(bpf_lookup)), uintptr(sizeOfStruct))
	return r1
}
opensnitch-1.5.8.1/daemon/procmon/ebpf/find.go 0000664 0000000 0000000 00000013035 14401326716 0021147 0 ustar 00root root 0000000 0000000 package ebpf
import (
"encoding/binary"
"fmt"
"net"
"unsafe"
daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
)
// we need to manually remove old connections from a bpf map

// GetPid looks up process pid in a bpf map. If not found there, then it searches
// already-established TCP connections.
// Returns (-100, -100) for in-kernel connections (uid 0 via netlink), and
// an error when the source IP does not belong to this machine.
func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (int, int, error) {
	if hostByteOrder == nil {
		return -1, -1, fmt.Errorf("eBPF monitoring method not initialized yet")
	}
	if pid, uid := getPidFromEbpf(proto, srcPort, srcIP, dstIP, dstPort); pid != -1 {
		return pid, uid, nil
	}
	//check if it comes from already established TCP
	if proto == "tcp" || proto == "tcp6" {
		if pid, uid, err := findInAlreadyEstablishedTCP(proto, srcPort, srcIP, dstIP, dstPort); err == nil {
			return pid, uid, nil
		}
	}
	//using netlink.GetSocketInfo to check if UID is 0 (in-kernel connection)
	if uid, _ := daemonNetlink.GetSocketInfo(proto, srcIP, srcPort, dstIP, dstPort); uid == 0 {
		return -100, -100, nil
	}
	if !findAddressInLocalAddresses(srcIP) {
		// systemd-resolved sometimes makes a TCP Fast Open connection to a DNS server (8.8.8.8 on my machine)
		// and we get a packet here with **source** (not detination!!!) IP 8.8.8.8
		// Maybe it's an in-kernel response with spoofed IP because wireshark does not show neither
		// resolved's TCP Fast Open packet, nor the response
		// Until this is better understood, we simply do not allow this machine to make connections with
		// arbitrary source IPs
		return -1, -1, fmt.Errorf("eBPF packet with unknown source IP: %s", srcIP)
	}
	return -1, -1, nil
}
// getPidFromEbpf looks up a connection in bpf map and returns PID if found
// the lookup keys and values are defined in opensnitch.c , e.g.
//
// struct tcp_key_t {
// u16 sport;
// u32 daddr;
// u16 dport;
// u32 saddr;
// }__attribute__((packed));
// struct tcp_value_t{
// u64 pid;
// u64 uid;
// u64 counter;
// }__attribute__((packed));;
// It also maintains the user-space cache of resolutions, retrying lookups
// with a zeroed source (and for udp loopback also destination) address,
// since the kernel sometimes reports 0.0.0.0 there.
func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (pid int, uid int) {
	// hostByteOrder is only set once Start() succeeded
	if hostByteOrder == nil {
		return -1, -1
	}

	// Some connections, like broadcasts, are only seen in eBPF once,
	// but some applications send 1 connection per network interface.
	// If we delete the eBPF entry the first time we see it, we won't find
	// the connection the next times.
	delItemIfFound := true

	var key []byte
	var value []byte
	var isIP4 bool = (proto == "tcp") || (proto == "udp") || (proto == "udplite")

	// key layout mirrors tcp_key_t above: sport (host order) | daddr |
	// dport (big endian) | saddr
	if isIP4 {
		key = make([]byte, 12)
		value = make([]byte, 24)
		copy(key[2:6], dstIP)
		binary.BigEndian.PutUint16(key[6:8], uint16(dstPort))
		copy(key[8:12], srcIP)
	} else { // IPv6
		key = make([]byte, 36)
		value = make([]byte, 24)
		copy(key[2:18], dstIP)
		binary.BigEndian.PutUint16(key[18:20], uint16(dstPort))
		copy(key[20:36], srcIP)
	}
	hostByteOrder.PutUint16(key[0:2], uint16(srcPort))

	k := fmt.Sprint(proto, srcPort, srcIP.String(), dstIP.String(), dstPort)
	cacheItem, isInCache := ebpfCache.isInCache(k)
	if isInCache {
		// already resolved: just remove the kernel-side entry
		deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
		return cacheItem.Pid, cacheItem.UID
	}

	err := m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
	if err != nil {
		// key not found
		// sometimes srcIP is 0.0.0.0. Happens especially with UDP sendto()
		// for example: 57621:10.0.3.1 -> 10.0.3.255:57621 , reported as: 0.0.0.0 -> 10.0.3.255
		if isIP4 {
			zeroes := make([]byte, 4)
			copy(key[8:12], zeroes)
		} else {
			zeroes := make([]byte, 16)
			copy(key[20:36], zeroes)
		}
		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
		if err == nil {
			// zero-source entries may match several connections: keep them
			delItemIfFound = false
		}
	}
	if err != nil && proto == "udp" && srcIP.String() == dstIP.String() {
		// very rarely I see this connection. It has srcIP and dstIP == 0.0.0.0 in ebpf map
		// it is a localhost to localhost connection
		// srcIP was already set to 0, set dstIP to zero also
		// TODO try to reproduce it and look for srcIP/dstIP in other kernel structures
		zeroes := make([]byte, 4)
		copy(key[2:6], zeroes)
		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
	}
	if err != nil {
		// key not found in bpf maps
		return -1, -1
	}

	// value layout mirrors tcp_value_t above: pid | uid | counter
	pid = int(hostByteOrder.Uint32(value[0:4]))
	uid = int(hostByteOrder.Uint32(value[8:12]))

	ebpfCache.addNewItem(k, key, pid, uid)
	if delItemIfFound {
		deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
	}
	return pid, uid
}
// findInAlreadyEstablishedTCP searches those TCP connections which were already
// established when opensnitch started, returning (pid, uid, nil) on a match.
func findInAlreadyEstablishedTCP(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (int, int, error) {
	alreadyEstablished.RLock()
	defer alreadyEstablished.RUnlock()
	var sockets map[*daemonNetlink.Socket]int
	switch proto {
	case "tcp":
		sockets = alreadyEstablished.TCP
	case "tcp6":
		sockets = alreadyEstablished.TCPv6
	}
	for candidate, pid := range sockets {
		id := candidate.ID
		if id.SourcePort == uint16(srcPort) && id.DestinationPort == uint16(dstPort) &&
			id.Source.Equal(srcIP) && id.Destination.Equal(dstIP) {
			return pid, int(candidate.UID), nil
		}
	}
	return -1, -1, fmt.Errorf("eBPF inode not found")
}
// findAddressInLocalAddresses returns true if addr is in the list of this
// machine's addresses.
func findAddressInLocalAddresses(addr net.IP) bool {
	lock.Lock()
	defer lock.Unlock()
	for _, a := range localAddresses {
		// net.IP.Equal() also matches an IPv4 address against its
		// IPv4-in-IPv6 form, and avoids allocating two strings per item.
		if addr.Equal(a) {
			return true
		}
	}
	return false
}
opensnitch-1.5.8.1/daemon/procmon/ebpf/monitor.go 0000664 0000000 0000000 00000006600 14401326716 0021716 0 ustar 00root root 0000000 0000000 package ebpf
import (
"syscall"
"time"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
"github.com/vishvananda/netlink"
)
// monitorMaps periodically trims old connections from the bpf maps: once a
// bpf map is full it doesn't allow any more insertions, so we must manually
// keep it below capacity.
func monitorMaps() {
	for !isStopped() {
		time.Sleep(time.Second * 5)
		for name := range ebpfMaps {
			isIPv6 := name == "tcp6" || name == "udp6"
			// using a pointer to the map doesn't delete the items.
			// bpftool still counts them.
			if items := getItems(name, isIPv6); items > 500 {
				deleted := deleteOldItems(name, isIPv6, items/2)
				log.Debug("[ebpf] old items deleted: %d", deleted)
			}
		}
	}
}
// monitorCache evicts expired entries from the eBPF connections cache on
// every tick, until the module is stopped.
func monitorCache() {
	for range ebpfCacheTicker.C {
		if isStopped() {
			return
		}
		ebpfCache.DeleteOldItems()
	}
}
// monitorLocalAddresses maintains a list of this machine's local addresses,
// refreshing it once per second.
// TODO: use netlink.AddrSubscribeWithOptions()
func monitorLocalAddresses() {
	for {
		addr, err := netlink.AddrList(nil, netlink.FAMILY_ALL)
		if err != nil {
			log.Error("eBPF error looking up this machine's addresses via netlink: %v", err)
			// fall through to the sleep below; previously `continue` here
			// skipped the sleep, busy-spinning on a persistent netlink error.
		} else {
			lock.Lock()
			localAddresses = nil
			for _, a := range addr {
				localAddresses = append(localAddresses, a.IP)
			}
			lock.Unlock()
		}
		time.Sleep(time.Second * 1)
		if isStopped() {
			return
		}
	}
}
// monitorAlreadyEstablished makes sure that when an already-established connection is closed
// it will be removed from alreadyEstablished. If we don't do this and keep the alreadyEstablished entry forever,
// then after the genuine process quits,a malicious process may reuse PID-srcPort-srcIP-dstPort-dstIP
func monitorAlreadyEstablished() {
for {
time.Sleep(time.Second * 1)
if isStopped() {
return
}
socketListTCP, err := daemonNetlink.SocketsDump(uint8(syscall.AF_INET), uint8(syscall.IPPROTO_TCP))
if err != nil {
log.Debug("eBPF error in dumping TCP sockets via netlink")
continue
}
alreadyEstablished.Lock()
for aesock := range alreadyEstablished.TCP {
found := false
for _, sock := range socketListTCP {
if socketsAreEqual(aesock, sock) {
found = true
break
}
}
if !found {
delete(alreadyEstablished.TCP, aesock)
}
}
alreadyEstablished.Unlock()
if core.IPv6Enabled {
socketListTCPv6, err := daemonNetlink.SocketsDump(uint8(syscall.AF_INET6), uint8(syscall.IPPROTO_TCP))
if err != nil {
log.Debug("eBPF error in dumping TCPv6 sockets via netlink: %s", err)
continue
}
alreadyEstablished.Lock()
for aesock := range alreadyEstablished.TCPv6 {
found := false
for _, sock := range socketListTCPv6 {
if socketsAreEqual(aesock, sock) {
found = true
break
}
}
if !found {
delete(alreadyEstablished.TCPv6, aesock)
}
}
alreadyEstablished.Unlock()
}
}
}
// socketsAreEqual reports whether two netlink sockets describe the same connection.
func socketsAreEqual(aSocket, bSocket *daemonNetlink.Socket) bool {
	// inodes are unique enough, so the remaining fields will rarely be compared
	if aSocket.INode != bSocket.INode {
		return false
	}
	return aSocket.ID.SourcePort == bSocket.ID.SourcePort &&
		aSocket.ID.DestinationPort == bSocket.ID.DestinationPort &&
		aSocket.ID.Source.Equal(bSocket.ID.Source) &&
		aSocket.ID.Destination.Equal(bSocket.ID.Destination) &&
		aSocket.UID == bSocket.UID
}
opensnitch-1.5.8.1/daemon/procmon/ebpf/utils.go 0000664 0000000 0000000 00000005623 14401326716 0021373 0 ustar 00root root 0000000 0000000 package ebpf
import (
"fmt"
"unsafe"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
// mountDebugFS mounts debugfs under /sys/kernel/debug/ when the kprobes
// tracing interface is not already available. Returns the mount error, if any.
func mountDebugFS() error {
	debugfsPath := "/sys/kernel/debug/"
	kprobesPath := fmt.Sprint(debugfsPath, "tracing/kprobe_events")
	if core.Exists(kprobesPath) {
		return nil
	}
	if _, err := core.Exec("mount", []string{"-t", "debugfs", "none", debugfsPath}); err != nil {
		log.Warning("eBPF debugfs error: %s", err)
		return err
	}
	return nil
}
// deleteEbpfEntry removes a single key from the given protocol's bpf map,
// returning true on success.
func deleteEbpfEntry(proto string, key unsafe.Pointer) bool {
	return m.DeleteElement(ebpfMaps[proto].bpfmap, key) == nil
}
// getItems counts the active items in the bpf map of the given protocol by
// iterating it with LookupNextElement(). Keys seen more than twice during the
// iteration are deleted on the fly, as they indicate a stale cursor.
func getItems(proto string, isIPv6 bool) (items uint) {
	isDup := make(map[string]uint8)
	var lookupKey []byte
	var nextKey []byte
	var value []byte
	// IPv4 keys are 12 bytes, IPv6 keys 36; values are always 24 bytes.
	// The packed layouts are defined in opensnitch.c.
	if !isIPv6 {
		lookupKey = make([]byte, 12)
		nextKey = make([]byte, 12)
	} else {
		lookupKey = make([]byte, 36)
		nextKey = make([]byte, 36)
	}
	value = make([]byte, 24)
	firstrun := true
	for {
		ok, err := m.LookupNextElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&lookupKey[0]),
			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
		if !ok || err != nil { //reached end of map
			log.Debug("[ebpf] %s map: %d active items", proto, items)
			return
		}
		if firstrun {
			// on first run lookupKey is a dummy, nothing to delete
			firstrun = false
			copy(lookupKey, nextKey)
			continue
		}
		if counter, duped := isDup[string(lookupKey)]; duped && counter > 1 {
			deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0]))
			continue
		}
		isDup[string(lookupKey)]++
		copy(lookupKey, nextKey)
		items++
	}
	// the loop above only exits via return; the previous trailing
	// `return items` was unreachable and has been removed.
}
// deleteOldItems deletes maps' elements in order to keep them below maximum capacity.
// If ebpf maps are full they don't allow any more insertions, ending up losing events.
// At most maxToDelete elements are removed per call; returns how many were deleted.
func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) {
	isDup := make(map[string]uint8)
	var lookupKey []byte
	var nextKey []byte
	var value []byte
	// IPv4 keys are 12 bytes, IPv6 keys 36; values are always 24 bytes.
	if !isIPv6 {
		lookupKey = make([]byte, 12)
		nextKey = make([]byte, 12)
	} else {
		lookupKey = make([]byte, 36)
		nextKey = make([]byte, 36)
	}
	value = make([]byte, 24)
	firstrun := true
	i := uint(0)
	for {
		i++
		if i > maxToDelete {
			return
		}
		ok, err := m.LookupNextElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&lookupKey[0]),
			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
		if !ok || err != nil { //reached end of map
			return
		}
		// keys seen more than twice during iteration indicate a stale cursor
		if counter, duped := isDup[string(lookupKey)]; duped && counter > 1 {
			if deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0])) {
				deleted++
				copy(lookupKey, nextKey)
				continue
			}
			return
		}
		if firstrun {
			// on first run lookupKey is a dummy, nothing to delete
			firstrun = false
			copy(lookupKey, nextKey)
			continue
		}
		if !deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0])) {
			return
		}
		deleted++
		isDup[string(lookupKey)]++
		copy(lookupKey, nextKey)
	}
	// the loop above only exits via return; the previous trailing bare
	// `return` was unreachable and has been removed.
}
opensnitch-1.5.8.1/daemon/procmon/find.go 0000664 0000000 0000000 00000005262 14401326716 0020236 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"os"
"sort"
"strconv"
)
// sortPidsByTime sorts the given directory entries by modification time,
// newest first, in place, and returns the same slice.
func sortPidsByTime(fdList []os.FileInfo) []os.FileInfo {
	newestFirst := func(i, j int) bool {
		return fdList[i].ModTime().UnixNano() > fdList[j].ModTime().UnixNano()
	}
	sort.Slice(fdList, newestFirst)
	return fdList
}
// inodeFound searches for the given inode among the descriptors under
// /proc/<pid>/fd/ (or /proc/<pid>/task/<tid>/fd/), resolving each symbolic
// link and comparing it against the expected "socket:[inode]" string.
//
// If the inode is found, the inode and pid caches are updated and sorted.
func inodeFound(pidsPath, expect, inodeKey string, inode, pid int) bool {
	fdPath := fmt.Sprint(pidsPath, pid, "/fd/")
	fdList := lookupPidDescriptors(fdPath, pid)
	if fdList == nil {
		return false
	}
	for _, fd := range fdList {
		descLink := fmt.Sprint(fdPath, fd)
		link, err := os.Readlink(descLink)
		if err != nil || link != expect {
			continue
		}
		inodesCache.add(inodeKey, descLink, pid)
		pidsCache.add(fdPath, fdList, pid)
		return true
	}
	return false
}
// lookupPidInProc searches for an inode in /proc.
// It walks the list of running PIDs and inspects their opened sockets.
// TODO: If the inode is not found, search again in the task/threads
// of every PID (costly).
func lookupPidInProc(pidsPath, expect, inodeKey string, inode int) int {
	for _, pid := range getProcPids(pidsPath) {
		if inodeFound(pidsPath, expect, inodeKey, inode, pid) {
			return pid
		}
	}
	return -1
}
// lookupPidDescriptors returns the names of the descriptors inside
// /proc/<pid>/fd/, sorted by modification time (newest first), or nil
// if the directory can't be read.
// TODO: search in /proc/<pid>/task/<tid>/fd/ .
func lookupPidDescriptors(fdPath string, pid int) []string {
	f, err := os.Open(fdPath)
	if err != nil {
		return nil
	}
	// This is where most of the time is wasted when looking for PIDs.
	// Long-running processes like firefox/chrome tend to keep a lot of
	// descriptor references that point to files no longer on disk but that
	// remain in memory (those with " (deleted)"), which forces us to
	// iterate over 300 to 700 items that are not sockets.
	fdList, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return nil
	}
	sortPidsByTime(fdList)
	names := make([]string, len(fdList))
	for i, entry := range fdList {
		names[i] = entry.Name()
	}
	return names
}
// getProcPids returns the list of running PIDs found under pidsPath
// (/proc or /proc/<pid>/task/), sorted by directory modification time.
func getProcPids(pidsPath string) (pidList []int) {
	f, err := os.Open(pidsPath)
	if err != nil {
		return pidList
	}
	ls, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return pidList
	}
	ls = sortPidsByTime(ls)
	for _, f := range ls {
		// skip regular files (e.g. /proc/meminfo) and non-numeric entries
		if !f.IsDir() {
			continue
		}
		if pid, err := strconv.Atoi(f.Name()); err == nil {
			// direct append instead of the previous `append(pidList, []int{pid}...)`
			pidList = append(pidList, pid)
		}
	}
	return pidList
}
opensnitch-1.5.8.1/daemon/procmon/find_test.go 0000664 0000000 0000000 00000001574 14401326716 0021277 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"testing"
)
// TestGetProcPids checks that listing PIDs under /proc returns at least one
// entry (there is always at least this test process).
func TestGetProcPids(t *testing.T) {
	pids := getProcPids("/proc")
	if len(pids) == 0 {
		t.Error("getProcPids() should not be 0", pids)
	}
}
// TestLookupPidDescriptors checks that this test process has at least one
// open file descriptor listed under /proc/<pid>/fd/.
func TestLookupPidDescriptors(t *testing.T) {
	pidsFd := lookupPidDescriptors(fmt.Sprint("/proc/", myPid, "/fd/"), myPid)
	if len(pidsFd) == 0 {
		// the message previously referenced getProcPids() by mistake
		t.Error("lookupPidDescriptors() should not be 0", pidsFd)
	}
}
// TestLookupPidInProc verifies that lookupPidInProc() can locate a PID whose
// descriptors include a link to /dev/null.
// NOTE(review): the last argument of lookupPidInProc() is an inode number but
// myPid is passed here; the match is done on `expect`, so the test still
// works — confirm whether a real inode should be used instead.
func TestLookupPidInProc(t *testing.T) {
	// any process holding /dev/null open will match the expected link target
	expect := "/dev/null"
	foundPid := lookupPidInProc("/proc/", expect, "", myPid)
	if foundPid == -1 {
		t.Error("lookupPidInProc() should not return -1")
	}
}
// BenchmarkGetProcs measures the cost of listing all PIDs under /proc.
func BenchmarkGetProcs(b *testing.B) {
	for i := 0; i < b.N; i++ {
		getProcPids("/proc")
	}
}

// BenchmarkLookupPidDescriptors measures the cost of listing this process'
// descriptors under /proc/<pid>/fd/.
func BenchmarkLookupPidDescriptors(b *testing.B) {
	for i := 0; i < b.N; i++ {
		lookupPidDescriptors(fmt.Sprint("/proc/", myPid, "/fd/"), myPid)
	}
}
opensnitch-1.5.8.1/daemon/procmon/monitor/ 0000775 0000000 0000000 00000000000 14401326716 0020451 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/procmon/monitor/init.go 0000664 0000000 0000000 00000004122 14401326716 0021742 0 ustar 00root root 0000000 0000000 package monitor
import (
"net"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/procmon"
"github.com/evilsocket/opensnitch/daemon/procmon/audit"
"github.com/evilsocket/opensnitch/daemon/procmon/ebpf"
)
var (
	// cacheMonitorsRunning tracks whether the procmon cache goroutines
	// (MonitorActivePids, CacheCleanerTask) have been launched, so that
	// Init() only starts them once across monitor-method reconfigurations.
	cacheMonitorsRunning = false
)
// ReconfigureMonitorMethod configures a new method for parsing connections.
func ReconfigureMonitorMethod(newMonitorMethod string) error {
	oldMethod := procmon.GetMonitorMethod()
	if oldMethod == newMonitorMethod {
		return nil
	}
	End()
	procmon.SetMonitorMethod(newMonitorMethod)
	// if the new monitor method fails to start, roll back the change and exit
	// without saving the configuration. Otherwise we can end up with the wrong
	// monitor method configured and saved to file.
	if err := Init(); err != nil {
		procmon.SetMonitorMethod(oldMethod)
		return err
	}
	return nil
}
// End stops the currently active connection-parsing method.
func End() {
	switch {
	case procmon.MethodIsAudit():
		audit.Stop()
	case procmon.MethodIsEbpf():
		ebpf.Stop()
	}
}
// Init starts parsing connections using the method specified.
// It tries the configured method (ebpf or audit) first; if that method fails
// to start, it falls back to the /proc monitor and records the failure error.
func Init() (err error) {
	// start the shared procmon cache goroutines only once, no matter how
	// many times the monitor method is (re)configured.
	if cacheMonitorsRunning == false {
		go procmon.MonitorActivePids()
		go procmon.CacheCleanerTask()
		cacheMonitorsRunning = true
	}
	if procmon.MethodIsEbpf() {
		err = ebpf.Start()
		if err == nil {
			log.Info("Process monitor method ebpf")
			return nil
		}
		// we need to stop this method even if it has failed to start, in order to clean up the kprobes
		// It helps with the error "cannot write...kprobe_events: file exists".
		ebpf.Stop()
		log.Warning("error starting ebpf monitor method: %v", err)
	} else if procmon.MethodIsAudit() {
		var auditConn net.Conn
		auditConn, err = audit.Start()
		if err == nil {
			log.Info("Process monitor method audit")
			// feed audit events from the socket into the shared events channel
			go audit.Reader(auditConn, (chan<- audit.Event)(audit.EventChan))
			return nil
		}
		log.Warning("error starting audit monitor method: %v", err)
	}
	// if any of the above methods have failed, fallback to proc
	log.Info("Process monitor method /proc")
	procmon.SetMonitorMethod(procmon.MethodProc)
	return err
}
opensnitch-1.5.8.1/daemon/procmon/parse.go 0000664 0000000 0000000 00000007423 14401326716 0020431 0 ustar 00root root 0000000 0000000 package procmon
import (
"fmt"
"os"
"time"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/procmon/audit"
)
// getPIDFromAuditEvents tries to resolve a socket inode to a PID using the
// events recorded by auditd, returning the PID and its position in the event
// list, or (-1, -1) if not found.
func getPIDFromAuditEvents(inode int, inodeKey string, expect string) (int, int) {
	audit.Lock.RLock()
	defer audit.Lock.RUnlock()
	auditEvents := audit.GetEvents()
	// first pass: match the inode against the PID of each audit event
	for pos := range auditEvents {
		candidate := auditEvents[pos].Pid
		if inodeFound("/proc/", expect, inodeKey, inode, candidate) {
			return candidate, pos
		}
	}
	// second pass: fall back to the parent PID of each event
	for pos := range auditEvents {
		candidate := auditEvents[pos].PPid
		if inodeFound("/proc/", expect, inodeKey, inode, candidate) {
			return candidate, pos
		}
	}
	return -1, -1
}
// GetPIDFromINode tries to get the PID from a socket inode following these steps:
// 1. Get the PID from the cache of Inodes.
// 2. Get the PID from the cache of PIDs.
// 3. Look for the PID using one of these methods:
//    - audit: listening for socket creation from auditd.
//    - proc: search /proc
//
// If the PID is not found by the first methods, it falls back to /proc.
func GetPIDFromINode(inode int, inodeKey string) int {
	found := -1
	if inode <= 0 {
		return found
	}
	start := time.Now()
	expect := fmt.Sprintf("socket:[%d]", inode)
	// 1. inode cache
	if cachedPidInode := inodesCache.getPid(inodeKey); cachedPidInode != -1 {
		// reuse the value already obtained instead of querying the cache twice
		log.Debug("Inode found in cache: %v %v %v %v", time.Since(start), cachedPidInode, inode, inodeKey)
		return cachedPidInode
	}
	// 2. pid cache
	cachedPid, pos := pidsCache.getPid(inode, inodeKey, expect)
	if cachedPid != -1 {
		log.Debug("Socket found in known pids %v, pid: %d, inode: %d, pos: %d, pids in cache: %d", time.Since(start), cachedPid, inode, pos, pidsCache.countItems())
		pidsCache.sort(cachedPid)
		inodesCache.add(inodeKey, "", cachedPid)
		return cachedPid
	}
	// 3. audit events, when that's the configured method
	if MethodIsAudit() {
		if aPid, pos := getPIDFromAuditEvents(inode, inodeKey, expect); aPid != -1 {
			log.Debug("PID found via audit events: %v, position: %d", time.Since(start), pos)
			return aPid
		}
	}
	// fall back to walking /proc. `found` is always -1 at this point, so the
	// previous `found == -1 || methodIsProc()` guard was redundant.
	found = lookupPidInProc("/proc/", expect, inodeKey, inode)
	log.Debug("new pid lookup took (%d): %v", found, time.Since(start))
	return found
}
// FindProcess checks if a process exists given a PID.
// If it exists in /proc, a new Process{} object is returned with the details
// to identify a process (cmdline, name, environment variables, etc).
// Returns nil when /proc/<pid> does not exist.
func FindProcess(pid int, interceptUnknown bool) *Process {
	// negative PIDs mean the process couldn't be resolved; optionally
	// intercept them anyway with a dummy Process
	if interceptUnknown && pid < 0 {
		return NewProcess(0, "")
	}
	// fast path: the process was already resolved recently
	if proc := findProcessInActivePidsCache(uint64(pid)); proc != nil {
		return proc
	}
	if MethodIsAudit() {
		if aevent := audit.GetEventByPid(pid); aevent != nil {
			audit.Lock.RLock()
			proc := NewProcess(pid, aevent.ProcPath)
			proc.readCmdline()
			proc.setCwd(aevent.ProcDir)
			audit.Lock.RUnlock()
			// if the proc dir contains non alpha-numeric chars the field is empty
			if proc.CWD == "" {
				proc.readCwd()
			}
			proc.readEnv()
			proc.cleanPath()
			addToActivePidsCache(uint64(pid), proc)
			return proc
		}
	}
	// if the PID dir doesn't exist, the process may have exited or be a kernel connection
	// XXX: can a kernel connection exist without an entry in ProcFS?
	if core.Exists(fmt.Sprint("/proc/", pid)) == false {
		log.Debug("PID can't be read /proc/ %d", pid)
		return nil
	}
	linkName := fmt.Sprint("/proc/", pid, "/exe")
	link, err := os.Readlink(linkName)
	proc := NewProcess(pid, link)
	proc.readCmdline()
	proc.readCwd()
	proc.readEnv()
	proc.cleanPath()
	// processes with no cmdline (e.g. kernel threads) fall back to comm
	if len(proc.Args) == 0 {
		proc.readComm()
		proc.Args = make([]string, 0)
		proc.Args = append(proc.Args, proc.Comm)
	}
	// If the link to the binary can't be read, the PID may be of a kernel task
	if err != nil || proc.Path == "" {
		proc.Path = "Kernel connection"
	}
	addToActivePidsCache(uint64(pid), proc)
	return proc
}
opensnitch-1.5.8.1/daemon/procmon/process.go 0000664 0000000 0000000 00000004005 14401326716 0020766 0 ustar 00root root 0000000 0000000 package procmon
import (
"sync"
"time"
)
var (
	// NOTE(review): cacheMonitorsRunning appears unused in this package; the
	// monitor package declares its own flag with the same name. Confirm
	// before removing.
	cacheMonitorsRunning = false
	// lock guards monitorMethod.
	lock = sync.RWMutex{}
	// monitorMethod is the active connection-parsing method; defaults to /proc.
	monitorMethod = MethodProc
)

// monitor method supported types
const (
	MethodProc = "proc"
	MethodAudit = "audit"
	MethodEbpf = "ebpf"
)
// procIOstats holds the I/O counters of a process, read from /proc/<pid>/io.
// man 5 proc; man procfs
type procIOstats struct {
	RChar int64 // chars read (includes page-cache hits)
	WChar int64 // chars written
	SyscallRead int64 // read()/pread() syscalls issued
	SyscallWrite int64 // write()/pwrite() syscalls issued
	ReadBytes int64 // bytes actually fetched from storage
	WriteBytes int64 // bytes actually sent to storage
}

// procDescriptors describes one entry of /proc/<pid>/fd/.
type procDescriptors struct {
	Name string
	SymLink string // target the descriptor's symlink points to
	Size int64
	ModTime time.Time
}

// procStatm holds the memory usage fields of /proc/<pid>/statm (in pages).
type procStatm struct {
	Size int64 // total program size
	Resident int64
	Shared int64
	Text int64
	Lib int64
	Data int64 // data + stack
	Dt int // dirty pages (unused since Linux 2.6)
}
// Process holds the details of a process.
type Process struct {
	ID int // the PID
	Comm string // short command name, from /proc/<pid>/comm
	Path string // absolute path to the binary, from /proc/<pid>/exe
	Args []string // command line, from /proc/<pid>/cmdline
	Env map[string]string // environment variables, from /proc/<pid>/environ
	CWD string // current working directory
	Descriptors []*procDescriptors // open file descriptors
	IOStats *procIOstats // I/O counters
	Status string // raw contents of /proc/<pid>/status
	Stat string // raw contents of /proc/<pid>/stat
	Statm *procStatm // parsed memory usage
	Stack string // raw contents of /proc/<pid>/stack
	Maps string // raw contents of /proc/<pid>/maps
}
// NewProcess returns a new Process structure with the given PID and binary
// path, and empty (non-nil) Args and Env.
func NewProcess(pid int, path string) *Process {
	p := &Process{
		ID: pid,
		Path: path,
		Args: make([]string, 0),
		Env: make(map[string]string),
	}
	return p
}
// SetMonitorMethod configures a new method for parsing connections.
func SetMonitorMethod(newMonitorMethod string) {
	lock.Lock()
	monitorMethod = newMonitorMethod
	lock.Unlock()
}
// GetMonitorMethod returns the currently configured method for parsing
// connections.
func GetMonitorMethod() string {
	// a read lock is sufficient here (consistent with MethodIs*() below);
	// the previous code took the write lock for a pure read.
	lock.RLock()
	defer lock.RUnlock()
	return monitorMethod
}
// MethodIsEbpf returns true if the process monitor method is eBPF.
func MethodIsEbpf() bool {
	lock.RLock()
	defer lock.RUnlock()
	return monitorMethod == MethodEbpf
}
// MethodIsAudit returns true if the process monitor method is audit.
// (The previous comment incorrectly said "eBPF".)
func MethodIsAudit() bool {
	lock.RLock()
	defer lock.RUnlock()
	return monitorMethod == MethodAudit
}
// methodIsProc returns true if the process monitor method is /proc scanning.
func methodIsProc() bool {
	lock.RLock()
	defer lock.RUnlock()
	return monitorMethod == MethodProc
}
opensnitch-1.5.8.1/daemon/procmon/process_test.go 0000664 0000000 0000000 00000006042 14401326716 0022030 0 ustar 00root root 0000000 0000000 package procmon
import (
"os"
"testing"
)
var (
	// myPid is this test process' own PID; the tests inspect /proc/<myPid>/.
	myPid = os.Getpid()
	// proc is the shared fixture mutated by the tests below.
	proc = NewProcess(myPid, "/fake/path")
)
// TestNewProcess checks the fields set by the NewProcess constructor.
func TestNewProcess(t *testing.T) {
	if proc.ID != myPid {
		t.Error("NewProcess PID not equal to ", myPid)
	}
	if proc.Path != "/fake/path" {
		t.Error("NewProcess path not equal to /fake/path")
	}
}

// TestProcPath checks that readPath() replaces the fake path with the real
// binary path read from /proc/<pid>/exe.
func TestProcPath(t *testing.T) {
	if err := proc.readPath(); err != nil {
		t.Error("Proc path error:", err)
	}
	if proc.Path == "/fake/path" {
		t.Error("Proc path equal to /fake/path, should be different:", proc.Path)
	}
}

// TestProcCwd checks reading the CWD from /proc and overriding it via setCwd().
func TestProcCwd(t *testing.T) {
	err := proc.readCwd()
	if proc.CWD == "" {
		t.Error("Proc readCwd() not read:", err)
	}
	proc.setCwd("/home")
	if proc.CWD != "/home" {
		t.Error("Proc setCwd() should be /home:", proc.CWD)
	}
}

// TestProcCmdline checks that the command line is read from /proc/<pid>/cmdline.
func TestProcCmdline(t *testing.T) {
	proc.readCmdline()
	if len(proc.Args) == 0 {
		t.Error("Proc Args should not be empty:", proc.Args)
	}
}

// TestProcDescriptors checks that open descriptors are listed from /proc/<pid>/fd/.
func TestProcDescriptors(t *testing.T) {
	proc.readDescriptors()
	if len(proc.Descriptors) == 0 {
		t.Error("Proc Descriptors should not be empty:", proc.Descriptors)
	}
}

// TestProcEnv checks that environment variables are read from /proc/<pid>/environ.
func TestProcEnv(t *testing.T) {
	proc.readEnv()
	if len(proc.Env) == 0 {
		t.Error("Proc Env should not be empty:", proc.Env)
	}
}
// TestProcIOStats checks the counters parsed from /proc/<pid>/io.
// ReadBytes/WriteBytes are left unchecked: they can legitimately be 0 when
// everything was served from the page cache.
func TestProcIOStats(t *testing.T) {
	proc.readIOStats()
	if proc.IOStats.RChar == 0 {
		t.Error("Proc.IOStats.RChar should not be 0:", proc.IOStats)
	}
	if proc.IOStats.WChar == 0 {
		t.Error("Proc.IOStats.WChar should not be 0:", proc.IOStats)
	}
	if proc.IOStats.SyscallRead == 0 {
		t.Error("Proc.IOStats.SyscallRead should not be 0:", proc.IOStats)
	}
	if proc.IOStats.SyscallWrite == 0 {
		t.Error("Proc.IOStats.SyscallWrite should not be 0:", proc.IOStats)
	}
	/*if proc.IOStats.ReadBytes == 0 {
		t.Error("Proc.IOStats.ReadBytes should not be 0:", proc.IOStats)
	}
	if proc.IOStats.WriteBytes == 0 {
		t.Error("Proc.IOStats.WriteBytes should not be 0:", proc.IOStats)
	}*/
}
// TestProcStatus checks the fields populated by readStatus() from
// /proc/<pid>/{status,stat,maps,statm}. Stack is left unchecked since it
// requires elevated privileges to read.
func TestProcStatus(t *testing.T) {
	proc.readStatus()
	if proc.Status == "" {
		t.Error("Proc Status should not be empty:", proc)
	}
	if proc.Stat == "" {
		t.Error("Proc Stat should not be empty:", proc)
	}
	/*if proc.Stack == "" {
		t.Error("Proc Stack should not be empty:", proc)
	}*/
	if proc.Maps == "" {
		t.Error("Proc Maps should not be empty:", proc)
	}
	if proc.Statm.Size == 0 {
		t.Error("Proc Statm Size should not be 0:", proc.Statm)
	}
	if proc.Statm.Resident == 0 {
		t.Error("Proc Statm Resident should not be 0:", proc.Statm)
	}
	if proc.Statm.Shared == 0 {
		t.Error("Proc Statm Shared should not be 0:", proc.Statm)
	}
	if proc.Statm.Text == 0 {
		t.Error("Proc Statm Text should not be 0:", proc.Statm)
	}
	if proc.Statm.Lib != 0 {
		// Lib is always 0 since Linux 2.6; the message previously said
		// "should not be 0", contradicting the condition
		t.Error("Proc Statm Lib should be 0:", proc.Statm)
	}
	if proc.Statm.Data == 0 {
		t.Error("Proc Statm Data should not be 0:", proc.Statm)
	}
	if proc.Statm.Dt != 0 {
		// Dt is always 0 since Linux 2.6; message fixed to match the condition
		t.Error("Proc Statm Dt should be 0:", proc.Statm)
	}
}
// TestProcCleanPath checks that cleanPath() strips the " (deleted)" suffix
// that /proc appends to links of binaries removed from disk.
func TestProcCleanPath(t *testing.T) {
	proc.Path = "/fake/path/binary (deleted)"
	proc.cleanPath()
	if proc.Path != "/fake/path/binary" {
		t.Error("Proc cleanPath() not cleaned:", proc.Path)
	}
}
opensnitch-1.5.8.1/daemon/rule/ 0000775 0000000 0000000 00000000000 14401326716 0016254 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/loader.go 0000664 0000000 0000000 00000024253 14401326716 0020057 0 ustar 00root root 0000000 0000000 package rule
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/fsnotify/fsnotify"
)
// Loader is the object that holds the rules loaded from disk, as well as the
// rules watcher.
type Loader struct {
	sync.RWMutex
	// path is the directory the rules are loaded from and saved to.
	path string
	// rules maps rule name -> rule; guarded by the embedded RWMutex.
	rules map[string]*Rule
	// rulesKeys holds the sorted rule names, for stable iteration order.
	rulesKeys []string
	watcher *fsnotify.Watcher
	// liveReload enables watching the rules directory for changes.
	liveReload bool
	// liveReloadRunning is true once the fsnotify worker goroutine started.
	// NOTE(review): read/written without holding the mutex — confirm all
	// accesses happen from a single goroutine.
	liveReloadRunning bool
}
// NewLoader creates a Loader that reads rules from disk and, when liveReload
// is enabled, watches for changes made to the rules files on disk.
func NewLoader(liveReload bool) (*Loader, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}
	// path and liveReloadRunning keep their zero values until Load() runs
	l := &Loader{
		rules: make(map[string]*Rule),
		watcher: watcher,
		liveReload: liveReload,
	}
	return l, nil
}
// NumRules returns the number of loaded rules.
func (l *Loader) NumRules() int {
	l.RLock()
	n := len(l.rules)
	l.RUnlock()
	return n
}
// GetAll returns the loaded rules.
// NOTE(review): the internal map is returned directly; the read lock offers
// no protection once this function returns, so callers must treat the result
// as read-only.
func (l *Loader) GetAll() map[string]*Rule {
	l.RLock()
	defer l.RUnlock()
	return l.rules
}
// Load loads rules files from disk.
// Every *.json file under `path` is parsed; malformed files are logged and
// skipped. When live reload is enabled, the watcher goroutine is started once.
func (l *Loader) Load(path string) error {
	if core.Exists(path) == false {
		return fmt.Errorf("Path '%s' does not exist", path)
	}
	expr := filepath.Join(path, "*.json")
	matches, err := filepath.Glob(expr)
	if err != nil {
		return fmt.Errorf("Error globbing '%s': %s", expr, err)
	}
	l.path = path
	// NOTE(review): l.rules and l.liveReloadRunning are accessed here without
	// the lock — confirm Load() only runs before concurrent access starts.
	if len(l.rules) == 0 {
		l.rules = make(map[string]*Rule)
	}
	for _, fileName := range matches {
		log.Debug("Reading rule from %s", fileName)
		// a bad rule file is not fatal; skip it and keep loading the rest
		if err := l.loadRule(fileName); err != nil {
			log.Warning("%s", err)
			continue
		}
	}
	if l.liveReload && l.liveReloadRunning == false {
		go l.liveReloadWorker()
	}
	return nil
}
// loadRule reads, parses and compiles a single rule file, then installs it in
// the in-memory rule set. Returns an error if the file can't be read, parsed
// or its operator(s) fail to compile.
func (l *Loader) loadRule(fileName string) error {
	raw, err := ioutil.ReadFile(fileName)
	if err != nil {
		return fmt.Errorf("Error while reading %s: %s", fileName, err)
	}
	l.Lock()
	defer l.Unlock()
	var r Rule
	err = json.Unmarshal(raw, &r)
	if err != nil {
		return fmt.Errorf("Error parsing rule from %s: %s", fileName, err)
	}
	// allow the raw file contents to be collected early
	raw = nil
	// stop monitoring the lists of the rule being replaced, if any
	if oldRule, found := l.rules[r.Name]; found {
		l.cleanListsRule(oldRule)
	}
	if r.Enabled {
		if err := r.Operator.Compile(); err != nil {
			log.Warning("Operator.Compile() error: %s: %s", err, r.Operator.Data)
			return fmt.Errorf("(1) Error compiling rule: %s", err)
		}
		// a List operator contains sub-operators that must be compiled too
		if r.Operator.Type == List {
			for i := 0; i < len(r.Operator.List); i++ {
				if err := r.Operator.List[i].Compile(); err != nil {
					log.Warning("Operator.Compile() error: %s: ", err)
					return fmt.Errorf("(1) Error compiling list rule: %s", err)
				}
			}
		}
	}
	// if the rule's Duration changed from Always to temporary, drop the old
	// on-disk copy
	if oldRule, found := l.rules[r.Name]; found {
		l.deleteOldRuleFromDisk(oldRule, &r)
	}
	log.Debug("Loaded rule from %s: %s", fileName, r.String())
	l.rules[r.Name] = &r
	l.sortRules()
	if l.isTemporary(&r) {
		// NOTE(review): the error from scheduleTemporaryRule() is assigned
		// but this function returns nil regardless — confirm intended.
		err = l.scheduleTemporaryRule(r)
	}
	return nil
}
// deleteRule deletes a rule from memory if it has been deleted from disk.
// This is only called if fsnotify's Remove event is fired, thus it doesn't
// have to delete temporary rules (!Always).
func (l *Loader) deleteRule(filePath string) {
	fileName := filepath.Base(filePath)
	// strip the trailing ".json" (5 chars); the caller guarantees the suffix
	ruleName := fileName[:len(fileName)-5]
	l.RLock()
	rule, found := l.rules[ruleName]
	delRule := found && rule.Duration == Always
	l.RUnlock()
	if delRule {
		l.Delete(ruleName)
	}
}
// deleteRuleFromDisk removes the rule's json file from the rules directory.
func (l *Loader) deleteRuleFromDisk(ruleName string) error {
	// build the path with filepath.Join, consistent with Load()/Add()/Replace()
	path := filepath.Join(l.path, fmt.Sprintf("%s.json", ruleName))
	return os.Remove(path)
}
// deleteOldRuleFromDisk deletes a rule from disk if the Duration changes
// from Always (saved on disk), to !Always (temporary).
func (l *Loader) deleteOldRuleFromDisk(oldRule, newRule *Rule) {
	if oldRule.Duration != Always || newRule.Duration == Always {
		return
	}
	if err := l.deleteRuleFromDisk(oldRule.Name); err != nil {
		log.Error("Error deleting old rule from disk: %s", oldRule.Name)
	}
}
// cleanListsRule stops monitoring the domain lists of an Operator of type
// Lists, or of the first Lists sub-operator inside a List operator.
func (l *Loader) cleanListsRule(oldRule *Rule) {
	switch oldRule.Operator.Type {
	case Lists:
		oldRule.Operator.StopMonitoringLists()
	case List:
		for i := 0; i < len(oldRule.Operator.List); i++ {
			if oldRule.Operator.List[i].Type == Lists {
				oldRule.Operator.List[i].StopMonitoringLists()
				break
			}
		}
	}
}
// liveReloadWorker watches the rules directory, reloading rule files as they
// are written and removing rules from memory as their files are deleted.
// It exits only if the watcher can't be attached to the path.
func (l *Loader) liveReloadWorker() {
	l.liveReloadRunning = true
	log.Debug("Rules watcher started on path %s ...", l.path)
	if err := l.watcher.Add(l.path); err != nil {
		log.Error("Could not watch path: %s", err)
		l.liveReloadRunning = false
		return
	}
	for {
		select {
		case event := <-l.watcher.Events:
			// a new rule json file has been created or updated
			if event.Op&fsnotify.Write == fsnotify.Write {
				if strings.HasSuffix(event.Name, ".json") {
					log.Important("Ruleset changed due to %s, reloading ...", path.Base(event.Name))
					if err := l.loadRule(event.Name); err != nil {
						log.Warning("%s", err)
					}
				}
			} else if event.Op&fsnotify.Remove == fsnotify.Remove {
				if strings.HasSuffix(event.Name, ".json") {
					log.Important("Rule deleted %s", path.Base(event.Name))
					// we only need to delete from memory rules of type Always,
					// because the Remove event is of a file, i.e.: Duration == Always
					l.deleteRule(event.Name)
				}
			}
		case err := <-l.watcher.Errors:
			log.Error("File system watcher error: %s", err)
		}
	}
}
// isTemporary returns true for rules whose Duration is a timeout (e.g. "30s"),
// as opposed to Always, Restart or Once.
func (l *Loader) isTemporary(r *Rule) bool {
	switch r.Duration {
	case Restart, Always, Once:
		return false
	}
	return true
}
// isUniqueName returns true when no loaded rule is named `name`.
// Callers must hold the lock.
func (l *Loader) isUniqueName(name string) bool {
	if _, found := l.rules[name]; found {
		return false
	}
	return true
}
// setUniqueName makes sure the rule's name doesn't collide with an existing
// rule, appending "-<n>" until it is unique. The first collision yields "-2".
func (l *Loader) setUniqueName(rule *Rule) {
	l.Lock()
	defer l.Unlock()
	idx := 1
	base := rule.Name
	for l.isUniqueName(rule.Name) == false {
		idx++
		rule.Name = fmt.Sprintf("%s-%d", base, idx)
	}
}
// sortRules rebuilds rulesKeys as the alphabetically sorted list of rule
// names. Callers must hold the lock.
func (l *Loader) sortRules() {
	keys := make([]string, 0, len(l.rules))
	for name := range l.rules {
		keys = append(keys, name)
	}
	sort.Strings(keys)
	l.rulesKeys = keys
}
// addUserRule installs a rule created by the user, ensuring its name is
// unique. Rules with Duration == Once are not stored.
func (l *Loader) addUserRule(rule *Rule) {
	if rule.Duration == Once {
		return
	}
	l.setUniqueName(rule)
	// previously the error from replaceUserRule() was silently discarded;
	// log it so operator compile failures are at least visible.
	if err := l.replaceUserRule(rule); err != nil {
		log.Warning("addUserRule() error: %s", err)
	}
}
// replaceUserRule compiles a user rule and installs it in memory, replacing
// any previous rule of the same name. If the old rule was saved on disk
// (Always) and the new one is temporary, the on-disk copy is removed.
func (l *Loader) replaceUserRule(rule *Rule) (err error) {
	l.Lock()
	oldRule, found := l.rules[rule.Name]
	l.Unlock()
	if found {
		// If the rule has changed from Always (saved on disk) to !Always (temporary),
		// we need to delete the rule from disk and keep it in memory.
		l.deleteOldRuleFromDisk(oldRule, rule)
		// delete loaded lists, if this is a rule of type Lists
		l.cleanListsRule(oldRule)
	}
	if rule.Enabled {
		if err := rule.Operator.Compile(); err != nil {
			log.Warning("Operator.Compile() error: %s: %s", err, rule.Operator.Data)
			return fmt.Errorf("(2) Error compiling rule: %s", err)
		}
		if rule.Operator.Type == List {
			// TODO: use List protobuf object instead of un/marshalling to/from json
			if err = json.Unmarshal([]byte(rule.Operator.Data), &rule.Operator.List); err != nil {
				return fmt.Errorf("Error loading rule of type list: %s", err)
			}
			// compile each sub-operator of the list
			for i := 0; i < len(rule.Operator.List); i++ {
				if err := rule.Operator.List[i].Compile(); err != nil {
					log.Warning("Operator.Compile() error: %s: ", err)
					return fmt.Errorf("(2) Error compiling list rule: %s", err)
				}
			}
		}
	}
	l.Lock()
	l.rules[rule.Name] = rule
	l.sortRules()
	l.Unlock()
	// temporary rules get a timer that removes them once they expire
	if l.isTemporary(rule) {
		err = l.scheduleTemporaryRule(*rule)
	}
	return err
}
// scheduleTemporaryRule arms a timer that deletes the temporary rule from
// memory once its Duration elapses. The rule is received by value, so the
// closure keeps a snapshot of Name/Duration as they were when scheduled.
func (l *Loader) scheduleTemporaryRule(rule Rule) error {
	tTime, err := time.ParseDuration(string(rule.Duration))
	if err != nil {
		return err
	}
	time.AfterFunc(tTime, func() {
		l.Lock()
		defer l.Unlock()
		log.Info("Temporary rule expired: %s - %s", rule.Name, rule.Duration)
		if newRule, found := l.rules[rule.Name]; found {
			// the rule was replaced with a different duration meanwhile; keep it
			if newRule.Duration != rule.Duration {
				log.Debug("%s temporary rule expired, but has new Duration, old: %s, new: %s", rule.Name, rule.Duration, newRule.Duration)
				return
			}
			delete(l.rules, rule.Name)
			l.sortRules()
		}
	})
	return nil
}
// Add adds a rule to the list of rules, and optionally saves it to disk.
func (l *Loader) Add(rule *Rule, saveToDisk bool) error {
	l.addUserRule(rule)
	if !saveToDisk {
		return nil
	}
	fileName := filepath.Join(l.path, fmt.Sprintf("%s.json", rule.Name))
	return l.Save(rule, fileName)
}
// Replace adds a rule to the list of rules, and optionally saves it to disk.
func (l *Loader) Replace(rule *Rule, saveToDisk bool) error {
	if err := l.replaceUserRule(rule); err != nil {
		return err
	}
	if saveToDisk {
		// NOTE(review): Save() performs disk I/O while holding the write
		// lock, blocking all rule lookups meanwhile, whereas Add() saves
		// without the lock. Confirm which behavior is intended.
		l.Lock()
		defer l.Unlock()
		fileName := filepath.Join(l.path, fmt.Sprintf("%s.json", rule.Name))
		return l.Save(rule, fileName)
	}
	return nil
}
// Save writes a rule to the given path as indented JSON, refreshing its
// Updated timestamp first. The file is created world-readable (0644).
func (l *Loader) Save(rule *Rule, path string) error {
	rule.Updated = time.Now()
	raw, err := json.MarshalIndent(rule, "", " ")
	if err != nil {
		return fmt.Errorf("Error while saving rule %s to %s: %s", rule, path, err)
	}
	if err = ioutil.WriteFile(path, raw, 0644); err != nil {
		return fmt.Errorf("Error while saving rule %s to %s: %s", rule, path, err)
	}
	return nil
}
// Delete deletes a rule from the list by name.
// If the duration is Always (i.e: saved on disk), it'll attempt to delete
// it from disk.
func (l *Loader) Delete(ruleName string) error {
	l.Lock()
	defer l.Unlock()
	rule, found := l.rules[ruleName]
	if !found || rule == nil {
		return nil
	}
	l.cleanListsRule(rule)
	delete(l.rules, ruleName)
	l.sortRules()
	if rule.Duration == Always {
		log.Info("Delete() rule: %s", rule)
		return l.deleteRuleFromDisk(ruleName)
	}
	return nil
}
// FindFirstMatch will try match the connection against the existing rule set.
// Rules are evaluated in the order given by l.rulesKeys (sorted by name).
// A matching Deny/Reject rule, or any rule with Precedence set, wins
// immediately; otherwise the last match found is returned (possibly nil).
func (l *Loader) FindFirstMatch(con *conman.Connection) (match *Rule) {
	l.RLock()
	defer l.RUnlock()
	for _, idx := range l.rulesKeys {
		rule := l.rules[idx]
		// guard against a stale key (keys and map are updated together,
		// but a nil entry must not panic the matcher)
		if rule == nil || !rule.Enabled {
			continue
		}
		if rule.Match(con) {
			// We have a match.
			// Save the rule in order to don't ask the user to take action,
			// and keep iterating until a Deny or a Priority rule appears.
			match = rule
			if rule.Action == Reject || rule.Action == Deny || rule.Precedence {
				return rule
			}
		}
	}
	return match
}
opensnitch-1.5.8.1/daemon/rule/loader_test.go 0000664 0000000 0000000 00000016724 14401326716 0021122 0 ustar 00root root 0000000 0000000 package rule
import (
"io"
"math/rand"
"os"
"testing"
"time"
)
// tmpDir is the scratch directory shared by the tests in this file.
var tmpDir string

// TestMain creates the shared temp directory and removes it after the run.
// Fix: `defer os.RemoveAll(tmpDir)` before os.Exit() never executed, because
// os.Exit terminates the process without running deferred calls — the temp
// directory was leaked on every test run. Cleanup is now explicit.
func TestMain(m *testing.M) {
	tmpDir = "/tmp/ostest_" + randString()
	os.Mkdir(tmpDir, 0777)
	code := m.Run()
	os.RemoveAll(tmpDir)
	os.Exit(code)
}
// TestRuleLoader exercises the full loader life cycle: loading rules from
// disk, adding in-memory temporary rules, automatic expiration of a 1s rule,
// and then a series of ordering/matching sub-checks. The sub-check helpers
// mutate shared loader state, so their call order matters.
func TestRuleLoader(t *testing.T) {
	t.Parallel()
	t.Log("Test rules loader")
	var list []Operator
	dur1s := Duration("1s")
	dummyOper, _ := NewOperator(Simple, false, OpTrue, "", list)
	dummyOper.Compile()
	// one rule that expires after 1s, one that lives until daemon restart
	inMem1sRule := Create("000-xxx-name", true, false, Allow, dur1s, dummyOper)
	inMemUntilRestartRule := Create("000-aaa-name", true, false, Allow, Restart, dummyOper)
	l, err := NewLoader(false)
	if err != nil {
		t.Fail()
	}
	if err = l.Load("/non/existent/path/"); err == nil {
		t.Error("non existent path test: err should not be nil")
	}
	// testdata/ ships 2 rules: 000-allow-chrome and 001-deny-chrome
	if err = l.Load("testdata/"); err != nil {
		t.Error("Error loading test rules: ", err)
	}
	testNumRules(t, l, 2)
	if err = l.Add(inMem1sRule, false); err != nil {
		t.Error("Error adding temporary rule")
	}
	testNumRules(t, l, 3)
	// test auto deletion of temporary rule
	time.Sleep(time.Second * 2)
	testNumRules(t, l, 2)
	if err = l.Add(inMemUntilRestartRule, false); err != nil {
		t.Error("Error adding temporary rule (2)")
	}
	testNumRules(t, l, 3)
	testRulesOrder(t, l)
	testSortRules(t, l)
	testFindMatch(t, l)
	testFindEnabled(t, l)
	testDurationChange(t, l)
}
// TestRuleLoaderInvalidRegexp verifies that rules containing an invalid
// regular expression are rejected, both when loaded from disk (simple and
// list types) and when replaced in memory via replaceUserRule().
func TestRuleLoaderInvalidRegexp(t *testing.T) {
	t.Parallel()
	t.Log("Test rules loader: invalid regexp")
	l, err := NewLoader(true)
	if err != nil {
		t.Fail()
	}
	t.Run("loadRule() from disk test (simple)", func(t *testing.T) {
		if err := l.loadRule("testdata/invalid-regexp.json"); err == nil {
			t.Error("invalid regexp rule loaded: loadRule()")
		}
	})
	t.Run("loadRule() from disk test (list)", func(t *testing.T) {
		if err := l.loadRule("testdata/invalid-regexp-list.json"); err == nil {
			t.Error("invalid regexp rule loaded: loadRule()")
		}
	})
	var list []Operator
	dur30m := Duration("30m")
	// "^(/di(rmngr)$" has an unbalanced parenthesis — must fail to compile
	opListData := `[{"type": "regexp", "operand": "process.path", "sensitive": false, "data": "^(/di(rmngr)$"}, {"type": "simple", "operand": "dest.port", "data": "53", "sensitive": false}]`
	invalidRegexpOp, _ := NewOperator(List, false, OpList, opListData, list)
	invalidRegexpRule := Create("invalid-regexp", true, false, Allow, dur30m, invalidRegexpOp)
	t.Run("replaceUserRule() test list", func(t *testing.T) {
		if err := l.replaceUserRule(invalidRegexpRule); err == nil {
			t.Error("invalid regexp rule loaded: replaceUserRule()")
		}
	})
}
// TestLiveReload verifies that the loader's filesystem watcher picks up rule
// files added to and removed from the watched directory at runtime.
// NOTE(review): relies on fixed 1s sleeps for the watcher to catch up; on a
// slow/loaded machine these could flake — confirm acceptable for CI.
func TestLiveReload(t *testing.T) {
	t.Parallel()
	t.Log("Test rules loader with live reload")
	l, err := NewLoader(true)
	if err != nil {
		t.Fail()
	}
	if err = Copy("testdata/000-allow-chrome.json", tmpDir+"/000-allow-chrome.json"); err != nil {
		t.Error("Error copying rule into a temp dir")
	}
	if err = Copy("testdata/001-deny-chrome.json", tmpDir+"/001-deny-chrome.json"); err != nil {
		t.Error("Error copying rule into a temp dir")
	}
	if err = l.Load(tmpDir); err != nil {
		t.Error("Error loading test rules: ", err)
	}
	//wait for watcher to activate
	time.Sleep(time.Second)
	if err = Copy("testdata/live_reload/test-live-reload-remove.json", tmpDir+"/test-live-reload-remove.json"); err != nil {
		t.Error("Error copying rules into temp dir")
	}
	if err = Copy("testdata/live_reload/test-live-reload-delete.json", tmpDir+"/test-live-reload-delete.json"); err != nil {
		t.Error("Error copying rules into temp dir")
	}
	//wait for watcher to pick up the changes
	time.Sleep(time.Second)
	testNumRules(t, l, 4)
	// one rule removed directly from disk, one via the Delete() API
	if err = os.Remove(tmpDir + "/test-live-reload-remove.json"); err != nil {
		t.Error("Error Remove()ing file from temp dir")
	}
	if err = l.Delete("test-live-reload-delete"); err != nil {
		t.Error("Error Delete()ing file from temp dir")
	}
	//wait for watcher to pick up the changes
	time.Sleep(time.Second)
	testNumRules(t, l, 2)
}
// randString returns a 10-character random string of ASCII letters, used to
// build unique temp-directory names.
// Fix: the original called rand.Seed() on the global PRNG on every invocation,
// which both mutates global state and can produce identical strings when two
// calls land in the same clock tick. A locally-seeded source avoids both.
func randString() string {
	letterRunes := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	b := make([]rune, 10)
	for i := range b {
		b[i] = letterRunes[rng.Intn(len(letterRunes))]
	}
	return string(b)
}
func Copy(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
return err
}
return out.Close()
}
// testNumRules asserts that the loader holds exactly num rules.
// Fix: the failure message hardcoded "(2)" regardless of the expectation and
// never reported the actual count, making failures impossible to diagnose.
func testNumRules(t *testing.T, l *Loader, num int) {
	if l.NumRules() != num {
		t.Error("expected number of rules: ", num, ", got: ", l.NumRules())
	}
}
// testRulesOrder asserts the expected lexicographic ordering of the loader's
// rule keys after loading testdata/ plus the "000-aaa-name" in-memory rule.
func testRulesOrder(t *testing.T, l *Loader) {
	if l.rulesKeys[0] != "000-aaa-name" {
		t.Error("Rules not in order (0): ", l.rulesKeys)
	}
	if l.rulesKeys[1] != "000-allow-chrome" {
		t.Error("Rules not in order (1): ", l.rulesKeys)
	}
	if l.rulesKeys[2] != "001-deny-chrome" {
		t.Error("Rules not in order (2): ", l.rulesKeys)
	}
}
// testSortRules deliberately swaps two keys out of order and verifies that
// sortRules() restores the lexicographic ordering.
func testSortRules(t *testing.T, l *Loader) {
	l.rulesKeys[1] = "001-deny-chrome"
	l.rulesKeys[2] = "000-allow-chrome"
	l.sortRules()
	if l.rulesKeys[1] != "000-allow-chrome" {
		t.Error("Rules not in order (1): ", l.rulesKeys)
	}
	if l.rulesKeys[2] != "001-deny-chrome" {
		t.Error("Rules not in order (2): ", l.rulesKeys)
	}
}
// testFindMatch points the shared test connection at the chrome binary and
// runs the priority/deny/allow matching scenarios in order (they mutate the
// loaded rules), restoring the shared connection fixture afterwards.
func testFindMatch(t *testing.T, l *Loader) {
	conn.Process.Path = "/opt/google/chrome/chrome"
	testFindPriorityMatch(t, l)
	testFindDenyMatch(t, l)
	testFindAllowMatch(t, l)
	restoreConnection()
}
// testFindPriorityMatch verifies that the Precedence rule wins the match.
// Fix: after reporting a nil match with t.Error (which does not stop the
// test), the code dereferenced match.Name and crashed the whole test binary
// with a nil-pointer panic; return early instead.
func testFindPriorityMatch(t *testing.T, l *Loader) {
	match := l.FindFirstMatch(conn)
	if match == nil {
		t.Error("FindPriorityMatch didn't match")
		return
	}
	// test 000-allow-chrome, priority == true
	if match.Name != "000-allow-chrome" {
		t.Error("findPriorityMatch: priority rule failed: ", match)
	}
}
// testFindDenyMatch verifies that with precedence disabled, the deny rule
// wins over the allow rule.
// Fix: guard against dereferencing a nil match after t.Error (t.Error does
// not abort execution, so match.Name would panic).
func testFindDenyMatch(t *testing.T, l *Loader) {
	l.rules["000-allow-chrome"].Precedence = false
	// test 000-allow-chrome, priority == false
	// 001-deny-chrome must match
	match := l.FindFirstMatch(conn)
	if match == nil {
		t.Error("FindDenyMatch deny didn't match")
		return
	}
	if match.Name != "001-deny-chrome" {
		t.Error("findDenyMatch: deny rule failed: ", match)
	}
}
// testFindAllowMatch flips the deny rule to Allow and verifies the last
// matching rule is returned when no deny/precedence rule short-circuits.
// Fix: guard against dereferencing a nil match after t.Error (t.Error does
// not abort execution, so match.Name would panic).
func testFindAllowMatch(t *testing.T, l *Loader) {
	l.rules["000-allow-chrome"].Precedence = false
	l.rules["001-deny-chrome"].Action = Allow
	// test 000-allow-chrome, priority == false
	// 001-deny-chrome must match
	match := l.FindFirstMatch(conn)
	if match == nil {
		t.Error("FindAllowMatch allow didn't match")
		return
	}
	if match.Name != "001-deny-chrome" {
		t.Error("findAllowMatch: allow rule failed: ", match)
	}
}
// testFindEnabled disables the deny rule and verifies disabled rules are
// skipped by the matcher.
// Fix: guard against dereferencing a nil match after t.Error (t.Error does
// not abort execution, so match.Name would panic).
func testFindEnabled(t *testing.T, l *Loader) {
	l.rules["000-allow-chrome"].Precedence = false
	l.rules["001-deny-chrome"].Action = Allow
	l.rules["001-deny-chrome"].Enabled = false
	// test 000-allow-chrome, priority == false
	// 001-deny-chrome must match
	match := l.FindFirstMatch(conn)
	if match == nil {
		t.Error("FindEnabledMatch, match nil")
		return
	}
	if match.Name == "001-deny-chrome" {
		t.Error("findEnabledMatch: deny rule shouldn't have matched: ", match)
	}
}
// test that changing the Duration of a temporary rule doesn't delete
// the new one, ignoring the old timer.
func testDurationChange(t *testing.T, l *Loader) {
	// arm a 2s expiration timer…
	l.rules["000-aaa-name"].Duration = "2s"
	if err := l.replaceUserRule(l.rules["000-aaa-name"]); err != nil {
		t.Error("testDurationChange, error replacing rule: ", err)
	}
	// …then immediately replace the rule with a 1h duration
	l.rules["000-aaa-name"].Duration = "1h"
	if err := l.replaceUserRule(l.rules["000-aaa-name"]); err != nil {
		t.Error("testDurationChange, error replacing rule: ", err)
	}
	// after the old 2s timer has fired, the rule must still exist
	time.Sleep(time.Second * 4)
	if _, found := l.rules["000-aaa-name"]; !found {
		t.Error("testDurationChange, error: rule has been deleted")
	}
}
opensnitch-1.5.8.1/daemon/rule/operator.go 0000664 0000000 0000000 00000016652 14401326716 0020450 0 ustar 00root root 0000000 0000000 package rule
import (
"fmt"
"net"
"reflect"
"regexp"
"strings"
"sync"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
// Type is the type of rule.
// Every type has its own way of checking the user data against connections.
type Type string

// Sensitive defines if a rule is case-sensitive or not. By default no.
type Sensitive bool

// Operand is what we check on a connection.
type Operand string

// Available types
const (
	Simple  = Type("simple")
	Regexp  = Type("regexp")
	Complex = Type("complex") // for future use
	List    = Type("list")
	Network = Type("network")
	Lists   = Type("lists")
)

// Available operands
const (
	OpTrue                = Operand("true")
	OpProcessID           = Operand("process.id")
	OpProcessPath         = Operand("process.path")
	OpProcessCmd          = Operand("process.command")
	OpProcessEnvPrefix    = Operand("process.env.")
	// length of OpProcessEnvPrefix; used to slice the env var name out of the operand
	OpProcessEnvPrefixLen = 12
	OpUserID              = Operand("user.id")
	OpDstIP               = Operand("dest.ip")
	OpDstHost             = Operand("dest.host")
	OpDstPort             = Operand("dest.port")
	OpDstNetwork          = Operand("dest.network")
	OpProto               = Operand("protocol")
	OpList                = Operand("list")
	OpDomainsLists        = Operand("lists.domains")
	OpDomainsRegexpLists  = Operand("lists.domains_regexp")
	OpIPLists             = Operand("lists.ips")
	OpNetLists            = Operand("lists.nets")
)

// opCallback is the comparison function a compiled Operator uses to match
// one field of a connection against the Operator's Data.
type opCallback func(value interface{}) bool

// Operator represents what we want to filter of a connection, and how.
type Operator struct {
	Type      Type       `json:"type"`
	Operand   Operand    `json:"operand"`
	Sensitive Sensitive  `json:"sensitive"`
	Data      string     `json:"data"`
	List      []Operator `json:"list"`

	sync.RWMutex
	cb      opCallback  // comparison callback selected by Compile()
	re      *regexp.Regexp // compiled expression when Type == Regexp
	netMask *net.IPNet  // parsed CIDR when Type == Network
	isCompiled bool     // set by Compile() to make it idempotent
	// lists holds entries loaded from list files; the value type depends on
	// the operand (file name, *net.IPNet or *regexp.Regexp).
	lists               map[string]interface{}
	listsMonitorRunning bool       // whether monitorLists() goroutine is active
	exitMonitorChan     chan (bool) // signals monitorLists() to stop
}
// NewOperator returns a new operator object.
// The returned operator still has to be Compile()d before use.
func NewOperator(t Type, s Sensitive, o Operand, data string, list []Operator) (*Operator, error) {
	return &Operator{
		Type:      t,
		Sensitive: s,
		Operand:   o,
		Data:      data,
		List:      list,
	}, nil
}
// Compile translates the operator type field to its callback counterpart.
// It is idempotent: a second call is a no-op. The cases are evaluated in the
// same order as the original if/else chain, since Type and Operand checks mix.
func (o *Operator) Compile() error {
	if o.isCompiled {
		return nil
	}
	switch {
	case o.Type == Simple:
		o.cb = o.simpleCmp
	case o.Type == Regexp:
		o.cb = o.reCmp
		// case-insensitive rules are matched against lowercased input,
		// so the pattern itself is lowercased too
		if o.Sensitive == false {
			o.Data = strings.ToLower(o.Data)
		}
		re, err := regexp.Compile(o.Data)
		if err != nil {
			return err
		}
		o.re = re
	case o.Operand == OpDomainsLists:
		if o.Data == "" {
			return fmt.Errorf("Operand lists is empty, nothing to load: %s", o)
		}
		o.loadLists()
		o.cb = o.domainsListCmp
	case o.Operand == OpDomainsRegexpLists:
		if o.Data == "" {
			return fmt.Errorf("Operand regexp lists is empty, nothing to load: %s", o)
		}
		o.loadLists()
		o.cb = o.reListCmp
	case o.Operand == OpIPLists:
		if o.Data == "" {
			return fmt.Errorf("Operand ip lists is empty, nothing to load: %s", o)
		}
		o.loadLists()
		o.cb = o.ipListCmp
	case o.Operand == OpNetLists:
		if o.Data == "" {
			return fmt.Errorf("Operand net lists is empty, nothing to load: %s", o)
		}
		o.loadLists()
		o.cb = o.ipNetCmp
	case o.Type == List:
		o.Operand = OpList
	case o.Type == Network:
		var err error
		if _, o.netMask, err = net.ParseCIDR(o.Data); err != nil {
			return err
		}
		o.cb = o.cmpNetwork
	}
	log.Debug("Operator compiled: %s", o)
	o.isCompiled = true
	return nil
}
// String returns a human-readable description of the operator.
func (o *Operator) String() string {
	verb := "is"
	if o.Type == Regexp {
		verb = "matches"
	}
	return fmt.Sprintf("%s %s '%s'", log.Bold(string(o.Operand)), verb, log.Yellow(string(o.Data)))
}
// simpleCmp compares the value against o.Data: exact comparison when the
// operator is case-sensitive, case-folded string comparison otherwise.
func (o *Operator) simpleCmp(v interface{}) bool {
	if o.Sensitive {
		return v == o.Data
	}
	return strings.EqualFold(v.(string), o.Data)
}
// reCmp matches the value against the compiled regexp, lowercasing the input
// first for case-insensitive operators. Non-string values never match.
func (o *Operator) reCmp(v interface{}) bool {
	if reflect.ValueOf(v).Kind() != reflect.String {
		log.Warning("Operator.reCmp() bad interface type: %T", v)
		return false
	}
	s := v.(string)
	if !o.Sensitive {
		s = strings.ToLower(s)
	}
	return o.re.MatchString(s)
}
// cmpNetwork reports whether destIP belongs to the operator's CIDR network.
func (o *Operator) cmpNetwork(destIP interface{}) bool {
	// 192.0.2.1/24, 2001:db8:a0b:12f0::1/32
	if o.netMask != nil {
		return o.netMask.Contains(destIP.(net.IP))
	}
	log.Warning("cmpNetwork() NULL: %s", destIP)
	return false
}
// domainsListCmp reports whether the destination host appears in the loaded
// domains lists (lowercased first for case-insensitive operators).
func (o *Operator) domainsListCmp(v interface{}) bool {
	host := v.(string)
	if host == "" {
		return false
	}
	if !o.Sensitive {
		host = strings.ToLower(host)
	}
	o.RLock()
	defer o.RUnlock()
	entry, found := o.lists[host]
	if found {
		log.Debug("%s: %s, %s", log.Red("domain list match"), host, entry)
	}
	return found
}
// ipListCmp reports whether the destination IP appears in the loaded IP lists.
func (o *Operator) ipListCmp(v interface{}) bool {
	ip := v.(string)
	if ip == "" {
		return false
	}
	o.RLock()
	defer o.RUnlock()
	if fileName, found := o.lists[ip]; found {
		log.Debug("%s: %s, %s", log.Red("IP list match"), ip, fileName.(string))
		return true
	}
	return false
}
// ipNetCmp reports whether the destination IP belongs to any network in the
// loaded nets lists.
func (o *Operator) ipNetCmp(dstIP interface{}) bool {
	ip := dstIP.(net.IP)
	o.RLock()
	defer o.RUnlock()
	for host, mask := range o.lists {
		if mask.(*net.IPNet).Contains(ip) {
			log.Debug("%s: %s, %s", log.Red("Net list match"), dstIP, host)
			return true
		}
	}
	return false
}
// reListCmp reports whether the destination host matches any regexp from the
// loaded regexp lists (lowercased first for case-insensitive operators).
func (o *Operator) reListCmp(v interface{}) bool {
	host := v.(string)
	if host == "" {
		return false
	}
	if !o.Sensitive {
		host = strings.ToLower(host)
	}
	o.RLock()
	defer o.RUnlock()
	for file, re := range o.lists {
		if re.(*regexp.Regexp).MatchString(host) {
			log.Debug("%s: %s, %s", log.Red("Regexp list match"), host, file)
			return true
		}
	}
	return false
}
// listMatch reports whether the connection matches ALL the operators of the
// list (logical AND). Fix: the original kept iterating after the first
// mismatch (`res = res && ...`), only skipping the Match call via
// short-circuit; returning on the first failure is equivalent and avoids the
// useless remaining iterations.
func (o *Operator) listMatch(con interface{}) bool {
	for i := 0; i < len(o.List); i++ {
		if !o.List[i].Match(con.(*conman.Connection)) {
			return false
		}
	}
	return true
}
// Match tries to match parts of a connection with the given operator.
// Operands that read a connection field dispatch to the compiled callback;
// "process.env.<NAME>" operands are handled last by prefix.
func (o *Operator) Match(con *conman.Connection) bool {
	switch o.Operand {
	case OpTrue:
		return true
	case OpList:
		return o.listMatch(con)
	case OpProcessPath:
		return o.cb(con.Process.Path)
	case OpProcessCmd:
		return o.cb(strings.Join(con.Process.Args, " "))
	case OpDstHost:
		// an empty DstHost never matches (falls through to false below)
		if con.DstHost != "" {
			return o.cb(con.DstHost)
		}
	case OpDstIP:
		return o.cb(con.DstIP.String())
	case OpDstPort:
		return o.cb(fmt.Sprintf("%d", con.DstPort))
	case OpUserID:
		return o.cb(fmt.Sprintf("%d", con.Entry.UserId))
	case OpProcessID:
		return o.cb(fmt.Sprint(con.Process.ID))
	case OpDomainsLists, OpDomainsRegexpLists:
		return o.cb(con.DstHost)
	case OpIPLists:
		return o.cb(con.DstIP.String())
	case OpDstNetwork, OpNetLists:
		return o.cb(con.DstIP)
	case OpProto:
		return o.cb(con.Protocol)
	}
	if strings.HasPrefix(string(o.Operand), string(OpProcessEnvPrefix)) {
		envVarName := core.Trim(string(o.Operand[OpProcessEnvPrefixLen:]))
		envVarValue, _ := con.Process.Env[envVarName]
		return o.cb(envVarValue)
	}
	return false
}
opensnitch-1.5.8.1/daemon/rule/operator_lists.go 0000664 0000000 0000000 00000014673 14401326716 0021667 0 ustar 00root root 0000000 0000000 package rule
import (
"fmt"
"io/ioutil"
"net"
"path/filepath"
"regexp"
"runtime/debug"
"strings"
"time"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
)
// monitorLists polls the lists directory (o.Data) every 4 seconds, reloading
// ALL lists whenever any file's mtime changes or the number of files changes.
// It runs as a goroutine and exits (clearing the lists) when a value arrives
// on o.exitMonitorChan.
func (o *Operator) monitorLists() {
	log.Info("monitor lists started: %s", o.Data)
	// per-file last seen modification times
	modTimes := make(map[string]time.Time)
	totalFiles := 0
	needReload := false
	numFiles := 0
	expr := filepath.Join(o.Data, "/*.*")
	for {
		select {
		case <-o.exitMonitorChan:
			goto Exit
		default:
			fileList, err := filepath.Glob(expr)
			if err != nil {
				log.Warning("Error reading directory of domains list: %s, %s", o.Data, err)
				goto Exit
			}
			numFiles = 0
			for _, filename := range fileList {
				// ignore hidden files
				name := filepath.Base(filename)
				if name[:1] == "." {
					delete(modTimes, filename)
					continue
				}
				// an overwrite operation performs two tasks: truncate the file and save the new content,
				// causing the file time to be modified twice.
				modTime, err := core.GetFileModTime(filename)
				if err != nil {
					log.Debug("deleting saved mod time due to error reading the list, %s", filename)
					delete(modTimes, filename)
				} else if lastModTime, found := modTimes[filename]; found {
					if lastModTime.Equal(modTime) == false {
						log.Debug("list changed: %s, %s, %s", lastModTime, modTime, filename)
						needReload = true
					}
				}
				modTimes[filename] = modTime
				numFiles++
			}
			fileList = nil
			// a file was added or removed since the last pass
			if numFiles != totalFiles {
				needReload = true
			}
			totalFiles = numFiles
			if needReload {
				// we can't reload a single list, because the domains of all lists are added to the same map.
				// we could have the domains separated by lists/files, but then we'd need to iterate the map in order
				// to match a domain. Reloading the lists shoud only occur once a day.
				if err := o.readLists(); err != nil {
					log.Warning("%s", err)
				}
				needReload = false
			}
			time.Sleep(4 * time.Second)
		}
	}
Exit:
	modTimes = nil
	o.ClearLists()
	log.Info("lists monitor stopped")
}
// ClearLists deletes all the entries of a list and hints the runtime to
// return the freed memory to the OS.
func (o *Operator) ClearLists() {
	o.Lock()
	defer o.Unlock()
	log.Info("clearing domains lists: %d - %s", len(o.lists), o.Data)
	for key := range o.lists {
		delete(o.lists, key)
	}
	debug.FreeOSMemory()
}
// StopMonitoringLists stops the monitoring lists goroutine.
// NOTE(review): listsMonitorRunning and exitMonitorChan are read/written here
// without holding the Operator lock; if the monitor goroutine has already
// exited (e.g. after a Glob error), this unbuffered send would block — confirm
// callers only invoke this while the monitor is alive.
func (o *Operator) StopMonitoringLists() {
	if o.listsMonitorRunning == true {
		o.exitMonitorChan <- true
		o.exitMonitorChan = nil
		o.listsMonitorRunning = false
	}
}
// readDomainsList parses a hosts-style blocklist ("0.0.0.0 domain" or
// "127.0.0.1 domain") and stores each domain in o.lists keyed by host, with
// the source file name as value. Returns the number of duplicated entries.
// Fix: a line that is exactly "127.0.0.1" (9 chars) passed the len >= 9 check
// but then sliced domain[10:], panicking with index out of range; lines too
// short to carry a host after the 127.0.0.1 prefix are now skipped.
func (o *Operator) readDomainsList(raw, fileName string) (dups uint64) {
	log.Debug("Loading domains list: %s, size: %d", fileName, len(raw))
	lines := strings.Split(string(raw), "\n")
	for _, domain := range lines {
		if len(domain) < 9 {
			continue
		}
		// exclude not valid lines
		if domain[:7] != "0.0.0.0" && domain[:9] != "127.0.0.1" {
			continue
		}
		host := domain[8:]
		// exclude localhost entries
		if domain[:9] == "127.0.0.1" {
			// need at least "127.0.0.1 x" (11 chars) for a non-empty host
			if len(domain) < 11 {
				continue
			}
			host = domain[10:]
		}
		if host == "local" || host == "localhost" || host == "localhost.localdomain" || host == "broadcasthost" {
			continue
		}
		host = core.Trim(host)
		if _, found := o.lists[host]; found {
			dups++
			continue
		}
		o.lists[host] = fileName
	}
	lines = nil
	log.Info("%d domains loaded, %s", len(o.lists), fileName)
	return dups
}
// readNetList parses a file of CIDR networks (one per line, '#' comments
// allowed) into o.lists, keyed by the trimmed CIDR string with the parsed
// *net.IPNet as value. Returns the number of duplicated entries.
func (o *Operator) readNetList(raw, fileName string) (dups uint64) {
	log.Debug("Loading nets list: %s, size: %d", fileName, len(raw))
	for _, line := range strings.Split(string(raw), "\n") {
		if line == "" || line[0] == '#' {
			continue
		}
		netCIDR := core.Trim(line)
		if _, found := o.lists[netCIDR]; found {
			dups++
			continue
		}
		_, netMask, err := net.ParseCIDR(netCIDR)
		if err != nil {
			log.Warning("Error parsing net from list: %s, (%s)", err, fileName)
			continue
		}
		o.lists[netCIDR] = netMask
	}
	log.Info("%d nets loaded, %s", len(o.lists), fileName)
	return dups
}
// readRegexpList parses a file of regular expressions (one per line, '#'
// comments allowed) into o.lists, keyed by the raw line with the compiled
// *regexp.Regexp as value. Returns the number of duplicated entries.
// Fix: the duplicate check looked up the TRIMMED line while the map is keyed
// by the raw line, so duplicate lines carrying surrounding whitespace were
// never detected and got recompiled/overwritten; check the same key we store.
func (o *Operator) readRegexpList(raw, fileName string) (dups uint64) {
	log.Debug("Loading regexp list: %s, size: %d", fileName, len(raw))
	lines := strings.Split(string(raw), "\n")
	for n, line := range lines {
		if line == "" || line[0] == '#' {
			continue
		}
		if _, found := o.lists[line]; found {
			dups++
			continue
		}
		re, err := regexp.Compile(line)
		if err != nil {
			log.Warning("Error compiling regexp from list: %s, (%d:%s)", err, n, fileName)
			continue
		}
		o.lists[line] = re
	}
	lines = nil
	log.Info("%d regexps loaded, %s", len(o.lists), fileName)
	return dups
}
// readIPList parses a file of IP addresses (one per line, '#' comments
// allowed) into o.lists, keyed by the trimmed IP with the source file name as
// value. Returns the number of duplicated entries.
func (o *Operator) readIPList(raw, fileName string) (dups uint64) {
	log.Debug("Loading IPs list: %s, size: %d", fileName, len(raw))
	for _, line := range strings.Split(string(raw), "\n") {
		if line == "" || line[0] == '#' {
			continue
		}
		ip := core.Trim(line)
		if _, found := o.lists[ip]; found {
			dups++
			continue
		}
		o.lists[ip] = fileName
	}
	log.Info("%d IPs loaded, %s", len(o.lists), fileName)
	return dups
}
// readLists rebuilds o.lists from every non-hidden file under o.Data,
// dispatching each file to the reader that matches the operand type.
func (o *Operator) readLists() error {
	o.ClearLists()
	var dups uint64
	// this list is particular to this operator and rule
	o.Lock()
	defer o.Unlock()
	o.lists = make(map[string]interface{})
	expr := filepath.Join(o.Data, "*.*")
	fileList, err := filepath.Glob(expr)
	if err != nil {
		return fmt.Errorf("Error loading domains lists '%s': %s", expr, err)
	}
	for _, fileName := range fileList {
		// ignore hidden files
		if filepath.Base(fileName)[:1] == "." {
			continue
		}
		raw, err := ioutil.ReadFile(fileName)
		if err != nil {
			log.Warning("Error reading list of IPs (%s): %s", fileName, err)
			continue
		}
		switch o.Operand {
		case OpDomainsLists:
			dups += o.readDomainsList(string(raw), fileName)
		case OpDomainsRegexpLists:
			dups += o.readRegexpList(string(raw), fileName)
		case OpNetLists:
			dups += o.readNetList(string(raw), fileName)
		case OpIPLists:
			dups += o.readIPList(string(raw), fileName)
		default:
			log.Warning("Unknown lists operand type: %s", o.Operand)
		}
	}
	log.Info("%d lists loaded, %d domains, %d duplicated", len(fileList), len(o.lists), dups)
	return nil
}
// loadLists starts the background goroutine that loads and monitors the list
// files of this operator, unless it is already running.
func (o *Operator) loadLists() {
	log.Info("loading domains lists: %s, %s, %s", o.Type, o.Operand, o.Data)
	// when loading from disk, we don't use the Operator's constructor, so we need to create this channel
	if o.exitMonitorChan != nil {
		return
	}
	o.exitMonitorChan = make(chan bool)
	o.listsMonitorRunning = true
	go o.monitorLists()
}
opensnitch-1.5.8.1/daemon/rule/operator_test.go 0000664 0000000 0000000 00000053132 14401326716 0021501 0 ustar 00root root 0000000 0000000 package rule
import (
"encoding/json"
"fmt"
"net"
"testing"
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/netstat"
"github.com/evilsocket/opensnitch/daemon/procmon"
)
// Shared fixtures for the operator tests: a fake process, netstat entry and
// connection. Several tests mutate these and call restoreConnection() after.
var (
	defaultProcPath = "/usr/bin/opensnitchd"
	defaultProcArgs = "-rules-path /etc/opensnitchd/rules/"
	defaultDstHost  = "opensnitch.io"
	defaultDstPort  = uint(443)
	defaultDstIP    = "185.53.178.14"
	defaultUserID   = 666

	// fake netstat entry carrying only the UID
	netEntry = &netstat.Entry{
		UserId: defaultUserID,
	}

	// fake process the test connection is attributed to
	proc = &procmon.Process{
		ID:   12345,
		Path: defaultProcPath,
		Args: []string{"-rules-path", "/etc/opensnitchd/rules/"},
	}

	// the connection every operator is matched against
	conn = &conman.Connection{
		Protocol: "TCP",
		SrcPort:  66666,
		SrcIP:    net.ParseIP("192.168.1.111"),
		DstIP:    net.ParseIP(defaultDstIP),
		DstPort:  defaultDstPort,
		DstHost:  defaultDstHost,
		Process:  proc,
		Entry:    netEntry,
	}
)
// compileListOperators compiles every sub-operator of a List operator,
// reporting any compilation failure through t.
func compileListOperators(list *[]Operator, t *testing.T) {
	ops := *list
	for i := range ops {
		if err := ops[i].Compile(); err != nil {
			t.Error("NewOperator List, Compile() subitem error:", err)
		}
	}
}
// unmarshalListData decodes a JSON array of operators, reporting any decoding
// failure through t and returning nil in that case.
func unmarshalListData(data string, t *testing.T) (op *[]Operator) {
	err := json.Unmarshal([]byte(data), &op)
	if err == nil {
		return op
	}
	t.Error("Error unmarshalling list data:", err, data)
	return nil
}
// restoreConnection resets the fields of the shared test connection that the
// operator tests mutate, back to their default fixture values.
func restoreConnection() {
	conn.Process.Path = defaultProcPath
	conn.DstHost = defaultDstHost
	conn.DstPort = defaultDstPort
	conn.Entry.UserId = defaultUserID
}
// TestNewOperatorSimple exercises the Simple operator type against every
// supported operand (true, proc.id, proc.path, dest.host, proc.args, dest.ip,
// user.id), in both case-sensitive and case-insensitive modes. Sub-tests
// mutate the shared conn fixture; restoreConnection() resets it at the end.
func TestNewOperatorSimple(t *testing.T) {
	t.Log("Test NewOperator() simple")
	var list []Operator
	opSimple, err := NewOperator(Simple, false, OpTrue, "", list)
	if err != nil {
		t.Error("NewOperator simple.err should be nil: ", err)
		t.Fail()
	}
	if err = opSimple.Compile(); err != nil {
		t.Fail()
	}
	// OpTrue must match anything, even a nil connection
	if opSimple.Match(nil) == false {
		t.Error("Test NewOperator() simple.case-insensitive doesn't match")
		t.Fail()
	}
	t.Run("Operator Simple proc.id", func(t *testing.T) {
		// proc.id not sensitive
		opSimple, err = NewOperator(Simple, false, OpProcessID, "12345", list)
		if err != nil {
			t.Error("NewOperator simple.case-insensitive.proc.id err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple.case-insensitive.proc.id Compile() err:", err)
			t.Fail()
		}
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple proc.id doesn't match")
			t.Fail()
		}
	})
	opSimple, err = NewOperator(Simple, false, OpProcessPath, defaultProcPath, list)
	t.Run("Operator Simple proc.path case-insensitive", func(t *testing.T) {
		// proc path not sensitive
		if err != nil {
			t.Error("NewOperator simple proc.path err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple.case-insensitive.proc.path Compile() err:", err)
			t.Fail()
		}
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple proc.path doesn't match")
			t.Fail()
		}
	})
	t.Run("Operator Simple proc.path sensitive", func(t *testing.T) {
		// proc path sensitive
		opSimple.Sensitive = true
		conn.Process.Path = "/usr/bin/OpenSnitchd"
		if opSimple.Match(conn) == true {
			t.Error("Test NewOperator() simple proc.path sensitive match")
			t.Fail()
		}
	})
	opSimple, err = NewOperator(Simple, false, OpDstHost, defaultDstHost, list)
	t.Run("Operator Simple con.dstHost case-insensitive", func(t *testing.T) {
		// proc dst host not sensitive
		if err != nil {
			t.Error("NewOperator simple proc.path err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple.case-insensitive.dstHost Compile() err:", err)
			t.Fail()
		}
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple.conn.dstHost.not-sensitive doesn't match")
			t.Fail()
		}
	})
	t.Run("Operator Simple con.dstHost case-insensitive different host", func(t *testing.T) {
		conn.DstHost = "www.opensnitch.io"
		if opSimple.Match(conn) == true {
			t.Error("Test NewOperator() simple.conn.dstHost.not-sensitive doesn't MATCH")
			t.Fail()
		}
	})
	t.Run("Operator Simple con.dstHost sensitive", func(t *testing.T) {
		// proc dst host sensitive
		opSimple, err = NewOperator(Simple, true, OpDstHost, "OpEnsNitCh.io", list)
		if err != nil {
			t.Error("NewOperator simple.dstHost.sensitive err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple.dstHost.sensitive Compile() err:", err)
			t.Fail()
		}
		conn.DstHost = "OpEnsNitCh.io"
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple.dstHost.sensitive doesn't match")
			t.Fail()
		}
	})
	t.Run("Operator Simple proc.args case-insensitive", func(t *testing.T) {
		// proc args case-insensitive
		opSimple, err = NewOperator(Simple, false, OpProcessCmd, defaultProcArgs, list)
		if err != nil {
			t.Error("NewOperator simple proc.args err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple proc.args Compile() err: ", err)
			t.Fail()
		}
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple proc.args doesn't match")
			t.Fail()
		}
	})
	t.Run("Operator Simple con.dstIp case-insensitive", func(t *testing.T) {
		// proc dstIp case-insensitive
		opSimple, err = NewOperator(Simple, false, OpDstIP, defaultDstIP, list)
		if err != nil {
			t.Error("NewOperator simple conn.dstip.err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple con.dstIp Compile() err: ", err)
			t.Fail()
		}
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple conn.dstip doesn't match")
			t.Fail()
		}
	})
	t.Run("Operator Simple UserId case-insensitive", func(t *testing.T) {
		// conn.uid case-insensitive
		opSimple, err = NewOperator(Simple, false, OpUserID, fmt.Sprint(defaultUserID), list)
		if err != nil {
			t.Error("NewOperator simple conn.userid.err should be nil: ", err)
			t.Fail()
		}
		if err = opSimple.Compile(); err != nil {
			t.Error("NewOperator simple UserId Compile() err: ", err)
			t.Fail()
		}
		if opSimple.Match(conn) == false {
			t.Error("Test NewOperator() simple conn.userid doesn't match")
			t.Fail()
		}
	})
	restoreConnection()
}
// TestNewOperatorNetwork verifies the Network operator: the fixture's DstIP
// (185.53.178.14) must fall inside 185.53.178.14/24 and outside 8.8.8.8/24.
func TestNewOperatorNetwork(t *testing.T) {
	t.Log("Test NewOperator() network")
	var dummyList []Operator
	opSimple, err := NewOperator(Network, false, OpDstNetwork, "185.53.178.14/24", dummyList)
	if err != nil {
		t.Error("NewOperator network.err should be nil: ", err)
		t.Fail()
	}
	if err = opSimple.Compile(); err != nil {
		t.Fail()
	}
	if opSimple.Match(conn) == false {
		t.Error("Test NewOperator() network doesn't match")
		t.Fail()
	}
	// a network that does not contain the destination IP must not match
	opSimple, err = NewOperator(Network, false, OpDstNetwork, "8.8.8.8/24", dummyList)
	if err != nil {
		t.Error("NewOperator network.err should be nil: ", err)
		t.Fail()
	}
	if err = opSimple.Compile(); err != nil {
		t.Fail()
	}
	if opSimple.Match(conn) == true {
		t.Error("Test NewOperator() network doesn't match:", conn.DstIP)
		t.Fail()
	}
	restoreConnection()
}
// TestNewOperatorRegexp verifies a basic Regexp operator match against the
// connection protocol ("TCP" vs "^TCP$").
func TestNewOperatorRegexp(t *testing.T) {
	t.Log("Test NewOperator() regexp")
	var dummyList []Operator
	opRE, err := NewOperator(Regexp, false, OpProto, "^TCP$", dummyList)
	if err != nil {
		t.Error("NewOperator regexp.err should be nil: ", err)
		t.Fail()
	}
	if err = opRE.Compile(); err != nil {
		t.Fail()
	}
	if opRE.Match(conn) == false {
		t.Error("Test NewOperator() regexp doesn't match")
		t.Fail()
	}
	restoreConnection()
}
// TestNewOperatorInvalidRegexp verifies that Compile() fails for a malformed
// pattern ("^TC(P$" has an unbalanced parenthesis). Construction itself must
// still succeed — validation happens at compile time.
func TestNewOperatorInvalidRegexp(t *testing.T) {
	t.Log("Test NewOperator() invalid regexp")
	var dummyList []Operator
	opRE, err := NewOperator(Regexp, false, OpProto, "^TC(P$", dummyList)
	if err != nil {
		t.Error("NewOperator regexp.err should be nil: ", err)
		t.Fail()
	}
	if err = opRE.Compile(); err == nil {
		t.Error("NewOperator() invalid regexp. It should fail: ", err)
		t.Fail()
	}
	restoreConnection()
}
// TestNewOperatorRegexpSensitive verifies case handling of Regexp operators
// against the process path: a sensitive "^/tmp/cUrL$" matches only the exact
// casing, while the insensitive variant matches regardless of case.
func TestNewOperatorRegexpSensitive(t *testing.T) {
	t.Log("Test NewOperator() regexp sensitive")
	var dummyList []Operator
	var sensitive Sensitive
	sensitive = true
	conn.Process.Path = "/tmp/cUrL"
	opRE, err := NewOperator(Regexp, sensitive, OpProcessPath, "^/tmp/cUrL$", dummyList)
	if err != nil {
		t.Error("NewOperator regexp.case-sensitive.err should be nil: ", err)
		t.Fail()
	}
	if err = opRE.Compile(); err != nil {
		t.Fail()
	}
	if opRE.Match(conn) == false {
		t.Error("Test NewOperator() RE sensitive doesn't match:", conn.Process.Path)
		t.Fail()
	}
	t.Run("Operator regexp proc.path case-sensitive", func(t *testing.T) {
		// different casing must NOT match while the operator is sensitive
		conn.Process.Path = "/tmp/curl"
		if opRE.Match(conn) == true {
			t.Error("Test NewOperator() RE sensitive match:", conn.Process.Path)
			t.Fail()
		}
	})
	// same pattern, insensitive: both pattern and input are lowercased
	opRE, err = NewOperator(Regexp, !sensitive, OpProcessPath, "^/tmp/cUrL$", dummyList)
	if err != nil {
		t.Error("NewOperator regexp.case-insensitive.err should be nil: ", err)
		t.Fail()
	}
	if err = opRE.Compile(); err != nil {
		t.Fail()
	}
	if opRE.Match(conn) == false {
		t.Error("Test NewOperator() RE not sensitive match:", conn.Process.Path)
		t.Fail()
	}
	restoreConnection()
}
// TestNewOperatorList exercises the List operator type: its Data field is a
// JSON array of sub-operators which is unmarshaled into Operator.List.
// NOTE(review): the expectations below assume every sub-operator must match
// for the list to match — confirm against Operator.Match in operator.go.
// Uses the package fixtures `conn`, unmarshalListData(),
// compileListOperators() and restoreConnection().
func TestNewOperatorList(t *testing.T) {
	t.Log("Test NewOperator() List")
	var list []Operator
	listData := `[{"type": "simple", "operand": "dest.ip", "data": "185.53.178.14", "sensitive": false}, {"type": "simple", "operand": "dest.port", "data": "443", "sensitive": false}]`
	// simple list
	opList, err := NewOperator(List, false, OpProto, listData, list)
	t.Run("Operator List simple case-insensitive", func(t *testing.T) {
		if err != nil {
			t.Error("NewOperator list.regexp.err should be nil: ", err)
			t.Fail()
		}
		if err = opList.Compile(); err != nil {
			t.Fail()
		}
		opList.List = *unmarshalListData(opList.Data, t)
		compileListOperators(&opList.List, t)

		if opList.Match(conn) == false {
			t.Error("Test NewOperator() list simple doesn't match")
			t.Fail()
		}
	})

	t.Run("Operator List regexp case-insensitive", func(t *testing.T) {
		// list with regexp, case-insensitive
		listData = `[{"type": "regexp", "operand": "process.path", "data": "^/usr/bin/.*", "sensitive": false},{"type": "simple", "operand": "dest.ip", "data": "185.53.178.14", "sensitive": false}, {"type": "simple", "operand": "dest.port", "data": "443", "sensitive": false}]`
		opList.List = *unmarshalListData(listData, t)
		compileListOperators(&opList.List, t)

		if err = opList.Compile(); err != nil {
			t.Fail()
		}
		if opList.Match(conn) == false {
			t.Error("Test NewOperator() list regexp doesn't match")
			t.Fail()
		}
	})

	t.Run("Operator List regexp case-sensitive", func(t *testing.T) {
		// list with regexp, case-sensitive
		// "data": "^/usr/BiN/.*" must match conn.Process.Path (sensitive)
		listData = `[{"type": "regexp", "operand": "process.path", "data": "^/usr/BiN/.*", "sensitive": false},{"type": "simple", "operand": "dest.ip", "data": "185.53.178.14", "sensitive": false}, {"type": "simple", "operand": "dest.port", "data": "443", "sensitive": false}]`
		opList.List = *unmarshalListData(listData, t)
		compileListOperators(&opList.List, t)
		conn.Process.Path = "/usr/BiN/opensnitchd"
		opList.Sensitive = true

		if err = opList.Compile(); err != nil {
			t.Fail()
		}
		if opList.Match(conn) == false {
			t.Error("Test NewOperator() list.regexp.sensitive doesn't match:", conn.Process.Path)
			t.Fail()
		}
	})

	t.Run("Operator List regexp case-insensitive 2", func(t *testing.T) {
		// "data": "^/usr/BiN/.*" must not match conn.Process.Path (insensitive)
		// NOTE(review): despite the comment above, the assertion below expects
		// a match (Match == true) for the insensitive case — confirm intent.
		opList.Sensitive = false
		conn.Process.Path = "/USR/BiN/opensnitchd"

		if err = opList.Compile(); err != nil {
			t.Fail()
		}
		if opList.Match(conn) == false {
			t.Error("Test NewOperator() list.regexp.insensitive match:", conn.Process.Path)
			t.Fail()
		}
	})

	t.Run("Operator List regexp case-insensitive 3", func(t *testing.T) {
		// "data": "^/usr/BiN/.*" must match conn.Process.Path (insensitive)
		opList.Sensitive = false
		conn.Process.Path = "/USR/bin/opensnitchd"

		if err = opList.Compile(); err != nil {
			t.Fail()
		}
		if opList.Match(conn) == false {
			t.Error("Test NewOperator() list.regexp.insensitive match:", conn.Process.Path)
			t.Fail()
		}
	})
	restoreConnection()
}
// TestNewOperatorListsSimple tests a Lists operator that loads a domains
// blocklist from testdata/lists/domains/ and matches conn.DstHost against it.
// The 1s sleeps give the background list monitor time to (un)load the files.
func TestNewOperatorListsSimple(t *testing.T) {
	t.Log("Test NewOperator() Lists simple")
	var dummyList []Operator

	opLists, err := NewOperator(Lists, false, OpDomainsLists, "testdata/lists/domains/", dummyList)
	if err != nil {
		t.Error("NewOperator Lists, shouldn't be nil: ", err)
		t.Fail()
	}
	if err = opLists.Compile(); err != nil {
		t.Error("NewOperator Lists, Compile() error:", err)
	}
	time.Sleep(time.Second)
	t.Log("testing Lists, DstHost:", conn.DstHost)
	// The list contains 4 lines, 1 is a comment and there's a domain duplicated.
	// We should only load lines that start with 0.0.0.0 or 127.0.0.1
	// NOTE(review): this read of opLists.lists is done without holding the
	// operator lock, unlike the read after StopMonitoringLists() below.
	if len(opLists.lists) != 2 {
		t.Error("NewOperator Lists, number of domains error:", opLists.lists, len(opLists.lists))
	}
	if opLists.Match(conn) == false {
		t.Error("Test NewOperator() lists doesn't match")
	}

	// After stopping the monitor the loaded lists must be emptied.
	opLists.StopMonitoringLists()
	time.Sleep(time.Second)
	opLists.Lock()
	if len(opLists.lists) != 0 {
		t.Error("NewOperator Lists, number should be 0 after stop:", opLists.lists, len(opLists.lists))
	}
	opLists.Unlock()
	restoreConnection()
}
// TestNewOperatorListsIPs tests a List operator combining a user.id match
// with a lists.ips sub-operator that loads IPs from testdata/lists/ips/.
// Fix: the log/error messages previously said "domains_regexp" — a
// copy-paste from another test — which made failures misleading; they now
// say "ips". A nil-guard on subOp was also added so a malformed list fails
// the test instead of panicking.
func TestNewOperatorListsIPs(t *testing.T) {
	t.Log("Test NewOperator() Lists ips")
	var subOp *Operator
	var list []Operator
	listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.ips", "data": "testdata/lists/ips/", "sensitive": false}]`
	opLists, err := NewOperator(List, false, OpList, listData, list)
	if err != nil {
		t.Error("NewOperator Lists ips, shouldn't be nil: ", err)
		t.Fail()
	}
	if err := opLists.Compile(); err != nil {
		t.Error("NewOperator Lists ips, Compile() error:", err)
	}
	opLists.List = *unmarshalListData(opLists.Data, t)
	for i := 0; i < len(opLists.List); i++ {
		if err := opLists.List[i].Compile(); err != nil {
			t.Error("NewOperator Lists ips, Compile() subitem error:", err)
		}
		if opLists.List[i].Type == Lists {
			subOp = &opLists.List[i]
		}
	}
	// fail fast if the lists.ips sub-operator wasn't found, instead of
	// dereferencing a nil pointer below.
	if subOp == nil {
		t.Fatal("NewOperator Lists ips, no lists sub-operator found")
	}
	// give the background list monitor time to load the files.
	time.Sleep(time.Second)
	if opLists.Match(conn) == false {
		t.Error("Test NewOperator() Lists ips, doesn't match:", conn.DstHost)
	}
	subOp.Lock()
	listslen := len(subOp.lists)
	subOp.Unlock()
	// The testdata file has comments, blank lines and a duplicate: only 2
	// unique IPs must be loaded.
	if listslen != 2 {
		t.Error("NewOperator Lists ips, number of ips error:", subOp.lists)
	}
	if opLists.Match(conn) == false {
		// we don't care about if it matches, we're testing race conditions
		t.Log("Test NewOperator() Lists ips, doesn't match:", conn.DstHost)
	}
	subOp.StopMonitoringLists()
	time.Sleep(time.Second)
	subOp.Lock()
	if len(subOp.lists) != 0 {
		t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
	}
	subOp.Unlock()
	restoreConnection()
}
// TestNewOperatorListsNETs tests a List operator combining a user.id match
// with a lists.nets sub-operator that loads CIDR networks from
// testdata/lists/nets/.
// Fix: the log/error messages previously said "domains_regexp" — a
// copy-paste from another test — which made failures misleading; they now
// say "nets". A nil-guard on subOp was also added so a malformed list fails
// the test instead of panicking.
func TestNewOperatorListsNETs(t *testing.T) {
	t.Log("Test NewOperator() Lists nets")
	var subOp *Operator
	var list []Operator
	listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.nets", "data": "testdata/lists/nets/", "sensitive": false}]`
	opLists, err := NewOperator(List, false, OpList, listData, list)
	if err != nil {
		t.Error("NewOperator Lists nets, shouldn't be nil: ", err)
		t.Fail()
	}
	if err := opLists.Compile(); err != nil {
		t.Error("NewOperator Lists nets, Compile() error:", err)
	}
	opLists.List = *unmarshalListData(opLists.Data, t)
	for i := 0; i < len(opLists.List); i++ {
		if err := opLists.List[i].Compile(); err != nil {
			t.Error("NewOperator Lists nets, Compile() subitem error:", err)
		}
		if opLists.List[i].Type == Lists {
			subOp = &opLists.List[i]
		}
	}
	// fail fast if the lists.nets sub-operator wasn't found, instead of
	// dereferencing a nil pointer below.
	if subOp == nil {
		t.Fatal("NewOperator Lists nets, no lists sub-operator found")
	}
	// give the background list monitor time to load the files.
	time.Sleep(time.Second)
	if opLists.Match(conn) == false {
		t.Error("Test NewOperator() Lists nets, doesn't match:", conn.DstHost)
	}
	subOp.Lock()
	listslen := len(subOp.lists)
	subOp.Unlock()
	// The testdata file has comments, blank lines and a duplicate: only 2
	// unique networks must be loaded.
	if listslen != 2 {
		t.Error("NewOperator Lists nets, number of nets error:", subOp.lists)
	}
	if opLists.Match(conn) == false {
		// we don't care about if it matches, we're testing race conditions
		t.Log("Test NewOperator() Lists nets, doesn't match:", conn.DstHost)
	}
	subOp.StopMonitoringLists()
	time.Sleep(time.Second)
	subOp.Lock()
	if len(subOp.lists) != 0 {
		t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
	}
	subOp.Unlock()
	restoreConnection()
}
// TestNewOperatorListsComplex tests a List operator combining a user.id
// match with a lists.domains sub-operator loading testdata/lists/domains/.
// NOTE(review): if the JSON contained no "lists" entry, subOp would stay
// nil and the dereference below would panic — the data is hard-coded here,
// so in practice it cannot happen.
func TestNewOperatorListsComplex(t *testing.T) {
	t.Log("Test NewOperator() Lists complex")
	var subOp *Operator
	var list []Operator
	listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.domains", "data": "testdata/lists/domains/", "sensitive": false}]`
	opLists, err := NewOperator(List, false, OpList, listData, list)
	if err != nil {
		t.Error("NewOperator Lists complex, shouldn't be nil: ", err)
		t.Fail()
	}
	if err := opLists.Compile(); err != nil {
		t.Error("NewOperator Lists complex, Compile() error:", err)
	}
	opLists.List = *unmarshalListData(opLists.Data, t)
	// Compile each sub-operator and remember the Lists one for later checks.
	for i := 0; i < len(opLists.List); i++ {
		if err := opLists.List[i].Compile(); err != nil {
			t.Error("NewOperator Lists complex, Compile() subitem error:", err)
		}
		if opLists.List[i].Type == Lists {
			subOp = &opLists.List[i]
		}
	}
	// give the background list monitor time to load the files.
	time.Sleep(time.Second)
	subOp.Lock()
	if len(subOp.lists) != 2 {
		t.Error("NewOperator Lists complex, number of domains error:", subOp.lists)
	}
	subOp.Unlock()
	if opLists.Match(conn) == false {
		t.Error("Test NewOperator() Lists complex, doesn't match")
	}

	// After stopping the monitor the loaded lists must be emptied.
	subOp.StopMonitoringLists()
	time.Sleep(time.Second)
	subOp.Lock()
	if len(subOp.lists) != 0 {
		t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
	}
	subOp.Unlock()
	restoreConnection()
}
// TestNewOperatorListsDomainsRegexp tests a List operator combining a
// user.id match with a lists.domains_regexp sub-operator loading regexp
// patterns from testdata/lists/regexp/.
func TestNewOperatorListsDomainsRegexp(t *testing.T) {
	t.Log("Test NewOperator() Lists domains_regexp")
	var subOp *Operator
	var list []Operator
	listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.domains_regexp", "data": "testdata/lists/regexp/", "sensitive": false}]`
	opLists, err := NewOperator(List, false, OpList, listData, list)
	if err != nil {
		t.Error("NewOperator Lists domains_regexp, shouldn't be nil: ", err)
		t.Fail()
	}
	if err := opLists.Compile(); err != nil {
		t.Error("NewOperator Lists domains_regexp, Compile() error:", err)
	}
	opLists.List = *unmarshalListData(opLists.Data, t)
	// Compile each sub-operator and remember the Lists one for later checks.
	for i := 0; i < len(opLists.List); i++ {
		if err := opLists.List[i].Compile(); err != nil {
			t.Error("NewOperator Lists domains_regexp, Compile() subitem error:", err)
		}
		if opLists.List[i].Type == Lists {
			subOp = &opLists.List[i]
		}
	}
	// give the background list monitor time to load the files.
	time.Sleep(time.Second)
	if opLists.Match(conn) == false {
		t.Error("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
	}
	subOp.Lock()
	listslen := len(subOp.lists)
	subOp.Unlock()
	if listslen != 2 {
		t.Error("NewOperator Lists domains_regexp, number of domains error:", subOp.lists)
	}
	//t.Log("checking lists.domains_regexp:", tries, conn.DstHost)
	if opLists.Match(conn) == false {
		// we don't care about if it matches, we're testing race conditions
		t.Log("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
	}

	// After stopping the monitor the loaded lists must be emptied.
	subOp.StopMonitoringLists()
	time.Sleep(time.Second)
	subOp.Lock()
	if len(subOp.lists) != 0 {
		t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
	}
	subOp.Unlock()
	restoreConnection()
}
// Must be launched with -race to test that we don't cause leaks
// Race occured on operator.go:241 reListCmp().MathString()
// fixed here: 53419fe
//
// TestRaceNewOperatorListsDomainsRegexp stress-tests concurrent list
// reloads: a goroutine touches the list file every 100ms (forcing the
// monitor to reload it) while the main goroutine calls Match() repeatedly.
// NOTE: with 10000 tries * 10ms this test takes well over a minute; the
// timing is intentional to surface data races.
func TestRaceNewOperatorListsDomainsRegexp(t *testing.T) {
	t.Log("Test NewOperator() Lists domains_regexp")
	var subOp *Operator
	var list []Operator
	listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.domains_regexp", "data": "testdata/lists/regexp/", "sensitive": false}]`
	opLists, err := NewOperator(List, false, OpList, listData, list)
	if err != nil {
		t.Error("NewOperator Lists domains_regexp, shouldn't be nil: ", err)
		t.Fail()
	}
	if err := opLists.Compile(); err != nil {
		t.Error("NewOperator Lists domains_regexp, Compile() error:", err)
	}
	opLists.List = *unmarshalListData(opLists.Data, t)
	for i := 0; i < len(opLists.List); i++ {
		if err := opLists.List[i].Compile(); err != nil {
			t.Error("NewOperator Lists domains_regexp, Compile() subitem error:", err)
		}
		if opLists.List[i].Type == Lists {
			subOp = &opLists.List[i]
		}
	}

	// touch domains list in background, to force a reload.
	go func() {
		touches := 1000
		for {
			if touches < 0 {
				break
			}
			core.Exec("/bin/touch", []string{"testdata/lists/regexp/domainsregexp.txt"})
			touches--
			time.Sleep(100 * time.Millisecond)
			//t.Log("touching:", touches)
		}
	}()
	// give the background list monitor time to load the files.
	time.Sleep(time.Second)
	subOp.Lock()
	listslen := len(subOp.lists)
	subOp.Unlock()
	if listslen != 2 {
		t.Error("NewOperator Lists domains_regexp, number of domains error:", subOp.lists)
	}
	// hammer Match() while the list keeps being reloaded in the background.
	tries := 10000
	for {
		if tries < 0 {
			break
		}
		//t.Log("checking lists.domains_regexp:", tries, conn.DstHost)
		if opLists.Match(conn) == false {
			// we don't care about if it matches, we're testing race conditions
			t.Log("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
		}
		tries--
		time.Sleep(10 * time.Millisecond)
	}
	subOp.StopMonitoringLists()
	time.Sleep(time.Second)
	subOp.Lock()
	if len(subOp.lists) != 0 {
		t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
	}
	subOp.Unlock()
	restoreConnection()
}
opensnitch-1.5.8.1/daemon/rule/rule.go 0000664 0000000 0000000 00000005526 14401326716 0017562 0 ustar 00root root 0000000 0000000 package rule
import (
"fmt"
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
)
// Action of a rule: what the daemon does with a connection that matches it.
type Action string

// Actions of rules
const (
	Allow = Action("allow")
	Deny = Action("deny")
	Reject = Action("reject")
)

// Duration of a rule: for how long the rule stays in effect.
type Duration string

// daemon possible durations
const (
	Once = Duration("once")
	Restart = Duration("until restart")
	Always = Duration("always")
)
// Rule represents an action on a connection.
// The fields match the ones saved as json to disk.
// If a .json rule file is modified on disk, it's reloaded automatically.
type Rule struct {
	// Created is set by Create() to the rule's creation time.
	Created time.Time `json:"created"`
	// Updated is the last-modification time.
	// NOTE(review): Create() does not set it, so it is the zero time until
	// something else updates it — confirm where it is written.
	Updated time.Time `json:"updated"`
	// Name identifies the rule (also used as the on-disk filename).
	Name string `json:"name"`
	// Enabled: disabled rules are kept but not evaluated.
	Enabled bool `json:"enabled"`
	// Precedence marks rules checked before others.
	Precedence bool `json:"precedence"`
	// Action to apply when the rule matches (allow/deny/reject).
	Action Action `json:"action"`
	// Duration: once / until restart / always.
	Duration Duration `json:"duration"`
	// Operator is the condition (possibly a list of conditions) to match.
	Operator Operator `json:"operator"`
}
// Create builds a new Rule from the given parameters, stamping Created
// with the current time.
// NOTE(review): Updated is left at its zero value; presumably it is set
// when the rule is modified — confirm against callers.
func Create(name string, enabled bool, precedence bool, action Action, duration Duration, op *Operator) *Rule {
	r := &Rule{
		Name:       name,
		Enabled:    enabled,
		Precedence: precedence,
		Action:     action,
		Duration:   duration,
		Operator:   *op,
	}
	r.Created = time.Now()
	return r
}
// String returns a human-readable one-line representation of the rule:
// "<name>: if(<operator>){ <action> <duration> }".
func (r *Rule) String() string {
	cond := r.Operator.String()
	return fmt.Sprintf("%s: if(%s){ %s %s }", r.Name, cond, r.Action, r.Duration)
}
// Match reports whether the given connection satisfies this rule's
// operator, i.e. whether the rule's action should be applied to it.
func (r *Rule) Match(con *conman.Connection) bool {
	matched := r.Operator.Match(con)
	return matched
}
// Deserialize converts a rule received over the protocol into a *Rule.
// It fails when the payload carries no operator or when the operator
// cannot be constructed.
func Deserialize(reply *protocol.Rule) (*Rule, error) {
	if reply.Operator == nil {
		log.Warning("Deserialize rule, Operator nil")
		return nil, fmt.Errorf("invalid operator")
	}

	// Rebuild the operator from its wire representation.
	op, err := NewOperator(
		Type(reply.Operator.Type),
		Sensitive(reply.Operator.Sensitive),
		Operand(reply.Operator.Operand),
		reply.Operator.Data,
		make([]Operator, 0),
	)
	if err != nil {
		log.Warning("Deserialize rule, NewOperator() error: %s", err)
		return nil, err
	}

	newRule := Create(
		reply.Name,
		reply.Enabled,
		reply.Precedence,
		Action(reply.Action),
		Duration(reply.Duration),
		op,
	)
	return newRule, nil
}
// Serialize translates a Rule to the protocol object.
// A nil receiver serializes to nil, so callers can forward the result
// without an extra check.
// Cleanup: removed the redundant conversions on fields that are already
// plain string/bool (Name, Enabled, Precedence, Operator.Data); the
// conversions are kept only where the field has a named type
// (Action, Duration, Operator.Type/Sensitive/Operand).
func (r *Rule) Serialize() *protocol.Rule {
	if r == nil {
		return nil
	}
	return &protocol.Rule{
		Name:       r.Name,
		Enabled:    r.Enabled,
		Precedence: r.Precedence,
		Action:     string(r.Action),
		Duration:   string(r.Duration),
		Operator: &protocol.Operator{
			Type:      string(r.Operator.Type),
			Sensitive: bool(r.Operator.Sensitive),
			Operand:   string(r.Operator.Operand),
			Data:      r.Operator.Data,
		},
	}
}
opensnitch-1.5.8.1/daemon/rule/rule_test.go 0000664 0000000 0000000 00000002136 14401326716 0020613 0 ustar 00root root 0000000 0000000 package rule
import "testing"
// TestCreate verifies that Create() fills in every field of a new Rule
// exactly as requested.
func TestCreate(t *testing.T) {
	t.Log("Test: Create rule")
	var emptyOps []Operator
	op, _ := NewOperator(Simple, false, OpTrue, "", emptyOps)
	r := Create("000-test-name", true, false, Allow, Once, op)

	t.Run("New rule must not be nil", func(t *testing.T) {
		if r == nil {
			t.Error("Create() returned nil")
			t.Fail()
		}
	})
	t.Run("Rule name must be 000-test-name", func(t *testing.T) {
		if got := r.Name; got != "000-test-name" {
			t.Error("Rule name error:", got)
			t.Fail()
		}
	})
	t.Run("Rule must be enabled", func(t *testing.T) {
		if !r.Enabled {
			t.Error("Rule Enabled is false:", r)
			t.Fail()
		}
	})
	t.Run("Rule Precedence must be false", func(t *testing.T) {
		if r.Precedence {
			t.Error("Rule Precedence is true:", r)
			t.Fail()
		}
	})
	t.Run("Rule Action must be Allow", func(t *testing.T) {
		if got := r.Action; got != Allow {
			t.Error("Rule Action is not Allow:", got)
			t.Fail()
		}
	})
	t.Run("Rule Duration should be Once", func(t *testing.T) {
		if got := r.Duration; got != Once {
			t.Error("Rule Duration is not Once:", got)
			t.Fail()
		}
	})
}
opensnitch-1.5.8.1/daemon/rule/testdata/ 0000775 0000000 0000000 00000000000 14401326716 0020065 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/000-allow-chrome.json 0000664 0000000 0000000 00000000570 14401326716 0023650 0 ustar 00root root 0000000 0000000 {
"created": "2020-12-13T18:06:52.209804547+01:00",
"updated": "2020-12-13T18:06:52.209857713+01:00",
"name": "000-allow-chrome",
"enabled": true,
"precedence": true,
"action": "allow",
"duration": "always",
"operator": {
"type": "simple",
"operand": "process.path",
"sensitive": false,
"data": "/opt/google/chrome/chrome",
"list": []
}
} opensnitch-1.5.8.1/daemon/rule/testdata/001-deny-chrome.json 0000664 0000000 0000000 00000000567 14401326716 0023500 0 ustar 00root root 0000000 0000000 {
"created": "2020-12-13T17:54:49.067148304+01:00",
"updated": "2020-12-13T17:54:49.067213602+01:00",
"name": "001-deny-chrome",
"enabled": true,
"precedence": false,
"action": "deny",
"duration": "always",
"operator": {
"type": "simple",
"operand": "process.path",
"sensitive": false,
"data": "/opt/google/chrome/chrome",
"list": []
}
} opensnitch-1.5.8.1/daemon/rule/testdata/invalid-regexp-list.json 0000664 0000000 0000000 00000001526 14401326716 0024653 0 ustar 00root root 0000000 0000000 {
"created": "2020-12-13T18:06:52.209804547+01:00",
"updated": "2020-12-13T18:06:52.209857713+01:00",
"name": "invalid-regexp-list",
"enabled": true,
"precedence": true,
"action": "allow",
"duration": "always",
"operator": {
"type": "list",
"operand": "list",
"sensitive": false,
"data": "[{\"type\": \"regexp\", \"operand\": \"process.path\", \"sensitive\": false, \"data\": \"^(/di(rmngr$\"}, {\"type\": \"simple\", \"operand\": \"dest.port\", \"data\": \"53\", \"sensitive\": false}]",
"list": [
{
"type": "regexp",
"operand": "process.path",
"sensitive": false,
"data": "^(/di(rmngr)$",
"list": null
},
{
"type": "simple",
"operand": "dest.port",
"sensitive": false,
"data": "53",
"list": null
}
]
}
}
opensnitch-1.5.8.1/daemon/rule/testdata/invalid-regexp.json 0000664 0000000 0000000 00000000574 14401326716 0023704 0 ustar 00root root 0000000 0000000 {
"created": "2020-12-13T18:06:52.209804547+01:00",
"updated": "2020-12-13T18:06:52.209857713+01:00",
"name": "invalid-regexp",
"enabled": true,
"precedence": true,
"action": "allow",
"duration": "always",
"operator": {
"type": "regexp",
"operand": "process.path",
"sensitive": false,
"data": "/opt/((.*)google/chrome/chrome",
"list": []
}
}
opensnitch-1.5.8.1/daemon/rule/testdata/lists/ 0000775 0000000 0000000 00000000000 14401326716 0021223 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/lists/domains/ 0000775 0000000 0000000 00000000000 14401326716 0022655 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/lists/domains/domainlists.txt 0000664 0000000 0000000 00000000164 14401326716 0025745 0 ustar 00root root 0000000 0000000 # this line must be ignored, 0.0.0.0 www.test.org
0.0.0.0 www.test.org
127.0.0.1 www.test.org
0.0.0.0 opensnitch.io
opensnitch-1.5.8.1/daemon/rule/testdata/lists/ips/ 0000775 0000000 0000000 00000000000 14401326716 0022016 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/lists/ips/ips.txt 0000664 0000000 0000000 00000000227 14401326716 0023353 0 ustar 00root root 0000000 0000000 # this line must be ignored, 0.0.0.0 www.test.org
# empty lines are also ignored
1.1.1.1
185.53.178.14
# duplicated entries should be ignored
1.1.1.1
opensnitch-1.5.8.1/daemon/rule/testdata/lists/nets/ 0000775 0000000 0000000 00000000000 14401326716 0022174 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/lists/nets/nets.txt 0000664 0000000 0000000 00000000240 14401326716 0023702 0 ustar 00root root 0000000 0000000 # this line must be ignored, 0.0.0.0 www.test.org
# empty lines are also ignored
1.1.1.0/24
185.53.178.0/24
# duplicated entries should be ignored
1.1.1.0/24
opensnitch-1.5.8.1/daemon/rule/testdata/lists/regexp/ 0000775 0000000 0000000 00000000000 14401326716 0022515 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/lists/regexp/domainsregexp.txt 0000664 0000000 0000000 00000000132 14401326716 0026117 0 ustar 00root root 0000000 0000000 # this line must be ignored, 0.0.0.0 www.test.org
www.test.org
www.test.org
opensnitch.io
opensnitch-1.5.8.1/daemon/rule/testdata/live_reload/ 0000775 0000000 0000000 00000000000 14401326716 0022352 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/rule/testdata/live_reload/test-live-reload-delete.json 0000664 0000000 0000000 00000000620 14401326716 0027663 0 ustar 00root root 0000000 0000000 {
"created": "2020-12-13T18:06:52.209804547+01:00",
"updated": "2020-12-13T18:06:52.209857713+01:00",
"name": "test-live-reload-delete",
"enabled": true,
"precedence": true,
"action": "deny",
"duration": "always",
"operator": {
"type": "simple",
"operand": "process.path",
"sensitive": false,
"data": "/usr/bin/curl",
"list": []
}
} opensnitch-1.5.8.1/daemon/rule/testdata/live_reload/test-live-reload-remove.json 0000664 0000000 0000000 00000000620 14401326716 0027716 0 ustar 00root root 0000000 0000000 {
"created": "2020-12-13T18:06:52.209804547+01:00",
"updated": "2020-12-13T18:06:52.209857713+01:00",
"name": "test-live-reload-remove",
"enabled": true,
"precedence": true,
"action": "deny",
"duration": "always",
"operator": {
"type": "simple",
"operand": "process.path",
"sensitive": false,
"data": "/usr/bin/curl",
"list": []
}
} opensnitch-1.5.8.1/daemon/statistics/ 0000775 0000000 0000000 00000000000 14401326716 0017477 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/statistics/event.go 0000664 0000000 0000000 00000001251 14401326716 0021146 0 ustar 00root root 0000000 0000000 package statistics
import (
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/rule"
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
)
// Event records a single intercepted connection: the moment it was seen,
// the connection details, and the rule that matched it.
type Event struct {
	// Time the event was created (see NewEvent).
	Time time.Time
	// Connection that triggered the event.
	Connection *conman.Connection
	// Rule that matched the connection.
	Rule *rule.Rule
}
// NewEvent builds an Event for the given connection/rule pair, stamped
// with the current time.
func NewEvent(con *conman.Connection, match *rule.Rule) *Event {
	e := &Event{
		Connection: con,
		Rule:       match,
	}
	e.Time = time.Now()
	return e
}
// Serialize converts the event into its protocol representation, with the
// timestamp both formatted for display and as nanoseconds since the epoch.
func (e *Event) Serialize() *protocol.Event {
	ev := &protocol.Event{
		Time:       e.Time.Format("2006-01-02 15:04:05"),
		Connection: e.Connection.Serialize(),
		Rule:       e.Rule.Serialize(),
		Unixnano:   e.Time.UnixNano(),
	}
	return ev
}
opensnitch-1.5.8.1/daemon/statistics/stats.go 0000664 0000000 0000000 00000013262 14401326716 0021170 0 ustar 00root root 0000000 0000000 package statistics
import (
"fmt"
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/rule"
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
)
// StatsConfig holds the stats configuration.
type StatsConfig struct {
	// MaxEvents is the max number of events kept in the backlog buffer.
	MaxEvents int `json:"MaxEvents"`
	// MaxStats is the max number of entries kept in each By* counter map.
	MaxStats int `json:"MaxStats"`
}
// conEvent is the unit of work sent through Statistics.jobs to the
// eventWorker goroutines.
type conEvent struct {
	// con is the intercepted connection.
	con *conman.Connection
	// match is the rule that matched the connection.
	match *rule.Rule
	// wasMissed is true when no rule matched the connection.
	wasMissed bool
}
// Statistics holds the connections and statistics the daemon intercepts.
// The connections are stored in the Events slice.
// The embedded RWMutex guards all fields; workers lock it in onConnection.
type Statistics struct {
	sync.RWMutex
	// Started is when stats collection began (uptime reference).
	Started time.Time
	// Aggregate counters.
	DNSResponses int
	Connections int
	Ignored int
	Accepted int
	Dropped int
	RuleHits int
	RuleMisses int
	// Events is the backlog of connection events, capped at maxEvents.
	Events []*Event
	// Per-key hit counters, each capped at maxStats entries (see incMap).
	ByProto map[string]uint64
	ByAddress map[string]uint64
	ByHost map[string]uint64
	ByPort map[string]uint64
	ByUID map[string]uint64
	ByExecutable map[string]uint64
	// rules is used to report the current number of loaded rules.
	rules *rule.Loader
	// jobs feeds connection events to the eventWorker goroutines.
	jobs chan conEvent
	// max number of events to keep in the buffer
	maxEvents int
	// max number of entries for each By* map
	maxStats int
}
// New returns a new Statistics object and starts the pool of worker
// goroutines that consume connection events from the jobs channel.
func New(rules *rule.Loader) (stats *Statistics) {
	stats = &Statistics{
		Started: time.Now(),
		Events: make([]*Event, 0),
		ByProto: make(map[string]uint64),
		ByAddress: make(map[string]uint64),
		ByHost: make(map[string]uint64),
		ByPort: make(map[string]uint64),
		ByUID: make(map[string]uint64),
		ByExecutable: make(map[string]uint64),
		rules: rules,
		jobs: make(chan conEvent),
		maxEvents: 150,
		maxStats: 25,
	}
	// Fan event processing out over a small fixed pool of workers
	// (ids 0..3, same as the original four literal go statements).
	for workerID := 0; workerID < 4; workerID++ {
		go stats.eventWorker(workerID)
	}
	return stats
}
// SetConfig configures the max events to keep in the backlog before sending
// the stats to the UI, or while the UI is not connected.
// If the backlog is full, it'll be shifted by one.
// Non-positive values are ignored, keeping the current limits.
func (s *Statistics) SetConfig(config StatsConfig) {
	if config.MaxEvents >= 1 {
		s.maxEvents = config.MaxEvents
	}
	if config.MaxStats >= 1 {
		s.maxStats = config.MaxStats
	}
}
// OnDNSResponse increases the counters of DNS responses and accepted
// connections, under the stats lock.
func (s *Statistics) OnDNSResponse() {
	s.Lock()
	s.DNSResponses++
	s.Accepted++
	s.Unlock()
}
// OnIgnored increases the counters of ignored and accepted connections,
// under the stats lock.
func (s *Statistics) OnIgnored() {
	s.Lock()
	s.Ignored++
	s.Accepted++
	s.Unlock()
}
// incMap increments the counter for key in *m. When the key is new and the
// map already holds s.maxStats entries, the entry with the fewest hits is
// evicted first so the map never grows past the limit.
// Fix: the previous min-search started from the magic sentinel 9999999999;
// if every counter exceeded that value nothing was evicted and the map
// could grow unbounded. The minimum is now seeded from the first entry.
func (s *Statistics) incMap(m *map[string]uint64, key string) {
	if val, found := (*m)[key]; found {
		(*m)[key] = val + 1
		return
	}
	// New key: make room if we're at the limit.
	if len(*m) >= s.maxStats {
		// find the element with the fewest hits
		minKey := ""
		var nMin uint64
		first := true
		for k, v := range *m {
			if first || v < nMin {
				first = false
				minKey = k
				nMin = v
			}
		}
		// remove it
		if minKey != "" {
			delete(*m, minKey)
		}
	}
	(*m)[key] = 1
}
// eventWorker consumes connection events from the jobs channel and folds
// them into the stats.
// Fix: the previous `for true { select { case ... } }` form had a single
// case (the select was redundant) and would spin forever on zero values if
// the channel were ever closed; ranging over the channel is equivalent
// while the channel is open and terminates cleanly if it is closed.
func (s *Statistics) eventWorker(id int) {
	log.Debug("Stats worker #%d started.", id)
	for job := range s.jobs {
		s.onConnection(job.con, job.match, job.wasMissed)
	}
}
// onConnection updates all counters and per-key maps for one connection,
// and appends it to the Events backlog. Called only from eventWorker, with
// the stats lock taken for the whole update.
func (s *Statistics) onConnection(con *conman.Connection, match *rule.Rule, wasMissed bool) {
	s.Lock()
	defer s.Unlock()

	s.Connections++
	if wasMissed {
		s.RuleMisses++
	} else {
		s.RuleHits++
	}
	// Only an actual rule hit with an Allow action counts as accepted;
	// everything else (deny/reject or missed) counts as dropped.
	if wasMissed == false && match.Action == rule.Allow {
		s.Accepted++
	} else {
		s.Dropped++
	}

	s.incMap(&s.ByProto, con.Protocol)
	s.incMap(&s.ByAddress, con.DstIP.String())
	if con.DstHost != "" {
		s.incMap(&s.ByHost, con.DstHost)
	}
	s.incMap(&s.ByPort, fmt.Sprintf("%d", con.DstPort))
	s.incMap(&s.ByUID, fmt.Sprintf("%d", con.Entry.UserId))
	s.incMap(&s.ByExecutable, con.Process.Path)

	// if we reached the limit, shift everything back
	// by one position
	// NOTE(review): the shift happens before the wasMissed early-return, so
	// a missed event can drop the oldest event without appending a new one
	// — confirm whether that is intentional.
	nEvents := len(s.Events)
	if nEvents == s.maxEvents {
		s.Events = s.Events[1:]
	}
	if wasMissed {
		return
	}
	s.Events = append(s.Events, NewEvent(con, match))
}
// OnConnectionEvent queues a new connection on the jobs channel so a
// worker adds it to the stats. Blocks until a worker picks it up.
func (s *Statistics) OnConnectionEvent(con *conman.Connection, match *rule.Rule, wasMissed bool) {
	ev := conEvent{
		con:       con,
		match:     match,
		wasMissed: wasMissed,
	}
	s.jobs <- ev
}
// serializeEvents converts the buffered events to their protocol form.
// Caller must hold the stats lock.
func (s *Statistics) serializeEvents() []*protocol.Event {
	serialized := make([]*protocol.Event, 0, len(s.Events))
	for _, e := range s.Events {
		serialized = append(serialized, e.Serialize())
	}
	return serialized
}
// emptyStats empties the stats once we've sent them to the GUI.
// We don't need them anymore here.
func (s *Statistics) emptyStats() {
	s.Lock()
	defer s.Unlock()
	if len(s.Events) != 0 {
		s.Events = make([]*Event, 0)
	}
}
// Serialize returns the collected statistics.
// After return the stats, the Events are emptied, to keep collecting more stats
// and not miss connections.
// NOTE: the defer order matters — defers run LIFO, so Unlock() releases the
// lock first and only then emptyStats() runs, re-acquiring it; swapping the
// two defers would deadlock.
func (s *Statistics) Serialize() *protocol.Statistics {
	s.Lock()
	defer s.emptyStats()
	defer s.Unlock()

	return &protocol.Statistics{
		DaemonVersion: core.Version,
		Rules: uint64(s.rules.NumRules()),
		Uptime: uint64(time.Since(s.Started).Seconds()),
		DnsResponses: uint64(s.DNSResponses),
		Connections: uint64(s.Connections),
		Ignored: uint64(s.Ignored),
		Accepted: uint64(s.Accepted),
		Dropped: uint64(s.Dropped),
		RuleHits: uint64(s.RuleHits),
		RuleMisses: uint64(s.RuleMisses),
		Events: s.serializeEvents(),
		ByProto: s.ByProto,
		ByAddress: s.ByAddress,
		ByHost: s.ByHost,
		ByPort: s.ByPort,
		ByUid: s.ByUID,
		ByExecutable: s.ByExecutable,
	}
}
opensnitch-1.5.8.1/daemon/system-fw.json 0000664 0000000 0000000 00000000476 14401326716 0020145 0 ustar 00root root 0000000 0000000 {
"SystemRules": [
{
"Rule": {
"Description": "Allow icmp",
"Table": "mangle",
"Chain": "OUTPUT",
"Parameters": "-p icmp",
"Target": "ACCEPT",
"TargetParameters": ""
}
}
]
}
opensnitch-1.5.8.1/daemon/ui/ 0000775 0000000 0000000 00000000000 14401326716 0015722 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/ui/client.go 0000664 0000000 0000000 00000021055 14401326716 0017532 0 ustar 00root root 0000000 0000000 package ui
import (
"fmt"
"net"
"sync"
"time"
"github.com/evilsocket/opensnitch/daemon/conman"
"github.com/evilsocket/opensnitch/daemon/firewall/iptables"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/rule"
"github.com/evilsocket/opensnitch/daemon/statistics"
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
"github.com/fsnotify/fsnotify"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/keepalive"
)
var (
	// configFile is the daemon's default on-disk configuration path.
	configFile = "/etc/opensnitchd/default-config.json"
	// dummyOperator always matches (OpTrue); used by the fallback rules below.
	dummyOperator, _ = rule.NewOperator(rule.Simple, false, rule.OpTrue, "", make([]rule.Operator, 0))
	// clientDisconnectedRule: while the GUI is NOT connected, allow by default.
	clientDisconnectedRule = rule.Create("ui.client.disconnected", true, false, rule.Allow, rule.Once, dummyOperator)
	// While the GUI is connected, deny by default everything until the user takes an action.
	clientConnectedRule = rule.Create("ui.client.connected", true, false, rule.Deny, rule.Once, dummyOperator)
	// clientErrorRule: fallback action when talking to the GUI fails.
	clientErrorRule = rule.Create("ui.client.error", true, false, rule.Allow, rule.Once, dummyOperator)
	// config holds the configuration loaded from configFile (guarded by its RWMutex).
	config Config
)
// serverConfig is the "Server" section of the daemon configuration file.
type serverConfig struct {
	// Address of the UI service to connect to.
	Address string `json:"Address"`
	// LogFile path for the daemon log.
	LogFile string `json:"LogFile"`
}
// Config holds the values loaded from configFile.
// The embedded RWMutex guards concurrent access from the accessor methods
// and the configuration-file watcher.
type Config struct {
	sync.RWMutex
	// Server holds the UI service connection settings.
	Server serverConfig `json:"Server"`
	// DefaultAction applied when no rule matches (e.g. allow/deny).
	DefaultAction string `json:"DefaultAction"`
	// DefaultDuration for newly created rules.
	DefaultDuration string `json:"DefaultDuration"`
	// InterceptUnknown: whether to intercept connections whose process is unknown.
	InterceptUnknown bool `json:"InterceptUnknown"`
	// ProcMonitorMethod selects the process monitoring backend.
	ProcMonitorMethod string `json:"ProcMonitorMethod"`
	// LogLevel, nil when not present in the file.
	LogLevel *uint32 `json:"LogLevel"`
	// Firewall backend name; empty falls back to iptables (see GetFirewallType).
	Firewall string `json:"Firewall"`
	// Stats limits for the statistics module.
	Stats statistics.StatsConfig `json:"Stats"`
}
// Client holds the connection information of a client.
// The embedded RWMutex guards the mutable connection state.
type Client struct {
	sync.RWMutex
	// clientCtx/clientCancel control the lifetime of the background tasks
	// (poller, ping); Close() cancels them.
	clientCtx context.Context
	clientCancel context.CancelFunc
	// stats and rules are shared with the rest of the daemon.
	stats *statistics.Statistics
	rules *rule.Loader
	// socketPath is the UI service address; isUnixSocket selects the dialer.
	socketPath string
	isUnixSocket bool
	// con/client are the gRPC connection and generated UI client stub.
	con *grpc.ClientConn
	client protocol.UIClient
	// configWatcher watches the configuration file for live reloads.
	configWatcher *fsnotify.Watcher
	// streamNotifications is the server-push notifications stream.
	streamNotifications protocol.UI_NotificationsClient
	//isAsking is set to true if the client is awaiting a decision from the GUI
	isAsking bool
}
// NewClient creates and configures a new client.
// It loads the on-disk configuration, resolves the socket path and starts
// the background (re)connection poller goroutine.
// NOTE(review): a failure to create the fsnotify watcher is silently
// ignored, leaving configWatcher nil — confirm downstream code tolerates it.
func NewClient(socketPath string, stats *statistics.Statistics, rules *rule.Loader) *Client {
	c := &Client{
		stats: stats,
		rules: rules,
		isUnixSocket: false,
		isAsking: false,
	}
	c.clientCtx, c.clientCancel = context.WithCancel(context.Background())
	if watcher, err := fsnotify.NewWatcher(); err == nil {
		c.configWatcher = watcher
	}
	c.loadDiskConfiguration(false)
	if socketPath != "" {
		c.setSocketPath(c.getSocketPath(socketPath))
	}

	go c.poller()
	return c
}
// Close cancels the running tasks: pinging the server and (re)connection poller.
// It does so by cancelling the client context created in NewClient.
func (c *Client) Close() {
	c.clientCancel()
}
// ProcMonitorMethod returns the monitor method configured.
// If it's not present in the config file, it'll return an empty string.
func (c *Client) ProcMonitorMethod() string {
	config.RLock()
	method := config.ProcMonitorMethod
	config.RUnlock()
	return method
}
// InterceptUnknown returns whether connections from unknown processes
// should be intercepted, as configured in the config file.
func (c *Client) InterceptUnknown() bool {
	config.RLock()
	intercept := config.InterceptUnknown
	config.RUnlock()
	return intercept
}
// GetStatsConfig returns the stats config from disk.
func (c *Client) GetStatsConfig() statistics.StatsConfig {
	config.RLock()
	statsCfg := config.Stats
	config.RUnlock()
	return statsCfg
}
// GetFirewallType returns the configured firewall backend, falling back
// to iptables when the option is empty.
func (c *Client) GetFirewallType() string {
	config.RLock()
	defer config.RUnlock()
	fw := config.Firewall
	if fw == "" {
		fw = iptables.Name
	}
	return fw
}
// DefaultAction returns the default action to apply, which depends on
// whether the GUI is connected (deny by default) or not (allow by default).
// Connected() is evaluated before taking the read lock on purpose: it
// takes the same lock itself.
func (c *Client) DefaultAction() rule.Action {
	isConnected := c.Connected()

	c.RLock()
	defer c.RUnlock()
	if !isConnected {
		return clientDisconnectedRule.Action
	}
	return clientConnectedRule.Action
}
// DefaultDuration returns the default duration configured for a rule.
// For example it can be: once, always, "until restart".
func (c *Client) DefaultDuration() rule.Duration {
	c.RLock()
	duration := clientDisconnectedRule.Duration
	c.RUnlock()
	return duration
}
// Connected reports whether the gRPC connection to the UI service exists
// and is in the Ready state.
func (c *Client) Connected() bool {
	c.RLock()
	defer c.RUnlock()
	return c.con != nil && c.con.GetState() == connectivity.Ready
}
// GetIsAsking returns the isAsking flag, i.e. whether the daemon is
// currently waiting for the user to answer a rule prompt.
func (c *Client) GetIsAsking() bool {
	c.RLock()
	asking := c.isAsking
	c.RUnlock()
	return asking
}
// SetIsAsking sets the isAsking flag under the client write lock.
func (c *Client) SetIsAsking(flag bool) {
	c.Lock()
	c.isAsking = flag
	c.Unlock()
}
// poller is the client's main loop: once per second it reconciles the
// connection state with the UI service, reconnecting when needed and
// pinging (sending stats) while connected. It exits when the client
// context is cancelled (Close()).
func (c *Client) poller() {
	log.Debug("UI service poller started for socket %s", c.socketPath)
	wasConnected := false
	for {
		select {
		case <-c.clientCtx.Done():
			log.Info("Client.poller() exit, Done()")
			goto Exit
		default:
			isConnected := c.Connected()
			// fire the status-change hook only on transitions
			if wasConnected != isConnected {
				c.onStatusChange(isConnected)
				wasConnected = isConnected
			}
			if !c.Connected() {
				// connect and create the client if needed
				if err := c.connect(); err != nil {
					log.Warning("Error while connecting to UI service: %s", err)
				}
			}
			// re-check the state: connect() above may have just established it
			if c.Connected() {
				// if the client is connected and ready, send a ping
				if err := c.ping(time.Now()); err != nil {
					log.Warning("Error while pinging UI service: %s, state: %v", err, c.con.GetState())
				}
			}
			time.Sleep(1 * time.Second)
		}
	}
Exit:
	log.Info("uiClient exit")
}
// onStatusChange reacts to connection-state transitions: on connect it
// subscribes to the GUI; on disconnect it tears the channel down so the
// poller can rebuild it.
func (c *Client) onStatusChange(connected bool) {
	if !connected {
		log.Error("Connection to the UI service lost.")
		c.disconnect()
		return
	}
	log.Info("Connected to the UI service on %s", c.socketPath)
	go c.Subscribe()
}
// connect establishes the gRPC channel with the UI service if there isn't a
// usable one already. It returns nil both when already connected and when a
// new channel has just been opened.
func (c *Client) connect() (err error) {
	if c.Connected() {
		return
	}
	// A non-nil channel that is not Ready: tear it down only when it is in
	// a terminal/failed state; otherwise let gRPC keep retrying on it.
	if c.con != nil {
		if c.con.GetState() == connectivity.TransientFailure || c.con.GetState() == connectivity.Shutdown {
			c.disconnect()
		} else {
			return
		}
	}
	if err := c.openSocket(); err != nil {
		c.disconnect()
		return err
	}
	// Create the protobuf client lazily; it is reused across reconnects.
	if c.client == nil {
		c.client = protocol.NewUIClient(c.con)
	}
	return nil
}
// openSocket dials the UI service, over a unix socket or TCP depending on
// how the address was configured (see getSocketPath). The resulting channel
// is stored in c.con. The caller must not hold the client mutex.
func (c *Client) openSocket() (err error) {
	c.Lock()
	defer c.Unlock()
	if c.isUnixSocket {
		// custom dialer: grpc.Dial expects a network address by default,
		// here we connect to a filesystem path instead.
		c.con, err = grpc.Dial(c.socketPath, grpc.WithInsecure(),
			grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
				return net.DialTimeout("unix", addr, timeout)
			}))
	} else {
		// https://pkg.go.dev/google.golang.org/grpc/keepalive#ClientParameters
		var kacp = keepalive.ClientParameters{
			Time: 5 * time.Second,
			// if there's no activity after ^, wait 20s and close
			// server timeout is 20s by default.
			Timeout: 22 * time.Second,
			// send pings even without active streams
			PermitWithoutStream: true,
		}
		c.con, err = grpc.Dial(c.socketPath, grpc.WithInsecure(), grpc.WithKeepaliveParams(kacp))
	}
	return err
}
// disconnect drops the protobuf client and closes the underlying gRPC
// channel, leaving the poller to re-establish the connection later.
func (c *Client) disconnect() {
	c.Lock()
	defer c.Unlock()
	c.client = nil
	if c.con == nil {
		return
	}
	c.con.Close()
	c.con = nil
	log.Debug("client.disconnect()")
}
// ping sends the current statistics to the UI service, using the given
// timestamp as the request id, and verifies that the server echoes the same
// id back. Returns an error when disconnected, on RPC failure, or on an
// id mismatch.
func (c *Client) ping(ts time.Time) (err error) {
	if c.Connected() == false {
		return fmt.Errorf("service is not connected")
	}
	// Hold the client lock for the whole RPC so disconnect()/openSocket()
	// cannot swap the channel underneath us.
	c.Lock()
	defer c.Unlock()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	reqID := uint64(ts.UnixNano())
	pReq := &protocol.PingRequest{
		Id:    reqID,
		Stats: c.stats.Serialize(),
	}
	// Stats are read-locked while the ping is in flight.
	// NOTE(review): Serialize() above runs outside this lock — presumably it
	// locks internally; verify.
	c.stats.RLock()
	pong, err := c.client.Ping(ctx, pReq)
	c.stats.RUnlock()
	if err != nil {
		return err
	}
	if pong.Id != reqID {
		return fmt.Errorf("Expected pong with id 0x%x, got 0x%x", reqID, pong.Id)
	}
	return nil
}
// Ask sends a request to the server, with the values of a connection to be
// allowed or denied. It blocks (up to 2 minutes) until the server answers.
// Returns the rule received from the server, or nil on any error.
func (c *Client) Ask(con *conman.Connection) *rule.Rule {
	if c.client == nil {
		return nil
	}
	// FIXME: if timeout is fired, the rule is not added to the list in the GUI
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
	defer cancel()
	reply, err := c.client.AskRule(ctx, con.Serialize())
	if err != nil {
		log.Warning("Error while asking for rule: %s - %v", err, con)
		return nil
	}
	// translate the protobuf rule into the daemon's internal representation
	r, err := rule.Deserialize(reply)
	if err != nil {
		return nil
	}
	return r
}
// monitorConfigWorker reacts to changes of the configuration file on disk,
// reloading it whenever it is written to or removed.
func (c *Client) monitorConfigWorker() {
	// Fix: ranging over the events channel makes the goroutine exit when the
	// watcher is closed. The previous single-case select kept receiving
	// zero-value events from the closed channel, busy-looping forever.
	for event := range c.configWatcher.Events {
		if (event.Op&fsnotify.Write == fsnotify.Write) || (event.Op&fsnotify.Remove == fsnotify.Remove) {
			c.loadDiskConfiguration(true)
		}
	}
}
opensnitch-1.5.8.1/daemon/ui/config.go 0000664 0000000 0000000 00000005617 14401326716 0017527 0 ustar 00root root 0000000 0000000 package ui
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/procmon/monitor"
"github.com/evilsocket/opensnitch/daemon/rule"
)
// getSocketPath normalizes the configured server address and records
// whether it is a unix socket: "unix:///path" returns "/path" with
// isUnixSocket set, anything else is treated as a TCP address.
func (c *Client) getSocketPath(socketPath string) string {
	c.Lock()
	defer c.Unlock()
	const unixScheme = "unix://"
	// idiom fix: no "== true" comparison; named constant replaces the
	// magic slice index 7.
	if strings.HasPrefix(socketPath, unixScheme) {
		c.isUnixSocket = true
		return socketPath[len(unixScheme):]
	}
	c.isUnixSocket = false
	return socketPath
}
// setSocketPath stores the path/address used to reach the GUI, guarded by
// the client mutex.
func (c *Client) setSocketPath(socketPath string) {
	c.Lock()
	defer c.Unlock()
	c.socketPath = socketPath
}
// isProcMonitorEqual reports whether the given monitor method matches the
// one currently loaded from the configuration.
func (c *Client) isProcMonitorEqual(newMonitorMethod string) bool {
	config.RLock()
	current := config.ProcMonitorMethod
	config.RUnlock()
	return newMonitorMethod == current
}
// parseConf deserializes a raw json configuration string into a Config
// struct, returning the zero Config together with the error on failure.
func (c *Client) parseConf(rawConfig string) (conf Config, err error) {
	err = json.Unmarshal([]byte(rawConfig), &conf)
	return conf, err
}
// loadDiskConfiguration reads and applies the configuration file, watches it
// for changes, and (on first load only) spawns the watcher goroutine.
// reload is true when called from the config watcher itself.
func (c *Client) loadDiskConfiguration(reload bool) {
	raw, err := ioutil.ReadFile(configFile)
	if err != nil {
		// BUG fix: this error was built with fmt.Errorf and then discarded;
		// actually log it.
		log.Error("Error loading disk configuration %s: %s", configFile, err)
	}
	if ok := c.loadConfiguration(raw); ok {
		// watch the file so future edits are picked up live
		if err := c.configWatcher.Add(configFile); err != nil {
			log.Error("Could not watch path: %s", err)
			return
		}
	}
	if reload {
		// already running inside monitorConfigWorker; don't spawn another
		return
	}
	go c.monitorConfigWorker()
}
// loadConfiguration parses the raw json configuration and applies it in
// order: log level/file, server address, default action/duration for the
// fallback rules, and the process monitor method.
// Returns true when the configuration was parsed successfully.
func (c *Client) loadConfiguration(rawConfig []byte) bool {
	config.Lock()
	defer config.Unlock()
	if err := json.Unmarshal(rawConfig, &config); err != nil {
		log.Error("Error parsing configuration %s: %s", configFile, err)
		return false
	}
	// firstly load config level, to detect further errors if any
	if config.LogLevel != nil {
		log.SetLogLevel(int(*config.LogLevel))
	}
	if config.Server.LogFile != "" {
		log.Close()
		log.OpenFile(config.Server.LogFile)
	}
	if config.Server.Address != "" {
		tempSocketPath := c.getSocketPath(config.Server.Address)
		if tempSocketPath != c.socketPath {
			// disconnect, and let the connection poller reconnect to the new address
			c.disconnect()
		}
		c.setSocketPath(tempSocketPath)
	}
	// The "disconnected" and "error" rules decide what to do with new
	// connections while the GUI is unreachable.
	if config.DefaultAction != "" {
		clientDisconnectedRule.Action = rule.Action(config.DefaultAction)
		clientErrorRule.Action = rule.Action(config.DefaultAction)
	}
	if config.DefaultDuration != "" {
		clientDisconnectedRule.Duration = rule.Duration(config.DefaultDuration)
		clientErrorRule.Duration = rule.Duration(config.DefaultDuration)
	}
	if config.ProcMonitorMethod != "" {
		if err := monitor.ReconfigureMonitorMethod(config.ProcMonitorMethod); err != nil {
			log.Warning("Unable to set new process monitor method from disk: %v", err)
		}
	}
	return true
}
// saveConfiguration validates/applies the given raw json configuration and,
// on success, persists it to the configuration file on disk.
func (c *Client) saveConfiguration(rawConfig string) (err error) {
	if !c.loadConfiguration([]byte(rawConfig)) {
		// BUG fix: the previous message formatted the named return `err`,
		// which is always nil at this point.
		return fmt.Errorf("Error parsing configuration %s", rawConfig)
	}
	if err = ioutil.WriteFile(configFile, []byte(rawConfig), 0644); err != nil {
		log.Error("writing configuration to disk: %s", err)
		return err
	}
	return nil
}
opensnitch-1.5.8.1/daemon/ui/notifications.go 0000664 0000000 0000000 00000023224 14401326716 0021125 0 ustar 00root root 0000000 0000000 package ui
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"strconv"
"strings"
"time"
"github.com/evilsocket/opensnitch/daemon/core"
"github.com/evilsocket/opensnitch/daemon/firewall"
"github.com/evilsocket/opensnitch/daemon/log"
"github.com/evilsocket/opensnitch/daemon/procmon"
"github.com/evilsocket/opensnitch/daemon/procmon/monitor"
"github.com/evilsocket/opensnitch/daemon/rule"
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
"golang.org/x/net/context"
)
var stopMonitoringProcess = make(chan int)
// NewReply constructs a new protocol notification reply with the given
// notification id, result code and payload.
func NewReply(rID uint64, replyCode protocol.NotificationReplyCode, data string) *protocol.NotificationReply {
	return &protocol.NotificationReply{
		Id:   rID,
		Code: replyCode,
		Data: data,
	}
}
// getClientConfig builds the hello message sent to the GUI on Subscribe():
// node identification, the raw on-disk configuration and the currently
// loaded rules.
func (c *Client) getClientConfig() *protocol.ClientConfig {
	raw, _ := ioutil.ReadFile(configFile)
	nodeName := core.GetHostname()
	nodeVersion := core.GetKernelVersion()
	// NOTE(review): ts is never assigned, so Id is always the zero time's
	// UnixNano() — confirm whether the GUI relies on this field being unique.
	var ts time.Time
	rulesTotal := len(c.rules.GetAll())
	ruleList := make([]*protocol.Rule, rulesTotal)
	idx := 0
	for _, r := range c.rules.GetAll() {
		ruleList[idx] = r.Serialize()
		idx++
	}
	return &protocol.ClientConfig{
		Id:                uint64(ts.UnixNano()),
		Name:              nodeName,
		Version:           nodeVersion,
		IsFirewallRunning: firewall.IsRunning(),
		// flatten the raw config to a single line for transport
		Config:   strings.Replace(string(raw), "\n", "", -1),
		LogLevel: uint32(log.MinLevel),
		Rules:    ruleList,
	}
}
// monitorProcessDetails periodically (every 2s) gathers details of the given
// PID and streams them (as JSON) to the GUI, until the process disappears,
// the stream fails, or a stop request for this PID arrives on
// stopMonitoringProcess.
func (c *Client) monitorProcessDetails(pid int, stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	p := procmon.NewProcess(pid, "")
	ticker := time.NewTicker(2 * time.Second)
	for {
		select {
		case _pid := <-stopMonitoringProcess:
			// stop requests carry a PID; ignore those aimed at other monitors
			if _pid != pid {
				continue
			}
			goto Exit
		case <-ticker.C:
			if err := p.GetInfo(); err != nil {
				// the process likely exited: report and stop monitoring
				c.sendNotificationReply(stream, notification.Id, notification.Data, err)
				goto Exit
			}
			pJSON, err := json.Marshal(p)
			notification.Data = string(pJSON)
			// a marshal error (err) is forwarded as an ERROR reply; a send
			// error means the stream is gone, so stop.
			if errs := c.sendNotificationReply(stream, notification.Id, notification.Data, err); errs != nil {
				goto Exit
			}
		}
	}
Exit:
	ticker.Stop()
}
// handleActionChangeConfig validates a configuration pushed from the GUI,
// switches the process monitor method, and persists the new configuration
// to disk. The outcome is reported back on the notifications stream.
func (c *Client) handleActionChangeConfig(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	log.Info("[notification] Reloading configuration")
	// Parse received configuration first, to get the new proc monitor method.
	newConf, err := c.parseConf(notification.Data)
	if err != nil {
		log.Warning("[notification] error parsing received config: %v", notification.Data)
		c.sendNotificationReply(stream, notification.Id, "", err)
		return
	}
	// Switch the monitor method before saving; if it fails, reject the
	// whole configuration change.
	if err := monitor.ReconfigureMonitorMethod(newConf.ProcMonitorMethod); err != nil {
		c.sendNotificationReply(stream, notification.Id, "", err)
		return
	}
	// this save operation triggers a re-loadConfiguration()
	err = c.saveConfiguration(notification.Data)
	if err != nil {
		log.Warning("[notification] CHANGE_CONFIG not applied %s", err)
	}
	c.sendNotificationReply(stream, notification.Id, "", err)
}
// handleActionEnableRule enables the rules received from the GUI, persisting
// to disk those whose duration is rule.Always. Only the last error (if any)
// is reported back.
func (c *Client) handleActionEnableRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	var err error
	for _, rul := range notification.Rules {
		log.Info("[notification] enable rule: %s", rul.Name)
		// protocol.Rule(protobuf) != rule.Rule(json)
		r, derr := rule.Deserialize(rul)
		if r == nil {
			// Fix: the deserialization error was previously discarded and a
			// nil rule dereferenced (consistent now with handleActionChangeRule).
			err = fmt.Errorf("Invalid rule, %s", derr)
			continue
		}
		r.Enabled = true
		// save to disk only if the duration is rule.Always
		err = c.rules.Replace(r, r.Duration == rule.Always)
	}
	c.sendNotificationReply(stream, notification.Id, "", err)
}
// handleActionDisableRule disables the rules received from the GUI,
// persisting to disk those whose duration is rule.Always. Only the last
// error (if any) is reported back.
func (c *Client) handleActionDisableRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	var err error
	for _, rul := range notification.Rules {
		log.Info("[notification] disable rule: %s", rul)
		r, derr := rule.Deserialize(rul)
		if r == nil {
			// Fix: the deserialization error was previously discarded and a
			// nil rule dereferenced (consistent now with handleActionChangeRule).
			err = fmt.Errorf("Invalid rule, %s", derr)
			continue
		}
		r.Enabled = false
		err = c.rules.Replace(r, r.Duration == rule.Always)
	}
	c.sendNotificationReply(stream, notification.Id, "", err)
}
// handleActionChangeRule adds or replaces the rules received from the GUI,
// persisting to disk those whose duration is rule.Always. Only the last
// error (if any) is reported back.
func (c *Client) handleActionChangeRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	var rErr error
	for _, rul := range notification.Rules {
		r, err := rule.Deserialize(rul)
		// guard against malformed rules: Deserialize returns nil on failure
		if r == nil {
			rErr = fmt.Errorf("Invalid rule, %s", err)
			continue
		}
		log.Info("[notification] change rule: %s %d", r, notification.Id)
		if err := c.rules.Replace(r, r.Duration == rule.Always); err != nil {
			log.Warning("[notification] Error changing rule: %s %s", err, r)
			rErr = err
		}
	}
	c.sendNotificationReply(stream, notification.Id, "", rErr)
}
// handleActionDeleteRule deletes, by name, the rules requested by the GUI.
// Only the last error (if any) is reported back.
func (c *Client) handleActionDeleteRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	var err error
	for _, rul := range notification.Rules {
		log.Info("[notification] delete rule: %s %d", rul.Name, notification.Id)
		err = c.rules.Delete(rul.Name)
		if err != nil {
			log.Error("[notification] Error deleting rule: %s %s", err, rul)
		}
	}
	c.sendNotificationReply(stream, notification.Id, "", err)
}
// handleActionMonitorProcess starts streaming details of a PID (sent in
// notification.Data) to the GUI, after verifying that the process exists.
func (c *Client) handleActionMonitorProcess(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	pid, err := strconv.Atoi(notification.Data)
	if err != nil {
		log.Error("parsing PID to monitor: %d, err: %s", pid, err)
		return
	}
	// a missing /proc/<pid> dir means the process already exited
	if !core.Exists(fmt.Sprint("/proc/", pid)) {
		c.sendNotificationReply(stream, notification.Id, "", fmt.Errorf("The process is no longer running"))
		return
	}
	go c.monitorProcessDetails(pid, stream, notification)
}
// handleActionStopMonitorProcess stops a running process monitor by sending
// the PID over the stopMonitoringProcess channel.
// NOTE(review): this send is unbuffered and blocks; presumably the GUI only
// requests a stop while a monitor goroutine is running — verify.
func (c *Client) handleActionStopMonitorProcess(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	pid, err := strconv.Atoi(notification.Data)
	if err != nil {
		log.Error("parsing PID to stop monitor: %d, err: %s", pid, err)
		c.sendNotificationReply(stream, notification.Id, "", fmt.Errorf("Error stopping monitor: %s", notification.Data))
		return
	}
	stopMonitoringProcess <- pid
	c.sendNotificationReply(stream, notification.Id, "", nil)
}
// handleNotification dispatches a notification received from the GUI to the
// handler for its action type. Unknown types are silently ignored.
func (c *Client) handleNotification(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
	// idiom fix: tagged switch on the type instead of `switch { case a == b }`
	switch notification.Type {
	case protocol.Action_MONITOR_PROCESS:
		c.handleActionMonitorProcess(stream, notification)
	case protocol.Action_STOP_MONITOR_PROCESS:
		c.handleActionStopMonitorProcess(stream, notification)
	case protocol.Action_CHANGE_CONFIG:
		c.handleActionChangeConfig(stream, notification)
	case protocol.Action_LOAD_FIREWALL:
		log.Info("[notification] starting firewall")
		firewall.Init(c.GetFirewallType(), nil)
		c.sendNotificationReply(stream, notification.Id, "", nil)
	case protocol.Action_UNLOAD_FIREWALL:
		log.Info("[notification] stopping firewall")
		firewall.Stop()
		c.sendNotificationReply(stream, notification.Id, "", nil)
	// ENABLE_RULE just replaces the rule on disk
	case protocol.Action_ENABLE_RULE:
		c.handleActionEnableRule(stream, notification)
	case protocol.Action_DISABLE_RULE:
		c.handleActionDisableRule(stream, notification)
	case protocol.Action_DELETE_RULE:
		c.handleActionDeleteRule(stream, notification)
	// CHANGE_RULE can add() or replace) an existing rule.
	case protocol.Action_CHANGE_RULE:
		c.handleActionChangeRule(stream, notification)
	}
}
// sendNotificationReply sends the outcome of a handled notification back to
// the GUI: an OK reply carrying data, or an ERROR reply carrying the error
// text. It returns the stream error, if sending failed.
func (c *Client) sendNotificationReply(stream protocol.UI_NotificationsClient, nID uint64, data string, err error) error {
	reply := NewReply(nID, protocol.NotificationReplyCode_OK, data)
	if err != nil {
		reply.Code = protocol.NotificationReplyCode_ERROR
		reply.Data = fmt.Sprint(err)
	}
	sendErr := stream.Send(reply)
	if sendErr != nil {
		log.Error("Error replying to notification: %s %d", sendErr, reply.Id)
	}
	return sendErr
}
// Subscribe opens a connection with the server (UI), to start
// receiving notifications.
// It firstly sends the daemon status and configuration.
func (c *Client) Subscribe() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	clientCfg, err := c.client.Subscribe(ctx, c.getClientConfig())
	if err != nil {
		log.Error("Subscribing to GUI %s", err)
		// When connecting to the GUI via TCP, sometimes the notifications channel is
		// not established, and the main channel is never closed.
		// We need to disconnect everything after a timeout and try it again.
		c.disconnect()
		return
	}
	// The GUI can override the default action for new connections while
	// connected; apply it from its reply.
	if tempConf, err := c.parseConf(clientCfg.Config); err == nil {
		c.Lock()
		clientConnectedRule.Action = rule.Action(tempConf.DefaultAction)
		c.Unlock()
	}
	// blocks until the notifications stream ends
	c.listenForNotifications()
}
// listenForNotifications is the channel where the daemon receives messages from the server.
// It consists of 2 grpc streams (send/receive) that are never closed,
// this way we can share messages in realtime.
// If the GUI is closed, we'll receive an error reading from the channel.
func (c *Client) listenForNotifications() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// open the stream channel
	streamReply := &protocol.NotificationReply{Id: 0, Code: protocol.NotificationReplyCode_OK}
	notisStream, err := c.client.Notifications(ctx)
	if err != nil {
		log.Error("establishing notifications channel %s", err)
		return
	}
	// send the first notification
	if err := notisStream.Send(streamReply); err != nil {
		log.Error("sending notification HELLO %s", err)
		return
	}
	log.Info("Start receiving notifications")
	for {
		select {
		case <-c.clientCtx.Done():
			// daemon is shutting down (Close() was called)
			goto Exit
		default:
			// Recv blocks until a notification arrives or the stream breaks
			noti, err := notisStream.Recv()
			if err == io.EOF {
				log.Warning("notification channel closed by the server")
				goto Exit
			}
			if err != nil {
				log.Error("getting notifications: %s %s", err, noti)
				goto Exit
			}
			c.handleNotification(notisStream, noti)
		}
	}
Exit:
	// tear everything down; the poller will reconnect and re-Subscribe
	notisStream.CloseSend()
	log.Info("Stop receiving notifications")
	c.disconnect()
}
opensnitch-1.5.8.1/daemon/ui/protocol/ 0000775 0000000 0000000 00000000000 14401326716 0017563 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/daemon/ui/protocol/.gitkeep 0000664 0000000 0000000 00000000000 14401326716 0021202 0 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/debian/ 0000775 0000000 0000000 00000000000 14401326716 0015264 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/debian/changelog 0000664 0000000 0000000 00000015350 14401326716 0017142 0 ustar 00root root 0000000 0000000 opensnitch (1.5.5-1) unstable; urgency=medium
* New upstream release.
* Bump Standards-Version to 4.6.2.
* Upload sponsored by Petter Reinholdtsen.
-- Gustavo Iñiguez Goya Wed, 01 Feb 2023 22:37:12 +0100
opensnitch (1.5.4-1) unstable; urgency=high
* New upstream release. (Closes: #1030115)
* debian/control:
- Updated packages description.
- Removed debconf and whiptail|dialog dependencies.
- Added xdg-user-dirs, gtk-update-icon-cache dependencies.
- Point Vcs-Git field to the 1.5.0 branch.
* debian/postinst:
- Fixed opensnitch_ui.desktop installation.
- Fixed updating icons cache.
* debian/postrm:
- Fixed removing opensnitch_ui.desktop
* debian/tests/:
- Added autopkgtests.
* Upload sponsored by Petter Reinholdtsen.
-- Gustavo Iñiguez Goya Tue, 31 Jan 2023 23:48:58 +0100
opensnitch (1.5.3-1) unstable; urgency=medium
* Added debian/upstream/metadata.
* Updated Homepage url.
* Updated Copyright years.
-- Gustavo-Iniguez-Goya Sun, 22 Jan 2023 21:30:45 +0100
opensnitch (1.5.2.1-1) unstable; urgency=medium
* Initial release. (Closes: #909567)
-- Gustavo-Iniguez-Goya Fri, 20 Jan 2023 22:26:40 +0000
opensnitch (1.5.2-1) unstable; urgency=medium
* try to mount debugfs on boot up
-- gustavo-iniguez-goya Wed, 27 Jul 2022 17:29:33 +0200
opensnitch (1.5.1-1) unstable; urgency=medium
* Better eBPF cache.
* Fixed error resolving domains to localhost.
* Fixed error deleting our nftables rules.
-- gustavo-iniguez-goya Fri, 25 Feb 2022 01:21:38 +0100
opensnitch (1.5.0-1) unstable; urgency=medium
* New release.
* Added Reject option.
* New lists types to block ads/malware/...
* Better connections interception.
* Better VPNs handling.
* Bug fixes.
-- gustavo-iniguez-goya Fri, 28 Jan 2022 23:20:38 +0100
opensnitch (1.5.0~rc2-1) unstable; urgency=medium
* Better connections interception.
* Improvements.
-- gustavo-iniguez-goya Sun, 16 Jan 2022 23:15:12 +0100
opensnitch (1.5.0~rc1-1) unstable; urgency=medium
* New features.
-- gustavo-iniguez-goya Thu, 07 Oct 2021 14:57:35 +0200
opensnitch (1.4.0-1) unstable; urgency=medium
* final release.
-- gustavo-iniguez-goya Fri, 27 Aug 2021 13:33:07 +0200
opensnitch (1.4.0~rc4-1) unstable; urgency=medium
* Bug fix release.
-- gustavo-iniguez-goya Wed, 11 Aug 2021 15:17:49 +0200
opensnitch (1.4.0~rc3-1) unstable; urgency=medium
* Bug fix release.
-- gustavo-iniguez-goya Fri, 16 Jul 2021 23:28:52 +0200
opensnitch (1.4.0~rc2-1) unstable; urgency=medium
* Added eBPF support.
* Fixes and improvements.
-- gustavo-iniguez-goya Fri, 07 May 2021 01:08:02 +0200
opensnitch (1.4.0~rc-1) unstable; urgency=medium
* Bug fix and improvements release.
-- gustavo-iniguez-goya Thu, 25 Mar 2021 01:02:31 +0100
opensnitch (1.3.6-1) unstable; urgency=medium
* Bug fix and improvements release.
-- gustavo-iniguez-goya Wed, 10 Feb 2021 10:17:43 +0100
opensnitch (1.3.5-1) unstable; urgency=medium
* Bug fix and improvements release.
-- gustavo-iniguez-goya Mon, 11 Jan 2021 18:01:53 +0100
opensnitch (1.3.0-1) unstable; urgency=medium
* Fixed how we check rules
* Fixed cpu spike after disable interception.
* Fixed cleaning up fw rules on exit.
* make regexp rules case-insensitive by default
* allow to filter by dst network.
-- gustavo-iniguez-goya Wed, 16 Dec 2020 01:15:03 +0100
opensnitch (1.3.0~rc-1) unstable; urgency=medium
* Non-maintainer upload.
-- gustavo-iniguez-goya Fri, 13 Nov 2020 00:51:34 +0100
opensnitch (1.2.0-1) unstable; urgency=medium
* Fixed memleaks.
* Sort rules by name
* Added priority field to rules.
* Other fixes
-- gustavo-iniguez-goya Mon, 09 Nov 2020 22:55:13 +0100
opensnitch (1.0.1-1) unstable; urgency=medium
* Fixed app exit when IPv6 is not supported.
* Other fixes.
-- gustavo-iniguez-goya Thu, 30 Jul 2020 21:56:20 +0200
opensnitch (1.0.0-1) unstable; urgency=medium
* v1.0.0 released.
-- gustavo-iniguez-goya Thu, 16 Jul 2020 00:19:26 +0200
opensnitch (1.0.0rc11-1) unstable; urgency=medium
* Fixed multiple race conditions.
* Fixed CWD parsing when using audit proc monitor method.
-- gustavo-iniguez-goya Wed, 24 Jun 2020 00:10:38 +0200
opensnitch (1.0.0rc10-1) unstable; urgency=medium
* Fixed checking UID functions availability.
* Improved process path parsing.
* Fixed applying config from the UI.
* Fixed default log level.
* Gather CWD and process environment vars.
* Increase default timeout when asking for a rule.
-- gustavo-iniguez-goya Sat, 13 Jun 2020 18:45:02 +0200
opensnitch (1.0.0rc9-1) unstable; urgency=medium
* Ignore malformed rules from loading.
* Allow to modify and add rules from the UI.
-- gustavo-iniguez-goya Sun, 17 May 2020 18:18:24 +0200
opensnitch (1.0.0rc8) unstable; urgency=medium
* Allow to change settings from the UI.
* Improved connection handling with the UI.
-- gustavo-iniguez-goya Wed, 29 Apr 2020 21:52:27 +0200
opensnitch (1.0.0rc7-1) unstable; urgency=medium
* Stability, performance and realiability improvements.
-- gustavo-iniguez-goya Sun, 12 Apr 2020 23:25:41 +0200
opensnitch (1.0.0rc6-1) unstable; urgency=medium
* Fixed iptables rules deletion.
* Improved PIDs cache.
* Added audit process monitoring method.
* Added logrotate file.
* Added default configuration file.
-- gustavo-iniguez-goya Sun, 08 Mar 2020 20:47:58 +0100
opensnitch (1.0.0rc-5) unstable; urgency=medium
* Fixed netlink socket querying.
* Added check to reload firewall rules if missing.
-- gustavo-iniguez-goya Mon, 24 Feb 2020 19:55:06 +0100
opensnitch (1.0.0rc-3) unstable; urgency=medium
* @see: https://github.com/gustavo-iniguez-goya/opensnitch/releases
-- gustavo-iniguez-goya Tue, 18 Feb 2020 10:09:45 +0100
opensnitch (1.0.0rc-2) unstable; urgency=medium
* UI minor changes
* Expand deb package compatibility.
-- gustavo-iniguez-goya Wed, 05 Feb 2020 21:50:20 +0100
opensnitch (1.0.0rc-1) unstable; urgency=medium
* Initial release
-- gustavo-iniguez-goya Fri, 22 Nov 2019 01:14:08 +0100
opensnitch-1.5.8.1/debian/control 0000664 0000000 0000000 00000005343 14401326716 0016674 0 ustar 00root root 0000000 0000000 Source: opensnitch
Maintainer: Gustavo Iñiguez Goya
Section: devel
Testsuite: autopkgtest-pkg-go
Priority: optional
Build-Depends:
debhelper-compat (= 11),
dh-golang,
dh-python,
golang-any,
golang-github-evilsocket-ftrace-dev,
golang-github-fsnotify-fsnotify-dev,
golang-github-google-gopacket-dev,
golang-github-google-nftables-dev,
golang-github-iovisor-gobpf-dev,
golang-github-vishvananda-netlink-dev,
golang-golang-x-net-dev,
golang-google-grpc-dev,
golang-goprotobuf-dev,
libmnl-dev,
libnetfilter-queue-dev,
pkg-config,
protoc-gen-go-grpc,
pyqt5-dev-tools,
qttools5-dev-tools,
python3-all,
python3-grpc-tools,
python3-setuptools
Standards-Version: 4.6.2
Vcs-Browser: https://github.com/evilsocket/opensnitch
Vcs-Git: https://github.com/evilsocket/opensnitch.git -b 1.5.0
Homepage: https://github.com/evilsocket/opensnitch
Rules-Requires-Root: no
XS-Go-Import-Path: github.com/evilsocket/opensnitch
Package: opensnitch
Section: net
Architecture: any
Depends:
${misc:Depends},
${shlibs:Depends},
Recommends: python3-opensnitch-ui
Built-Using: ${misc:Built-Using}
Description: GNU/Linux interactive application firewall
OpenSnitch is a GNU/Linux firewall application.
Whenever a program makes a connection, it'll prompt the user to allow or deny
it.
.
The user can decide if block the outgoing connection based on properties of
the connection: by port, by uid, by dst ip, by program or a combination
of them.
.
These rules can last forever, until the app restarts, or just one time.
.
The GUI allows the user to view live outgoing connections, as well as search
by process, user, host or port.
.
OpenSnitch can also work as a system-wide domains blocker, by using lists
of domains, list of IPs or list of regular expressions.
Package: python3-opensnitch-ui
Architecture: all
Section: net
Depends:
${misc:Depends},
${shlibs:Depends},
libqt5sql5-sqlite,
python3-grpcio,
python3-notify2,
python3-pyinotify,
python3-pyqt5,
python3-pyqt5.qtsql,
python3-setuptools,
python3-six,
python3-slugify,
python3:any,
xdg-user-dirs,
gtk-update-icon-cache
Recommends:
python3-pyasn
Suggests: opensnitch
Description: GNU/Linux interactive application firewall GUI
opensnitch-ui is a GUI for opensnitch written in Python.
It allows the user to view live outgoing connections, as well as search
for details of the intercepted connections.
.
The user can decide if block outgoing connections based on properties of
the connection: by port, by uid, by dst ip, by program or a combination
of them.
.
These rules can last forever, until the daemon is restarted, or just one time.
.
OpenSnitch can also work as a system-wide domains blocker, by using lists
of domains, list of IPs or list of regular expressions.
opensnitch-1.5.8.1/debian/copyright 0000664 0000000 0000000 00000002246 14401326716 0017223 0 ustar 00root root 0000000 0000000 Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Source: https://github.com/evilsocket/opensnitch
Upstream-Contact: Gustavo Iñiguez Goia
Upstream-Name: opensnitch
Files-Excluded:
Godeps/_workspace
Files: *
Copyright:
2017-2018 evilsocket
2019-2023 Gustavo Iñiguez Goia
Comment: Debian packaging is licensed under the same terms as upstream
License: GPL-3.0+
This program is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later
version.
.
This program is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more
details.
.
You should have received a copy of the GNU General Public
License along with this program. If not, see
http://www.gnu.org/licenses/.
.
On Debian systems, the full text of the GNU General Public
License version 3 can be found in the file
'/usr/share/common-licenses/GPL-3'.
opensnitch-1.5.8.1/debian/gbp.conf 0000664 0000000 0000000 00000000036 14401326716 0016702 0 ustar 00root root 0000000 0000000 [DEFAULT]
pristine-tar = True
opensnitch-1.5.8.1/debian/gitlab-ci.yml 0000664 0000000 0000000 00000002525 14401326716 0017646 0 ustar 00root root 0000000 0000000 # auto-generated, DO NOT MODIFY.
# The authoritative copy of this file lives at:
# https://salsa.debian.org/go-team/ci/blob/master/config/gitlabciyml.go
# TODO: publish under debian-go-team/ci
image: stapelberg/ci2
test_the_archive:
artifacts:
paths:
- before-applying-commit.json
- after-applying-commit.json
script:
# Create an overlay to discard writes to /srv/gopath/src after the build:
- "rm -rf /cache/overlay/{upper,work}"
- "mkdir -p /cache/overlay/{upper,work}"
- "mount -t overlay overlay -o lowerdir=/srv/gopath/src,upperdir=/cache/overlay/upper,workdir=/cache/overlay/work /srv/gopath/src"
- "export GOPATH=/srv/gopath"
- "export GOCACHE=/cache/go"
# Build the world as-is:
- "ci-build -exemptions=/var/lib/ci-build/exemptions.json > before-applying-commit.json"
# Copy this package into the overlay:
- "GBP_CONF_FILES=:debian/gbp.conf gbp buildpackage --git-no-pristine-tar --git-ignore-branch --git-ignore-new --git-export-dir=/tmp/export --git-no-overlay --git-tarball-dir=/nonexistant --git-cleaner=/bin/true --git-builder='dpkg-buildpackage -S -d --no-sign'"
- "pgt-gopath -dsc /tmp/export/*.dsc"
# Rebuild the world:
- "ci-build -exemptions=/var/lib/ci-build/exemptions.json > after-applying-commit.json"
- "ci-diff before-applying-commit.json after-applying-commit.json"
opensnitch-1.5.8.1/debian/opensnitch.init 0000664 0000000 0000000 00000003562 14401326716 0020331 0 ustar 00root root 0000000 0000000 #!/bin/sh
### BEGIN INIT INFO
# Provides: opensnitchd
# Required-Start: $network $local_fs
# Required-Stop: $network $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: opensnitchd daemon
# Description: opensnitch application firewall
### END INIT INFO
NAME=opensnitchd
PIDDIR=/var/run/$NAME
OPENSNITCHDPID=$PIDDIR/$NAME.pid
# clear conflicting settings from the environment
unset TMPDIR
test -x /usr/bin/$NAME || exit 0
. /lib/lsb/init-functions
case $1 in
start)
	log_daemon_msg "Starting opensnitch daemon" $NAME
	if [ ! -d /etc/$NAME/rules ]; then
		# POSIX fix: "&>" is a bashism and this script runs under /bin/sh;
		# redirect stdout and stderr explicitly instead.
		mkdir -p /etc/$NAME/rules >/dev/null 2>&1
	fi
	# Make sure we have our PIDDIR, even if it's on a tmpfs
	install -o root -g root -m 755 -d $PIDDIR
	if ! start-stop-daemon --start --quiet --oknodo --pidfile $OPENSNITCHDPID --background --exec /usr/bin/$NAME -- -rules-path /etc/$NAME/rules; then
		log_end_msg 1
		exit 1
	fi
	log_end_msg 0
	;;
stop)
log_daemon_msg "Stopping $NAME daemon" $NAME
start-stop-daemon --stop --quiet --signal QUIT --name $NAME
# Wait a little and remove stale PID file
sleep 1
if [ -f $OPENSNITCHDPID ] && ! ps h `cat $OPENSNITCHDPID` > /dev/null
then
rm -f $OPENSNITCHDPID
fi
log_end_msg 0
;;
reload)
log_daemon_msg "Reloading $NAME" $NAME
start-stop-daemon --stop --quiet --signal HUP --pidfile $OPENSNITCHDPID
log_end_msg 0
;;
restart|force-reload)
$0 stop
sleep 1
$0 start
;;
status)
status_of_proc /usr/bin/$NAME $NAME
exit $?
;;
*)
echo "Usage: /etc/init.d/opensnitchd {start|stop|reload|restart|force-reload|status}"
exit 1
;;
esac
exit 0
opensnitch-1.5.8.1/debian/opensnitch.install 0000664 0000000 0000000 00000000174 14401326716 0021030 0 ustar 00root root 0000000 0000000 daemon/default-config.json etc/opensnitchd/
daemon/system-fw.json etc/opensnitchd/
#ebpf_prog/opensnitch.o etc/opensnitchd/
opensnitch-1.5.8.1/debian/opensnitch.logrotate 0000664 0000000 0000000 00000000353 14401326716 0021361 0 ustar 00root root 0000000 0000000 /var/log/opensnitchd.log {
rotate 7
# order of the fields is important
maxsize 50M
# we need this option in order to keep logging
copytruncate
missingok
notifempty
delaycompress
compress
create 640 root root
weekly
}
opensnitch-1.5.8.1/debian/opensnitch.service 0000664 0000000 0000000 00000000633 14401326716 0021022 0 ustar 00root root 0000000 0000000 [Unit]
Description=OpenSnitch is a GNU/Linux application firewall.
Documentation=https://github.com/evilsocket/opensnitch/wiki
Wants=network.target
After=network.target
[Service]
Type=simple
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /etc/opensnitchd/rules
ExecStart=/usr/bin/opensnitchd -rules-path /etc/opensnitchd/rules
Restart=always
RestartSec=30
[Install]
WantedBy=multi-user.target
opensnitch-1.5.8.1/debian/python3-opensnitch-ui.postinst 0000775 0000000 0000000 00000000646 14401326716 0023271 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -e
# Symlink the GUI .desktop file into the XDG autostart directory, unless
# one is already present.
autostart_by_default()
{
	# Fix: /etc/xdg/autostart is a directory, so it must be tested with -d
	# (the old "-f" test was always false and the link was never created).
	# The deprecated "-a" test operator is replaced with chained tests.
	if [ -d /etc/xdg/autostart ] && [ ! -f /etc/xdg/autostart/opensnitch_ui.desktop ]; then
		ln -s /usr/share/applications/opensnitch_ui.desktop /etc/xdg/autostart/
	fi
}
autostart_by_default
if command -v gtk-update-icon-cache >/dev/null && test -f /usr/share/icons/hicolor/index.theme ; then
gtk-update-icon-cache --quiet /usr/share/icons/hicolor/
fi
#DEBHELPER#
opensnitch-1.5.8.1/debian/python3-opensnitch-ui.postrm 0000775 0000000 0000000 00000000402 14401326716 0022720 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -e
# Debian maintainer script argument dispatch:
#   purge  - also delete the autostart symlink created at install time
#   remove - ask any running GUI instance to exit with SIGTERM; failures
#            (e.g. no instance running) are deliberately ignored
case "$1" in
purge)
if [ -f /etc/xdg/autostart/opensnitch_ui.desktop ];then
rm -f /etc/xdg/autostart/opensnitch_ui.desktop
fi
;;
remove)
pkill -15 opensnitch-ui || true
;;
esac
#DEBHELPER#
opensnitch-1.5.8.1/debian/rules 0000775 0000000 0000000 00000002475 14401326716 0016354 0 ustar 00root root 0000000 0000000 #!/usr/bin/make -f
export DH_VERBOSE = 1
export DESTDIR := $(shell pwd)/debian/opensnitch
export UIDESTDIR := $(shell pwd)/debian/python3-opensnitch-ui
override_dh_installsystemd:
dh_installsystemd --restart-after-upgrade
override_dh_auto_build:
$(MAKE) protocol
# Workaround for Go build problem when building in _build
mkdir -p _build/src/github.com/evilsocket/opensnitch/daemon/ui/protocol/
cp daemon/ui/protocol/* _build/src/github.com/evilsocket/opensnitch/daemon/ui/protocol/
dh_auto_build
cd ui && python3 setup.py build --force
override_dh_auto_install:
# daemon
mkdir -p $(DESTDIR)/usr/bin
cp _build/bin/daemon $(DESTDIR)/usr/bin/opensnitchd
# GUI
make -C ui/i18n
cp -r ui/i18n/locales/ ui/opensnitch/i18n/
pyrcc5 -o ui/opensnitch/resources_rc.py ui/opensnitch/res/resources.qrc
sed -i 's/^import ui_pb2/from . import ui_pb2/' ui/opensnitch/ui_pb2*
cd ui && python3 setup.py install --force --root=$(UIDESTDIR) --no-compile -O0 --install-layout=deb
# daemon
dh_auto_install
%:
dh $@ --builddirectory=_build --buildsystem=golang --with=golang,python3
override_dh_auto_clean:
dh_auto_clean
$(MAKE) clean
$(RM) ui/opensnitch/resources_rc.py
$(RM) -r ui/opensnitch/i18n/
$(RM) ui/i18n/locales/*/*.qm
cd ui && python3 setup.py clean -a
$(RM) -r ui/opensnitch_ui.egg-info/
find ui -name \*.pyc -exec rm {} \;
opensnitch-1.5.8.1/debian/source/ 0000775 0000000 0000000 00000000000 14401326716 0016564 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/debian/source/format 0000664 0000000 0000000 00000000014 14401326716 0017772 0 ustar 00root root 0000000 0000000 3.0 (quilt)
opensnitch-1.5.8.1/debian/source/options 0000664 0000000 0000000 00000000040 14401326716 0020174 0 ustar 00root root 0000000 0000000 extend-diff-ignore="\.egg-info$" opensnitch-1.5.8.1/debian/tests/ 0000775 0000000 0000000 00000000000 14401326716 0016426 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/debian/tests/control 0000664 0000000 0000000 00000000055 14401326716 0020031 0 ustar 00root root 0000000 0000000 Tests: test-resources.sh
Depends: opensnitch
opensnitch-1.5.8.1/debian/tests/test-resources.sh 0000775 0000000 0000000 00000000536 14401326716 0021760 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -e

# Installation smoke test: every resource the daemon needs must exist.
# Any missing path makes ls fail and, through set -e, fails the test.
ophome="/etc/opensnitchd"

# check <ls-flags> <path> <label>: verify <path> exists, then report <label>.
check() {
ls "$1" "$2" 1>/dev/null
echo "installed OK: $3"
}

check -dl "$ophome" "$ophome"
check -l "$ophome/system-fw.json" "$ophome/system-fw.json"
check -l "$ophome/default-config.json" "$ophome/default-config.json"
check -dl "$ophome/rules" "$ophome/rules/"
opensnitch-1.5.8.1/debian/upstream/ 0000775 0000000 0000000 00000000000 14401326716 0017124 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/debian/upstream/metadata 0000664 0000000 0000000 00000000632 14401326716 0020630 0 ustar 00root root 0000000 0000000 ---
Name: opensnitch
Bug-Database: https://github.com/evilsocket/opensnitch/issues
Bug-Submit: https://github.com/evilsocket/opensnitch/issues/new
Contact: Gustavo Iñiguez Goia
Documentation: https://github.com/evilsocket/opensnitch/wiki
CPE: cpe:/a:evilsocket:opensnitch
Repository: https://github.com/evilsocket/opensnitch.git
Repository-Browse: https://github.com/evilsocket/opensnitch
opensnitch-1.5.8.1/debian/watch 0000664 0000000 0000000 00000000352 14401326716 0016315 0 ustar 00root root 0000000 0000000 version=4
opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/opensnitch-\$1\.tar\.gz/,\
uversionmangle=s/(\d)[_\.\-\+]?(RC|rc|pre|dev|beta|alpha)[.]?(\d*)$/\$1~\$2\$3/ \
https://github.com/evilsocket/opensnitch/tags .*/v?(\d\S*)\.tar\.gz
opensnitch-1.5.8.1/ebpf_prog/ 0000775 0000000 0000000 00000000000 14401326716 0016005 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/ebpf_prog/Makefile 0000664 0000000 0000000 00000012353 14401326716 0017451 0 ustar 00root root 0000000 0000000 #taken from /samples/bpf/Makefile and removed all targets
# SPDX-License-Identifier: GPL-2.0
BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src))
TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools
# Libbpf dependencies
LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o
always-y += opensnitch.o
ifeq ($(ARCH), arm)
# Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux
# headers when arm instruction set identification is requested.
ARM_ARCH_SELECTOR := $(filter -D__LINUX_ARM_ARCH__%, $(KBUILD_CFLAGS))
BPF_EXTRA_CFLAGS := $(ARM_ARCH_SELECTOR)
TPROGS_CFLAGS += $(ARM_ARCH_SELECTOR)
endif
TPROGS_CFLAGS += -Wall -O2
TPROGS_CFLAGS += -Wmissing-prototypes
TPROGS_CFLAGS += -Wstrict-prototypes
TPROGS_CFLAGS += -I$(objtree)/usr/include
TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(srctree)/tools/lib/
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
ifdef SYSROOT
TPROGS_CFLAGS += --sysroot=$(SYSROOT)
TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
endif
TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
TPROGS_LDLIBS += $(LIBBPF) -lelf -lz
TPROGLDLIBS_tracex4 += -lrt
TPROGLDLIBS_trace_output += -lrt
TPROGLDLIBS_map_perf_test += -lrt
TPROGLDLIBS_test_overhead += -lrt
TPROGLDLIBS_xdpsock += -pthread
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make M=samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
LLVM_OBJCOPY ?= llvm-objcopy
BTF_PAHOLE ?= pahole
# Detect that we're cross compiling and use the cross compiler
ifdef CROSS_COMPILE
CLANG_ARCH_ARGS = --target=$(notdir $(CROSS_COMPILE:%-=%))
endif
# Don't evaluate probes and warnings if we need to run make recursively
ifneq ($(src),)
HDR_PROBE := $(shell printf "\#include \n struct list_head { int a; }; int main() { return 0; }" | \
$(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \
-o /dev/null 2>/dev/null && echo okay)
ifeq ($(HDR_PROBE),)
$(warning WARNING: Detected possible issues with include path.)
$(warning WARNING: Please install kernel headers locally (make headers_install).)
endif
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
readelf -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
BPF_EXTRA_CFLAGS += -fno-stack-protector
ifneq ($(BTF_LLVM_PROBE),)
BPF_EXTRA_CFLAGS += -g
else
ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
BPF_EXTRA_CFLAGS += -g
LLC_FLAGS += -mattr=dwarfris
DWARF2BTF = y
endif
endif
endif
# Trick to allow make to be run from this directory
all:
$(MAKE) -C ../../ M=$(CURDIR) BPF_SAMPLES_PATH=$(CURDIR)
clean:
$(MAKE) -C ../../ M=$(CURDIR) clean
@find $(CURDIR) -type f -name '*~' -delete
$(LIBBPF): FORCE
# Fix up variables inherited from Kbuild that tools/ build system won't like
$(MAKE) -C $(dir $@) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ O=
$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
$(call filechk,offsets,__SYSCALL_NRS_H__)
targets += syscall_nrs.s
clean-files += syscall_nrs.h
FORCE:
# Verify LLVM compiler tools are available and bpf target is supported by llc
.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
verify_cmds: $(CLANG) $(LLC)
@for TOOL in $^ ; do \
if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \
echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\
exit 1; \
else true; fi; \
done
verify_target_bpf: verify_cmds
@if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \
echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\
echo " NOTICE: LLVM version >= 3.7.1 required" ;\
exit 2; \
else true; fi
$(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF)
$(src)/*.c: verify_target_bpf $(LIBBPF)
$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
$(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
$(obj)/hbm.o: $(src)/hbm.h
$(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
-include $(BPF_SAMPLES_PATH)/Makefile.target
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
$(obj)/%.o: $(src)/%.c
@echo " CLANG-bpf " $@
$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
-I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
-I$(srctree)/tools/lib/ \
-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
opensnitch-1.5.8.1/ebpf_prog/README 0000664 0000000 0000000 00000002122 14401326716 0016662 0 ustar 00root root 0000000 0000000 opensnitch.c is an eBPF program. Compilation requires getting kernel source.
sudo apt install clang llvm libelf-dev libzip-dev flex bison libssl-dev bc rsync python3
cd opensnitch
wget https://github.com/torvalds/linux/archive/v5.8.tar.gz
tar -xf v5.8.tar.gz
patch linux-5.8/tools/lib/bpf/bpf_helpers.h < ebpf_prog/file.patch
cp ebpf_prog/opensnitch.c ebpf_prog/Makefile linux-5.8/samples/bpf
cd linux-5.8 && yes "" | make oldconfig && make prepare && make headers_install # (1 min)
cd samples/bpf && make
objdump -h opensnitch.o #you should see many section, number 1 should be called kprobe/tcp_v4_connect
llvm-strip -g opensnitch.o #remove debug info
sudo cp opensnitch.o /etc/opensnitchd/
cd ../../../daemon
--opensnitchd expects to find opensnitch.o in /etc/opensnitchd/
--start opensnitchd with:
opensnitchd -rules-path /etc/opensnitchd/rules -process-monitor-method ebpf
The kernel where you intend to run it must have some options activated:
$ grep BPF /boot/config-$(uname -r)
CONFIG_CGROUP_BPF=y
CONFIG_BPF=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_EVENTS=y
CONFIG_KPROBES=y
CONFIG_KPROBE_EVENTS=y
opensnitch-1.5.8.1/ebpf_prog/arm-clang-asm-fix.patch 0000664 0000000 0000000 00000000601 14401326716 0022226 0 ustar 00root root 0000000 0000000 --- ../../arch/arm/include/asm/unified.h 2021-04-20 10:47:54.075834124 +0000
+++ ../../arch/arm/include/asm/unified-clang-fix.h 2021-04-20 10:47:38.943811970 +0000
@@ -11,7 +11,10 @@
#if defined(__ASSEMBLY__)
.syntax unified
#else
-__asm__(".syntax unified");
+//__asm__(".syntax unified");
+#ifndef __clang__
+ __asm__(".syntax unified");
+#endif
#endif
#ifdef CONFIG_CPU_V7M
opensnitch-1.5.8.1/ebpf_prog/file.patch 0000664 0000000 0000000 00000000616 14401326716 0017750 0 ustar 00root root 0000000 0000000 --- linux-5.8/tools/lib/bpf/bpf_helpers.h 2020-08-03 00:21:45.000000000 +0300
+++ linux-5.8/tools/lib/bpf/bpf_helpersnew.h 2021-02-23 18:45:21.789624834 +0300
@@ -54,7 +54,7 @@
* Helper structure used by eBPF C program
* to describe BPF map attributes to libbpf loader
*/
-struct bpf_map_def {
+struct bpf_map_defold {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
opensnitch-1.5.8.1/ebpf_prog/opensnitch.c 0000664 0000000 0000000 00000036010 14401326716 0020323 0 ustar 00root root 0000000 0000000 #define KBUILD_MODNAME "dummy"
//uncomment if building on x86_32
//#define OPENSNITCH_x86_32
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define MAPSIZE 12000
//-------------------------------map definitions
// which github.com/iovisor/gobpf/elf expects
#define BUF_SIZE_MAP_NS 256
typedef struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
unsigned int pinning;
char namespace[BUF_SIZE_MAP_NS];
} bpf_map_def;
enum bpf_pin_type {
PIN_NONE = 0,
PIN_OBJECT_NS,
PIN_GLOBAL_NS,
PIN_CUSTOM_NS,
};
//-----------------------------------
// even though we only need 32 bits of pid, on x86_32 ebpf verifier complained when pid type was set to u32
typedef u64 pid_size_t;
typedef u64 uid_size_t;
struct tcp_key_t {
u16 sport;
u32 daddr;
u16 dport;
u32 saddr;
}__attribute__((packed));
struct tcp_value_t{
pid_size_t pid;
uid_size_t uid;
u64 counter;
}__attribute__((packed));
// not using unsigned __int128 because it is not supported on x86_32
struct ipV6 {
u64 part1;
u64 part2;
}__attribute__((packed));
struct tcpv6_key_t {
u16 sport;
struct ipV6 daddr;
u16 dport;
struct ipV6 saddr;
}__attribute__((packed));
struct tcpv6_value_t{
pid_size_t pid;
uid_size_t uid;
u64 counter;
}__attribute__((packed));;
struct udp_key_t {
u16 sport;
u32 daddr;
u16 dport;
u32 saddr;
} __attribute__((packed));
struct udp_value_t{
pid_size_t pid;
uid_size_t uid;
u64 counter;
}__attribute__((packed));
struct udpv6_key_t {
u16 sport;
struct ipV6 daddr;
u16 dport;
struct ipV6 saddr;
}__attribute__((packed));
struct udpv6_value_t{
pid_size_t pid;
uid_size_t uid;
u64 counter;
}__attribute__((packed));
// on x86_32 "struct sock" is arranged differently from x86_64 (at least on Debian kernels).
// We hardcode offsets of IP addresses.
struct sock_on_x86_32_t {
u8 data_we_dont_care_about[40];
struct ipV6 daddr;
struct ipV6 saddr;
};
// Add +1,+2,+3 etc. to map size helps to easier distinguish maps in bpftool's output
struct bpf_map_def SEC("maps/tcpMap") tcpMap = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct tcp_key_t),
.value_size = sizeof(struct tcp_value_t),
.max_entries = MAPSIZE+1,
};
struct bpf_map_def SEC("maps/tcpv6Map") tcpv6Map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct tcpv6_key_t),
.value_size = sizeof(struct tcpv6_value_t),
.max_entries = MAPSIZE+2,
};
struct bpf_map_def SEC("maps/udpMap") udpMap = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct udp_key_t),
.value_size = sizeof(struct udp_value_t),
.max_entries = MAPSIZE+3,
};
struct bpf_map_def SEC("maps/udpv6Map") udpv6Map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct udpv6_key_t),
.value_size = sizeof(struct udpv6_value_t),
.max_entries = MAPSIZE+4,
};
// for TCP the IP-tuple can be copied from "struct sock" only upon return from tcp_connect().
// We stash the socket here to look it up upon return.
struct bpf_map_def SEC("maps/tcpsock") tcpsock = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u64),
.value_size = sizeof(u64),// using u64 instead of sizeof(struct sock *)
// to avoid pointer size related quirks on x86_32
.max_entries = 100,
};
struct bpf_map_def SEC("maps/tcpv6sock") tcpv6sock = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u64),
.value_size = sizeof(u64),
.max_entries = 100,
};
// //counts how many connections we've processed. Starts at 0.
struct bpf_map_def SEC("maps/tcpcounter") tcpcounter = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u64),
.max_entries = 1,
};
struct bpf_map_def SEC("maps/tcpv6counter") tcpv6counter = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u64),
.max_entries = 1,
};
struct bpf_map_def SEC("maps/udpcounter") udpcounter = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u64),
.max_entries = 1,
};
struct bpf_map_def SEC("maps/udpv6counter") udpv6counter = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u64),
.max_entries = 1,
};
struct bpf_map_def SEC("maps/debugcounter") debugcounter = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u64),
.max_entries = 1,
};
// size 150 gave ebpf verifier errors for kernel 4.14, 100 is ok
// we can cast any struct into rawBytes_t to be able to access arbitrary bytes of the struct
struct rawBytes_t {
u8 bytes[100];
};
//used for debug purposes only
struct bpf_map_def SEC("maps/bytes") bytes = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(u32),
.max_entries = 222,
};
//used for debug purposes only
struct bpf_map_def SEC("maps/debug") debug = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct tcpv6_key_t),
.value_size = sizeof(struct rawBytes_t),
.max_entries = 555,
};
// initializing variables with __builtin_memset() is required
// for compatibility with bpf on kernel 4.4
// Entry probe for tcp_v4_connect(): the socket's IP tuple is not filled in
// yet at this point (see the tcpsock map comment above), so we only stash
// the socket pointer, keyed by pid_tgid, for the matching kretprobe.
SEC("kprobe/tcp_v4_connect")
int kprobe__tcp_v4_connect(struct pt_regs *ctx)
{
#ifdef OPENSNITCH_x86_32
// On x86_32 platforms I couldn't get function arguments using PT_REGS_PARM1
// that's why we are accessing registers directly
struct sock *sk = (struct sock *)((ctx)->ax);
#else
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
#endif
// store the pointer as u64 to avoid pointer-size quirks on x86_32
u64 skp = (u64)sk;
u64 pid_tgid = bpf_get_current_pid_tgid();
bpf_map_update_elem(&tcpsock, &pid_tgid, &skp, BPF_ANY);
return 0;
};
// Return probe for tcp_v4_connect(): the kernel has now filled in the
// socket's address/port tuple. Retrieve the socket stashed by the entry
// probe, record the tuple in tcpMap together with pid/uid and a sequence
// counter, bump the counter, and drop the stash entry.
SEC("kretprobe/tcp_v4_connect")
int kretprobe__tcp_v4_connect(struct pt_regs *ctx)
{
u64 pid_tgid = bpf_get_current_pid_tgid();
// look up the socket pointer stored by kprobe__tcp_v4_connect
u64 *skp = bpf_map_lookup_elem(&tcpsock, &pid_tgid);
if (skp == NULL) {return 0;}
struct sock *sk;
// explicit zero-init is required for bpf compatibility on kernel 4.4
// (see comment near the top of this file)
__builtin_memset(&sk, 0, sizeof(sk));
sk = (struct sock *)*skp;
struct tcp_key_t tcp_key;
__builtin_memset(&tcp_key, 0, sizeof(tcp_key));
bpf_probe_read(&tcp_key.dport, sizeof(tcp_key.dport), &sk->__sk_common.skc_dport);
bpf_probe_read(&tcp_key.sport, sizeof(tcp_key.sport), &sk->__sk_common.skc_num);
bpf_probe_read(&tcp_key.daddr, sizeof(tcp_key.daddr), &sk->__sk_common.skc_daddr);
bpf_probe_read(&tcp_key.saddr, sizeof(tcp_key.saddr), &sk->__sk_common.skc_rcv_saddr);
u32 zero_key = 0;
// tcpcounter holds a single global sequence number at index 0
u64 *val = bpf_map_lookup_elem(&tcpcounter, &zero_key);
if (val == NULL){return 0;}
struct tcp_value_t tcp_value;
__builtin_memset(&tcp_value, 0, sizeof(tcp_value));
// upper 32 bits of pid_tgid are the tgid (the user-visible pid)
tcp_value.pid = pid_tgid >> 32;
// lower 32 bits of uid_gid are the uid
tcp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
tcp_value.counter = *val;
bpf_map_update_elem(&tcpMap, &tcp_key, &tcp_value, BPF_ANY);
u64 newval = *val + 1;
bpf_map_update_elem(&tcpcounter, &zero_key, &newval, BPF_ANY);
// the stashed socket pointer is no longer needed
bpf_map_delete_elem(&tcpsock, &pid_tgid);
return 0;
};
// Entry probe for tcp_v6_connect(): same strategy as the IPv4 variant
// above — stash the socket pointer, keyed by pid_tgid, for the kretprobe.
SEC("kprobe/tcp_v6_connect")
int kprobe__tcp_v6_connect(struct pt_regs *ctx)
{
#ifdef OPENSNITCH_x86_32
// on x86_32 read the first argument straight from the register
struct sock *sk = (struct sock *)((ctx)->ax);
#else
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
#endif
// store as u64 to avoid pointer-size quirks on x86_32
u64 skp = (u64)sk;
u64 pid_tgid = bpf_get_current_pid_tgid();
bpf_map_update_elem(&tcpv6sock, &pid_tgid, &skp, BPF_ANY);
return 0;
};
// Return probe for tcp_v6_connect(): IPv6 counterpart of
// kretprobe__tcp_v4_connect. Records the v6 tuple plus pid/uid/counter in
// tcpv6Map and removes the stashed socket pointer.
SEC("kretprobe/tcp_v6_connect")
int kretprobe__tcp_v6_connect(struct pt_regs *ctx)
{
u64 pid_tgid = bpf_get_current_pid_tgid();
u64 *skp = bpf_map_lookup_elem(&tcpv6sock, &pid_tgid);
if (skp == NULL) {return 0;}
struct sock *sk;
// explicit zero-init required for bpf compatibility on kernel 4.4
__builtin_memset(&sk, 0, sizeof(sk));
sk = (struct sock *)*skp;
struct tcpv6_key_t tcpv6_key;
__builtin_memset(&tcpv6_key, 0, sizeof(tcpv6_key));
bpf_probe_read(&tcpv6_key.dport, sizeof(tcpv6_key.dport), &sk->__sk_common.skc_dport);
bpf_probe_read(&tcpv6_key.sport, sizeof(tcpv6_key.sport), &sk->__sk_common.skc_num);
#ifdef OPENSNITCH_x86_32
// on x86_32 "struct sock" is laid out differently, so the addresses are
// read from hardcoded offsets via struct sock_on_x86_32_t (see above)
struct sock_on_x86_32_t sock;
__builtin_memset(&sock, 0, sizeof(sock));
bpf_probe_read(&sock, sizeof(sock), *(&sk));
tcpv6_key.daddr = sock.daddr;
tcpv6_key.saddr = sock.saddr;
#else
bpf_probe_read(&tcpv6_key.daddr, sizeof(tcpv6_key.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
bpf_probe_read(&tcpv6_key.saddr, sizeof(tcpv6_key.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
#endif
u32 zero_key = 0;
u64 *val = bpf_map_lookup_elem(&tcpv6counter, &zero_key);
if (val == NULL){return 0;}
struct tcpv6_value_t tcpv6_value;
__builtin_memset(&tcpv6_value, 0, sizeof(tcpv6_value));
// tgid in the upper half, uid in the lower half of the helper results
tcpv6_value.pid = pid_tgid >> 32;
tcpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
tcpv6_value.counter = *val;
bpf_map_update_elem(&tcpv6Map, &tcpv6_key, &tcpv6_value, BPF_ANY);
u64 newval = *val + 1;
bpf_map_update_elem(&tcpv6counter, &zero_key, &newval, BPF_ANY);
bpf_map_delete_elem(&tcpv6sock, &pid_tgid);
return 0;
};
// Probe for udp_sendmsg(): records the IPv4 UDP flow (key) with pid/uid
// and a sequence counter (value) in udpMap. UDP has no connect() pair to
// hook, so the tuple is read directly at send time — preferably from the
// msghdr's sockaddr, falling back to the socket itself.
SEC("kprobe/udp_sendmsg")
int kprobe__udp_sendmsg(struct pt_regs *ctx)
{
#ifdef OPENSNITCH_x86_32
// on x86_32 read the arguments straight from the registers
struct sock *sk = (struct sock *)((ctx)->ax);
struct msghdr *msg = (struct msghdr *)((ctx)->dx);
#else
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
struct msghdr *msg = (struct msghdr *)PT_REGS_PARM2(ctx);
#endif
u64 msg_name; //pointer
__builtin_memset(&msg_name, 0, sizeof(msg_name));
bpf_probe_read(&msg_name, sizeof(msg_name), &msg->msg_name);
struct sockaddr_in * usin = (struct sockaddr_in *)msg_name;
struct udp_key_t udp_key;
__builtin_memset(&udp_key, 0, sizeof(udp_key));
bpf_probe_read(&udp_key.dport, sizeof(udp_key.dport), &usin->sin_port);
if (udp_key.dport != 0){ //likely
bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &usin->sin_addr.s_addr);
}
else {
//very rarely dport can be found in skc_dport
bpf_probe_read(&udp_key.dport, sizeof(udp_key.dport), &sk->__sk_common.skc_dport);
bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &sk->__sk_common.skc_daddr);
}
bpf_probe_read(&udp_key.sport, sizeof(udp_key.sport), &sk->__sk_common.skc_num);
bpf_probe_read(&udp_key.saddr, sizeof(udp_key.saddr), &sk->__sk_common.skc_rcv_saddr);
u32 zero_key = 0;
__builtin_memset(&zero_key, 0, sizeof(zero_key));
u64 *counterVal = bpf_map_lookup_elem(&udpcounter, &zero_key);
if (counterVal == NULL){return 0;}
struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
u64 pid = bpf_get_current_pid_tgid() >> 32;
// only (re)insert when the flow is new or now owned by a different
// process, so the counter isn't bumped for every datagram of a flow
if ( lookedupValue == NULL || lookedupValue->pid != pid) {
struct udp_value_t udp_value;
__builtin_memset(&udp_value, 0, sizeof(udp_value));
udp_value.pid = pid;
udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
udp_value.counter = *counterVal;
bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
u64 newval = *counterVal + 1;
bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
}
//else nothing to do
return 0;
};
// Probe for udpv6_sendmsg(): IPv6 counterpart of kprobe__udp_sendmsg.
// Note the fallback order is inverted vs. IPv4: the destination is read
// from the socket first and from the msghdr's sockaddr_in6 only when
// skc_dport is 0.
SEC("kprobe/udpv6_sendmsg")
int kprobe__udpv6_sendmsg(struct pt_regs *ctx)
{
#ifdef OPENSNITCH_x86_32
// on x86_32 read the arguments straight from the registers
struct sock *sk = (struct sock *)((ctx)->ax);
struct msghdr *msg = (struct msghdr *)((ctx)->dx);
#else
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
struct msghdr *msg = (struct msghdr *)PT_REGS_PARM2(ctx);
#endif
u64 msg_name; //a pointer
__builtin_memset(&msg_name, 0, sizeof(msg_name));
bpf_probe_read(&msg_name, sizeof(msg_name), &msg->msg_name);
struct udpv6_key_t udpv6_key;
__builtin_memset(&udpv6_key, 0, sizeof(udpv6_key));
bpf_probe_read(&udpv6_key.dport, sizeof(udpv6_key.dport), &sk->__sk_common.skc_dport);
if (udpv6_key.dport != 0){ //likely
bpf_probe_read(&udpv6_key.daddr, sizeof(udpv6_key.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
}
else {
// destination not on the socket: take it from the msghdr's sockaddr
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *)msg_name;
bpf_probe_read(&udpv6_key.dport, sizeof(udpv6_key.dport), &sin6->sin6_port);
bpf_probe_read(&udpv6_key.daddr, sizeof(udpv6_key.daddr), &sin6->sin6_addr.in6_u.u6_addr32);
}
bpf_probe_read(&udpv6_key.sport, sizeof(udpv6_key.sport), &sk->__sk_common.skc_num);
bpf_probe_read(&udpv6_key.saddr, sizeof(udpv6_key.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
#ifdef OPENSNITCH_x86_32
// on x86_32 the addresses read above are overwritten with values taken
// from hardcoded offsets (see struct sock_on_x86_32_t)
struct sock_on_x86_32_t sock;
__builtin_memset(&sock, 0, sizeof(sock));
bpf_probe_read(&sock, sizeof(sock), *(&sk));
udpv6_key.daddr = sock.daddr;
udpv6_key.saddr = sock.saddr;
#endif
u32 zero_key = 0;
u64 *counterVal = bpf_map_lookup_elem(&udpv6counter, &zero_key);
if (counterVal == NULL){return 0;}
struct udpv6_value_t *lookedupValue = bpf_map_lookup_elem(&udpv6Map, &udpv6_key);
u64 pid = bpf_get_current_pid_tgid() >> 32;
// only (re)insert for a new flow or a flow taken over by another process
if ( lookedupValue == NULL || lookedupValue->pid != pid) {
struct udpv6_value_t udpv6_value;
__builtin_memset(&udpv6_value, 0, sizeof(udpv6_value));
udpv6_value.pid = pid;
udpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
udpv6_value.counter = *counterVal;
bpf_map_update_elem(&udpv6Map, &udpv6_key, &udpv6_value, BPF_ANY);
u64 newval = *counterVal + 1;
bpf_map_update_elem(&udpv6counter, &zero_key, &newval, BPF_ANY);
}
//else nothing to do
return 0;
};
// Probe for iptunnel_xmit(): accounts datagrams sent through IP tunnels,
// which do not pass through udp_sendmsg(). The source/destination
// addresses arrive as plain parameters; the ports are read from the
// skb's transport header.
// NOTE(review): under OPENSNITCH_x86_32 the early return leaves skb/src/dst
// undeclared for the code below, which looks like it cannot compile on
// that platform — verify.
SEC("kprobe/iptunnel_xmit")
int kprobe__iptunnel_xmit(struct pt_regs *ctx)
{
#ifdef OPENSNITCH_x86_32
// TODO
return 0;
#else
struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM3(ctx);
u32 src = (u32)PT_REGS_PARM4(ctx);
u32 dst = (u32)PT_REGS_PARM5(ctx);
#endif
u16 sport = 0;
unsigned char *head;
u16 pkt_hdr;
__builtin_memset(&head, 0, sizeof(head));
__builtin_memset(&pkt_hdr, 0, sizeof(pkt_hdr));
bpf_probe_read(&head, sizeof(head), &skb->head);
bpf_probe_read(&pkt_hdr, sizeof(pkt_hdr), &skb->transport_header);
struct udphdr *udph;
__builtin_memset(&udph, 0, sizeof(udph));
// transport_header is an offset relative to skb->head
udph = (struct udphdr *)(head + pkt_hdr);
bpf_probe_read(&sport, sizeof(sport), &udph->source);
// swap the two bytes of the 16-bit source port (endianness conversion)
sport = (sport >> 8) | ((sport << 8) & 0xff00);
struct udp_key_t udp_key;
struct udp_value_t udp_value;
u32 zero_key = 0;
__builtin_memset(&udp_key, 0, sizeof(udp_key));
__builtin_memset(&udp_value, 0, sizeof(udp_value));
bpf_probe_read(&udp_key.sport, sizeof(udp_key.sport), &sport);
bpf_probe_read(&udp_key.dport, sizeof(udp_key.dport), &udph->dest);
bpf_probe_read(&udp_key.saddr, sizeof(udp_key.saddr), &src);
bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &dst);
u64 *counterVal = bpf_map_lookup_elem(&udpcounter, &zero_key);
if (counterVal == NULL){return 0;}
struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
u64 pid = bpf_get_current_pid_tgid() >> 32;
// only (re)insert for a new flow or a flow taken over by another process
if ( lookedupValue == NULL || lookedupValue->pid != pid) {
udp_value.pid = pid;
udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
udp_value.counter = *counterVal;
bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
u64 newval = *counterVal + 1;
bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
}
return 0;
};
// debug only: increment key's value by 1 in the map "bytes".
// After N calls for a key the map holds exactly N: the first hit stores 1.
// (The original stored 0 on the first hit, under-counting every key by one
// relative to its own "increment by 1" contract.)
void increment(u32 key){
    u32 *lookedupValue = bpf_map_lookup_elem(&bytes, &key);
    if (lookedupValue == NULL){
        u32 one = 1;
        bpf_map_update_elem(&bytes, &key, &one, BPF_ANY);
    }
    else {
        u32 newval = *lookedupValue + 1;
        bpf_map_update_elem(&bytes, &key, &newval, BPF_ANY);
    }
}
char _license[] SEC("license") = "GPL";
// this number will be interpreted by the elf loader
// to set the current running kernel version
u32 _version SEC("version") = 0xFFFFFFFE;
opensnitch-1.5.8.1/proto/ 0000775 0000000 0000000 00000000000 14401326716 0015205 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/proto/.gitignore 0000664 0000000 0000000 00000000006 14401326716 0017171 0 ustar 00root root 0000000 0000000 *.pyc
opensnitch-1.5.8.1/proto/Makefile 0000664 0000000 0000000 00000001105 14401326716 0016642 0 ustar 00root root 0000000 0000000 all: ../daemon/ui/protocol/ui.pb.go ../ui/opensnitch/ui_pb2.py
../daemon/ui/protocol/ui.pb.go: ui.proto
protoc -I. ui.proto --go_out=../daemon/ui/protocol/ --go-grpc_out=../daemon/ui/protocol/ --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
../ui/opensnitch/ui_pb2.py: ui.proto
python3 -m grpc_tools.protoc -I. --python_out=../ui/opensnitch/ --grpc_python_out=../ui/opensnitch/ ui.proto
clean:
@rm -rf ../daemon/ui/protocol/ui.pb.go
@rm -rf ../daemon/ui/protocol/ui_grpc.pb.go
@rm -rf ../ui/opensnitch/ui_pb2.py
@rm -rf ../ui/opensnitch/ui_pb2_grpc.py
opensnitch-1.5.8.1/proto/ui.proto 0000664 0000000 0000000 00000005355 14401326716 0016717 0 ustar 00root root 0000000 0000000 syntax = "proto3";
package protocol;
option go_package = "github.com/evilsocket/opensnitch/daemon/ui/protocol";
service UI {
rpc Ping(PingRequest) returns (PingReply) {}
rpc AskRule (Connection) returns (Rule) {}
rpc Subscribe (ClientConfig) returns (ClientConfig) {}
rpc Notifications (stream NotificationReply) returns (stream Notification) {}
}
message Event {
string time = 1;
Connection connection = 2;
Rule rule = 3;
int64 unixnano = 4;
}
// Runtime statistics the daemon reports to the GUI, aggregated by several
// dimensions. The aggregation fields are proto3 maps keyed by the
// dimension's textual value with uint64 hit counts; their type parameters
// were restored here (proto3 map fields are invalid without them).
message Statistics {
    string daemon_version = 1;
    uint64 rules = 2;
    uint64 uptime = 3;
    uint64 dns_responses = 4;
    uint64 connections = 5;
    uint64 ignored = 6;
    uint64 accepted = 7;
    uint64 dropped = 8;
    uint64 rule_hits = 9;
    uint64 rule_misses = 10;
    map<string, uint64> by_proto = 11;
    map<string, uint64> by_address = 12;
    map<string, uint64> by_host = 13;
    map<string, uint64> by_port = 14;
    map<string, uint64> by_uid = 15;
    map<string, uint64> by_executable = 16;
    repeated Event events = 17;
}
message PingRequest {
uint64 id = 1;
Statistics stats = 2;
}
message PingReply {
uint64 id = 1;
}
// A network connection intercepted by the daemon, including the process
// that originated it. process_env is a proto3 map of environment
// variable name to value; its type parameters were restored here
// (proto3 map fields are invalid without them).
message Connection {
    string protocol = 1;
    string src_ip = 2;
    uint32 src_port = 3;
    string dst_ip = 4;
    string dst_host = 5;
    uint32 dst_port = 6;
    uint32 user_id = 7;
    uint32 process_id = 8;
    string process_path = 9;
    string process_cwd = 10;
    repeated string process_args = 11;
    map<string, string> process_env = 12;
}
message Operator {
string type = 1;
string operand = 2;
string data = 3;
bool sensitive = 4;
}
message Rule {
string name = 1;
bool enabled = 2;
bool precedence = 3;
string action = 4;
string duration = 5;
Operator operator = 6;
}
enum Action {
NONE = 0;
LOAD_FIREWALL = 1;
UNLOAD_FIREWALL = 2;
CHANGE_CONFIG = 3;
ENABLE_RULE = 4;
DISABLE_RULE = 5;
DELETE_RULE = 6;
CHANGE_RULE = 7;
LOG_LEVEL = 8;
STOP = 9;
MONITOR_PROCESS = 10;
STOP_MONITOR_PROCESS = 11;
}
// client configuration sent on Subscribe()
message ClientConfig {
uint64 id = 1;
string name = 2;
string version = 3;
bool isFirewallRunning = 4;
// daemon configuration as json string
string config = 5;
uint32 logLevel = 6;
repeated Rule rules = 7;
}
// notification sent to the clients (daemons)
message Notification {
uint64 id = 1;
string clientName = 2;
string serverName = 3;
// CHANGE_CONFIG: 2, data: {"default_timeout": 1, ...}
Action type = 4;
string data = 5;
repeated Rule rules = 6;
}
// notification reply sent to the server (GUI)
message NotificationReply {
uint64 id = 1;
NotificationReplyCode code = 2;
string data = 3;
}
enum NotificationReplyCode {
OK = 0;
ERROR = 1;
}
opensnitch-1.5.8.1/release.sh 0000775 0000000 0000000 00000001243 14401326716 0016021 0 ustar 00root root 0000000 0000000 #!/bin/bash
# nothing to see here, just a utility i use to create new releases ^_^
#
# Reads the current version from daemon/core/version.go, asks for the new
# one, rewrites it in every file listed in TO_UPDATE, then commits, tags
# and pushes the release.

CURRENT_VERSION=$(grep Version daemon/core/version.go | cut -d '"' -f 2)

# files that embed the release version string
TO_UPDATE=(
daemon/core/version.go
ui/version.py
)

echo -n "Current version is $CURRENT_VERSION, select new version: "
# -r: keep backslashes in the typed version literal
read -r NEW_VERSION

# refuse to run the sed/git pipeline with an empty version
if [ -z "$NEW_VERSION" ]; then
echo "No version given, aborting."
exit 1
fi

# plain echo does not interpret \n, so print the blank line separately
echo "Creating version $NEW_VERSION ..."
echo

for file in "${TO_UPDATE[@]}"
do
echo "Patching $file ..."
sed -i "s/$CURRENT_VERSION/$NEW_VERSION/g" "$file"
git add "$file"
done

git commit -m "Releasing v$NEW_VERSION"
git push
git tag -a v$NEW_VERSION -m "Release v$NEW_VERSION"
git push origin v$NEW_VERSION

echo
echo "All done, v$NEW_VERSION released ^_^"
opensnitch-1.5.8.1/screenshots/ 0000775 0000000 0000000 00000000000 14401326716 0016402 5 ustar 00root root 0000000 0000000 opensnitch-1.5.8.1/screenshots/opensnitch-ui-general-tab-deny.png 0000664 0000000 0000000 00000325323 14401326716 0025021 0 ustar 00root root 0000000 0000000 PNG
IHDR G 8 sBIT|d tEXtSoftware mate-screenshotȖJ IDATxyXTeþ
*+ZjKkefV˲̲E}\3-r/5rBPPPP]9?hN(ܟ8˳3s9g43j(%2!B!>[g^SׯOƍqtt|%B!BQfRSS VΞ=ܹs5
{P!B!DJNNfʕ̝;W_B!Bruu^z X5J;666\,!B!;?!B!Ŀ` ++ljj`ggVqqq<F!B!VAff&FQ]AhDד-h4⒴$ B!B<E!--lEB
TP'''t:z4nܸABBD999*Hz P!BR۷o5jA]o4j₷7.\ ''t*Hz+ɪB!BQƲAQn]0ESVVVz%<: TRSSy&7o$55uBKlR6i)c+GQz=zիh0w|iZW^W")ztU 111舵&)KH}2Woat=<ٕFgZNʶ?ùz#,3Л>M+|PM]{UZ!;:;t%?qrbޞ܋*e$wYnġsL
u{=O룞<$cᆲen
%8VO<%Xi!h4b0pqqV}8JIBff&ΡÒߢEQڵSݻw/6A)F 333qrr~Fqrr"33nBr a~xj@If 1KP~9f6tWLe$Ǫ)b~! C777kի8pl999镑NFFF*:uh"v``jPx/A)F Gכ-"<<ggMR!I.ƣx1[e~-HcaVg˱[Ӎ7~
}&ؕj}m1}DOYhВ±uhUj?|IؖWBQrrr 7h:vVb x{{̙3Y&͚5Sbkk^`00gBCC={6:VVVf۔U~mڴښsrOFA۶mȸJPwP_&&JMަٞZ'jBsYw$OҨEߍ[BRq i<@Ia_h4NTז;SYT
0]k`BޠuFgwHpo㠮-{N)O%W֩m~N7խ _Ʃ3(釘?iU:SAMv"VWGұq[^`Q6$էG5>qn<߷xtԩ> 9[vr<ڢT&~~7R18T۷)}^lMQyPXxÞ$Vk]x&ƍ㫯O>^`2vX4
FRQ,X@tt4SLaΜ9̙3#GM,i9:ubǎk֭[fjkkKllzB!"AW=>85iּ:ZEOXr?su);9_2V7صd.e>$T-p_v[1#dyJ|pH%l
7ZWgdO9v1xEaΎ-:RSuNq
ΟɷN`]u}MD*x?/7i[EEU[|Rp_ BJ&Y9Dq)GKFxj,JS
p$5 kMQz\IpW4xzEż_IuyMOP(*AĹXyѸQ 9߲%QϜ>r#,Ixvu*+yŝE_) VEѐm61991cȜ9sVo&fegghjK^o|G
U~mعs';vd߾}l6wbΎTΝ;GJ ~:2SdX98R(-N.4R^})d=ʀWWT5tJ'?9rxgԲ?eں(BnĠ0,̉92'v9C
?Ea;L]T]z'#v-<xsL'*坽wБk~1XJW5Aߌ3C4l}ߊnh_DYMHҺV"%2Tgw\ӓtp%f4?
#dlb6tcUtўWOҙ0;/z?I6
7IRwlE?Aj }dQŤ/Vʊ۷od.))3{lƌCbbbaj%XT~mڴaذat:n߾N?$<<EQ|*?0bڵkG
;w.жmRJ t:ʕ+@s<#=(dp[Bpvt@CV 7,qDǓm˓UyͶFgW%5BAc`^y
7.?x87.fr/ƴ(_$G]!IR^}*ZZO-is8wX~fCcukYYAL#MD_>캤\1x bÅps9> okTRH:Eſn]5 5nDW#葛/*|«z5?^'I˓WC`Kp4fa$1l7'MR06;VڏѬBNħא=zѥ%JK$(xILL$ƍ͛7
̾T5lؐlr/
iذٔӲʕ+k4nܘ7nШQ#^{5NUitSZ6(J
gO] j@e/V;fTQ2/OZ;*=F+_a\iO(O3Gk4%Ѻ'9t~5m Sd!|nM\HkjkB6mB;qӽ!>
h:Vy*$;?~GEХJvEUcUă[s"~NrvaE@Y5a{5hO:Éټ 7c`;k= !EQpqqʕ+ܾ};;FB$==UM/55YYYj@X1LHIIaÆlْ7n:?(=Bи6䱺ΖagsOl<h=`[$f)BP_nZKWȩ䇟߯p,'D2-Zki;Kim!'T
.*b1riKD42k57tϝUq>55GHD
ǻN]*(18/Ѡ[S'a Pu.F
Urvw%3%:VG!!<7Z4upMEC>F՚7HLLqQFCŊ-^^PR%4]}1Ð_\\IIIfy$%%wOA)*>KxRYͧSZ4I\MFX'>Em;{㔄},qU!L8V~RG?o:O;?+6MMֿ>wzv|$9V;DborhKNeOJ^?|*9wߗ}OZ::iyŬhV#j&H;ϻmtvHǣ~Հwa؎.Anʪw#yoas9՞C]jngOx$Z4͍luSl\]g
N]$ۡ.]WÊ;ʧ#t?ɧ/Ǫ4|{ XQX}^"vh"~Dz@!آ!_V1խNET.Ѫh5a_y*B{z+W]'ãTU'Bhso]õHn+Pu?L~Af@j&pQFN䥐T
cM8ސEz LtkN
w-)qo<7]PIwVtS|!(b0`Hz6};ATCFE潙NSDY>7FhPf;urD
g:H*<156ݭs:s8Ρ-Cx}/+ ##υՒФ/u1SGΐR;݂^()9VT 5ipzJŜ9wH_!L<==ڵk\vdeeիWv>>>xzz]-8?ͨQQFBa1Y8[Naa(6G%E/4J_ooQ))LWP!r١HLL$))z=:{{{ ''G,!yP![.)OI(hE eYro2*@1BBff&Zggg<<