pax_global_header00006660000000000000000000000064143407271640014522gustar00rootroot0000000000000052 comment=b332f2076246caa3b1006218b104924032fb1627 nebula-1.6.1+dfsg/000077500000000000000000000000001434072716400137145ustar00rootroot00000000000000nebula-1.6.1+dfsg/AUTHORS000066400000000000000000000004471434072716400147710ustar00rootroot00000000000000# This is the official list of Nebula authors for copyright purposes. # Names should be added to this file as: # Name or Organization # The email address is not required for organizations. Slack Technologies, Inc. Nate Brown Ryan Huber nebula-1.6.1+dfsg/CHANGELOG.md000066400000000000000000000350671434072716400155400ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] ## [1.6.1] - 2022-09-26 ### Fixed - Refuse to process underlay packets received from overlay IPs. This prevents confusion on hosts that have unsafe routes configured. (#741) - The ssh `reload` command did not work on Windows, since it relied on sending a SIGHUP signal internally. This has been fixed. (#725) - A regression in v1.5.2 that broke unsafe routes on Mobile clients has been fixed. (#729) ## [1.6.0] - 2022-06-30 ### Added - Experimental: nebula clients can be configured to act as relays for other nebula clients. Primarily useful when stubborn NATs make a direct tunnel impossible. (#678) - Configuration option to report manually specified `ip:port`s to lighthouses. (#650) - Windows arm64 build. (#638) - `punchy` and most `lighthouse` config options now support hot reloading. (#649) ### Changed - Build against go 1.18. (#656) - Promoted `routines` config from experimental to supported feature. (#702) - Dependencies updated. 
(#664) ### Fixed - Packets destined for the same host that sent it will be returned on MacOS. This matches the default behavior of other operating systems. (#501) - `unsafe_route` configuration will no longer crash on Windows. (#648) - A few panics that were introduced in 1.5.x. (#657, #658, #675) ### Security - You can set `listen.send_recv_error` to control the conditions in which `recv_error` messages are sent. Sending these messages can expose the fact that Nebula is running on a host, but it speeds up re-handshaking. (#670) ### Removed - `x509` config stanza support has been removed. (#685) ## [1.5.2] - 2021-12-14 ### Added - Warn when a non lighthouse node does not have lighthouse hosts configured. (#587) ### Changed - No longer fatals if expired CA certificates are present in `pki.ca`, as long as 1 valid CA is present. (#599) - `nebula-cert` will now enforce ipv4 addresses. (#604) - Warn on macOS if an unsafe route cannot be created due to a collision with an existing route. (#610) - Warn if you set a route MTU on platforms where we don't support it. (#611) ### Fixed - Rare race condition when tearing down a tunnel due to `recv_error` and sending packets on another thread. (#590) - Bug in `routes` and `unsafe_routes` handling that was introduced in 1.5.0. (#595) - `-test` mode no longer results in a crash. (#602) ### Removed - `x509.ca` config alias for `pki.ca`. (#604) ### Security - Upgraded `golang.org/x/crypto` to address an issue which allowed unauthenticated clients to cause a panic in SSH servers. (#603) ## 1.5.1 - 2021-12-13 (This release was skipped due to discovering #610 and #611 after the tag was created.) ## [1.5.0] - 2021-11-11 ### Added - SSH `print-cert` has a new `-raw` flag to get the PEM representation of a certificate. (#483) - New build architecture: Linux `riscv64`. (#542) - New experimental config option `remote_allow_ranges`. 
(#540) - New config option `pki.disconnect_invalid` that will tear down tunnels when they become invalid (through expiry or removal of root trust). Default is `false`. Note, this will not currently recognize if a remote has changed certificates since the last handshake. (#370) - New config option `unsafe_routes..metric` will set a metric for a specific unsafe route. It's useful if you have more than one identical route and want to prefer one against the other. (#353) ### Changed - Build against go 1.17. (#553) - Build with `CGO_ENABLED=0` set, to create more portable binaries. This could have an effect on DNS resolution if you rely on anything non-standard. (#421) - Windows now uses the [wintun](https://www.wintun.net/) driver which does not require installation. This driver is a large improvement over the TAP driver that was used in previous versions. If you had a previous version of `nebula` running, you will want to disable the tap driver in Control Panel, or uninstall the `tap0901` driver before running this version. (#289) - Darwin binaries are now universal (works on both amd64 and arm64), signed, and shipped in a notarized zip file. `nebula-darwin.zip` will be the only darwin release artifact. (#571) - Darwin uses syscalls and AF_ROUTE to configure the routing table, instead of using `/sbin/route`. Setting `tun.dev` is now allowed on Darwin as well, it must be in the format `utun[0-9]+` or it will be ignored. (#163) ### Deprecated - The `preferred_ranges` option has been supported as a replacement for `local_range` since v1.0.0. It has now been documented and `local_range` has been officially deprecated. (#541) ### Fixed - Valid recv_error packets were incorrectly marked as "spoofing" and ignored. (#482) - SSH server handles single `exec` requests correctly. (#483) - Signing a certificate with `nebula-cert sign` now verifies that the supplied ca-key matches the ca-crt. 
(#503) - If `preferred_ranges` (or the deprecated `local_range`) is configured, we will immediately switch to a preferred remote address after the reception of a handshake packet (instead of waiting until 1,000 packets have been sent). (#532) - A race condition when `punchy.respond` is enabled and ensures the correct vpn ip is sent a punch back response in highly queried node. (#566) - Fix a rare crash during handshake due to a race condition. (#535) ## [1.4.0] - 2021-05-11 ### Added - Ability to output qr code images in `print`, `ca`, and `sign` modes for `nebula-cert`. This is useful when configuring mobile clients. (#297) - Experimental: Nebula can now do work on more than 2 cpu cores in send and receive paths via the new `routines` config option. (#382, #391, #395) - ICMP ping requests can be responded to when the `tun.disabled` is `true`. This is useful so that you can "ping" a lighthouse running in this mode. (#342) - Run smoke tests via `make smoke-docker`. (#287) - More reported stats, udp memory use on linux, build version (when using Prometheus), firewall, handshake, and cached packet stats. (#390, #405, #450, #453) - IPv6 support for the underlay network. (#369) - End to end testing, run with `make e2e`. (#425, #427, #428) ### Changed - Darwin will now log stdout/stderr to a file when using `-service` mode. (#303) - Example systemd unit file now better arranged startup order when using `sshd` and other fixes. (#317, #412, #438) - Reduced memory utilization/garbage collection. (#320, #323, #340) - Reduced CPU utilization. (#329) - Build against go 1.16. (#381) - Refactored handshakes to improve performance and correctness. (#401, #402, #404, #416, #451) - Improved roaming support for mobile clients. (#394, #457) - Lighthouse performance and correctness improvements. (#406, #418, #429, #433, #437, #442, #449) - Better ordered startup to enable `sshd`, `stats`, and `dns` subsystems to listen on the nebula interface. 
(#375) ### Fixed - No longer report handshake packets as `lost` in stats. (#331) - Error handling in the `cert` package. (#339, #373) - Orphaned pending hostmap entries are cleaned up. (#344) - Most known data races are now resolved. (#396, #400, #424) - Refuse to run a lighthouse on an ephemeral port. (#399) - Removed the global references. (#423, #426, #446) - Reloading via ssh command avoids a panic. (#447) - Shutdown is now performed in a cleaner way. (#448) - Logs will now find their way to Windows event viewer when running under `-service` mode in Windows. (#443) ## [1.3.0] - 2020-09-22 ### Added - You can emit statistics about non-message packets by setting the option `stats.message_metrics`. You can similarly emit detailed statistics about lighthouse packets by setting the option `stats.lighthouse_metrics`. See the example config for more details. (#230) - We now support freebsd/amd64. This is experimental, please give us feedback. (#103) - We now release a binary for `linux/mips-softfloat` which has also been stripped to reduce filesize and hopefully have a better chance on running on small mips devices. (#231) - You can set `tun.disabled` to true to run a standalone lighthouse without a tun device (and thus, without root). (#269) - You can set `logging.disable_timestamp` to remove timestamps from log lines, which is useful when output is redirected to a logging system that already adds timestamps. (#288) ### Changed - Handshakes should now trigger faster, as we try to be proactive with sending them instead of waiting for the next timer tick in most cases. (#246, #265) - Previously, we would drop the conntrack table whenever firewall rules were changed during a SIGHUP. Now, we will maintain the table and just validate that an entry still matches with the new rule set. (#233) - Debug logs for firewall drops now include the reason. (#220, #239) - Logs for handshakes now include the fingerprint of the remote host. 
(#262) - Config item `pki.blacklist` is now `pki.blocklist`. (#272) - Better support for older Linux kernels. We now only set `SO_REUSEPORT` if `tun.routines` is greater than 1 (default is 1). We also only use the `recvmmsg` syscall if `listen.batch` is greater than 1 (default is 64). (#275) - It is possible to run Nebula as a library inside of another process now. Note that this is still experimental and the internal APIs around this might change in minor version releases. (#279) ### Deprecated - `pki.blacklist` is deprecated in favor of `pki.blocklist` with the same functionality. Existing configs will continue to load for this release to allow for migrations. (#272) ### Fixed - `advmss` is now set correctly for each route table entry when `tun.routes` is configured to have some routes with higher MTU. (#245) - Packets that arrive on the tun device with an unroutable destination IP are now dropped correctly, instead of wasting time making queries to the lighthouses for IP `0.0.0.0` (#267) ## [1.2.0] - 2020-04-08 ### Added - Add `logging.timestamp_format` config option. The primary purpose of this change is to allow logging timestamps with millisecond precision. (#187) - Support `unsafe_routes` on Windows. (#184) - Add `lighthouse.remote_allow_list` to filter which subnets we will use to handshake with other hosts. See the example config for more details. (#217) - Add `lighthouse.local_allow_list` to filter which local IP addresses and/or interfaces we advertise to the lighthouses. See the example config for more details. (#217) - Wireshark dissector plugin. Add this file in `dist/wireshark` to your Wireshark plugins folder to see Nebula packet headers decoded. (#216) - systemd unit for Arch, so it can be built entirely from this repo. (#216) ### Changed - Added a delay to punching via lighthouse signal to deal with race conditions in some linux conntrack implementations. (#210) See deprecated, this also adds a new `punchy.delay` option that defaults to `1s`. 
- Validate all `lighthouse.hosts` and `static_host_map` VPN IPs are in the subnet defined in our cert. Exit with a fatal error if they are not in our subnet, as this is an invalid configuration (we will not have the proper routes set up to communicate with these hosts). (#170) - Use absolute paths to system binaries on macOS and Windows. (#191) - Add configuration options for `handshakes`. This includes options to tweak `try_interval`, `retries` and `wait_rotation`. See example config for descriptions. (#179) - Allow `-config` file to not end in `.yaml` or `yml`. Useful when using `-test` and automated tools like Ansible that create temporary files without suffixes. (#189) - The config test mode, `-test`, is now more thorough and catches more parsing issues. (#177) - Various documentation and example fixes. (#196) - Improved log messages. (#181, #200) - Dependencies updated. (#188) ### Deprecated - `punchy`, `punch_back` configuration options have been collapsed under the now top level `punchy` config directive. (#210) `punchy.punch` - This is the old `punchy` option. Should we perform NAT hole punching (default false)? `punchy.respond` - This is the old `punch_back` option. Should we respond to hole punching by hole punching back (default false)? ### Fixed - Reduce memory allocations when not using `unsafe_routes`. (#198) - Ignore packets from self to self. (#192) - MTU fixed for `unsafe_routes`. (#209) ## [1.1.0] - 2020-01-17 ### Added - For macOS and Windows, build a special version of the binary that can install and manage its own service configuration. You can use this with `nebula -service`. If you are building from source, use `make service` to build this feature. - Support for `mips`, `mips64`, `386` and `ppc64le` processors on Linux. - You can now configure the DNS listen host and port with `lighthouse.dns.host` and `lighthouse.dns.port`. - Subnet and routing support. 
You can now add a `unsafe_routes` section to your config to allow hosts to act as gateways to other subnets. Read the example config for more details. This is supported on Linux and macOS. ### Changed - Certificates now have more verifications performed, including making sure the certificate lifespan does not exceed the lifespan of the root CA. This could cause issues if you have signed certificates with expirations beyond the expiration of your CA, and you will need to reissue your certificates. - If lighthouse interval is set to `0`, never update the lighthouse (mobile optimization). - Various documentation and example fixes. - Improved error messages. - Dependencies updated. ### Fixed - If you have a firewall rule with `group: ["one-group"]`, this will now be accepted, with a warning to use `group: "one-group"` instead. - The `listen.host` configuration option was previously ignored (the bind host was always 0.0.0.0). This option will now be honored. - The `ca_sha` and `ca_name` firewall rule options should now work correctly. ## [1.0.0] - 2019-11-19 ### Added - Initial public release. [Unreleased]: https://github.com/slackhq/nebula/compare/v1.6.1...HEAD [1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1 [1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0 [1.5.2]: https://github.com/slackhq/nebula/releases/tag/v1.5.2 [1.5.0]: https://github.com/slackhq/nebula/releases/tag/v1.5.0 [1.4.0]: https://github.com/slackhq/nebula/releases/tag/v1.4.0 [1.3.0]: https://github.com/slackhq/nebula/releases/tag/v1.3.0 [1.2.0]: https://github.com/slackhq/nebula/releases/tag/v1.2.0 [1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0 [1.0.0]: https://github.com/slackhq/nebula/releases/tag/v1.0.0 nebula-1.6.1+dfsg/LICENSE000066400000000000000000000021001434072716400147120ustar00rootroot00000000000000MIT License Copyright (c) 2018-2019 Slack Technologies, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
nebula-1.6.1+dfsg/Makefile000066400000000000000000000114621434072716400153600ustar00rootroot00000000000000GOMINVERSION = 1.18 NEBULA_CMD_PATH = "./cmd/nebula" GO111MODULE = on export GO111MODULE CGO_ENABLED = 0 export CGO_ENABLED # Set up OS specific bits ifeq ($(OS),Windows_NT) #TODO: we should be able to ditch awk as well GOVERSION := $(shell go version | awk "{print substr($$3, 3)}") GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1) NEBULA_CMD_SUFFIX = .exe NULL_FILE = nul else GOVERSION := $(shell go version | awk '{print substr($$3, 3)}') GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)") NEBULA_CMD_SUFFIX = NULL_FILE = /dev/null endif # Only defined the build number if we haven't already ifndef BUILD_NUMBER ifeq ($(shell git describe --exact-match 2>$(NULL_FILE)),) BUILD_NUMBER = $(shell git describe --abbrev=0 --match "v*" | cut -dv -f2)-$(shell git branch --show-current)-$(shell git describe --long --dirty | cut -d- -f2-) else BUILD_NUMBER = $(shell git describe --exact-match --dirty | cut -dv -f2) endif endif LDFLAGS = -X main.Build=$(BUILD_NUMBER) ALL_LINUX = linux-amd64 \ linux-386 \ linux-ppc64le \ linux-arm-5 \ linux-arm-6 \ linux-arm-7 \ linux-arm64 \ linux-mips \ linux-mipsle \ linux-mips64 \ linux-mips64le \ linux-mips-softfloat \ linux-riscv64 ALL = $(ALL_LINUX) \ darwin-amd64 \ darwin-arm64 \ freebsd-amd64 \ windows-amd64 \ windows-arm64 e2e: $(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e e2ev: TEST_FLAGS = -v e2ev: e2e e2evv: TEST_ENV += TEST_LOGS=1 e2evv: e2ev e2evvv: TEST_ENV += TEST_LOGS=2 e2evvv: e2ev e2evvvv: TEST_ENV += TEST_LOGS=3 e2evvvv: e2ev e2e-bench: TEST_FLAGS = -bench=. 
-benchmem -run=^$ e2e-bench: e2e all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert) release: $(ALL:%=build/nebula-%.tar.gz) release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz) release-freebsd: build/nebula-freebsd-amd64.tar.gz BUILD_ARGS = -trimpath bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe mv $? . bin-windows-arm64: build/windows-arm64/nebula.exe build/windows-arm64/nebula-cert.exe mv $? . bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert mv $? . bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert mv $? . bin: go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH} go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert install: go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH} go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*)) build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*)) # Build an extra small binary for mips-softfloat build/linux-mips-softfloat/%: LDFLAGS += -s -w build/%/nebula: .FORCE GOOS=$(firstword $(subst -, , $*)) \ GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \ go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH} build/%/nebula-cert: .FORCE GOOS=$(firstword $(subst -, , $*)) \ GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \ go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ./cmd/nebula-cert build/%/nebula.exe: build/%/nebula mv $< $@ build/%/nebula-cert.exe: build/%/nebula-cert mv $< $@ build/nebula-%.tar.gz: build/%/nebula build/%/nebula-cert tar -zcv -C build/$* -f $@ nebula nebula-cert build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe vet: go vet -v ./... test: go test -v ./... test-cov-html: go test -coverprofile=coverage.out go tool cover -html=coverage.out bench: go test -bench=. 
bench-cpu: go test -bench=. -benchtime=5s -cpuprofile=cpu.pprof go tool pprof go-audit.test cpu.pprof bench-cpu-long: go test -bench=. -benchtime=60s -cpuprofile=cpu.pprof go tool pprof go-audit.test cpu.pprof proto: nebula.pb.go cert/cert.pb.go nebula.pb.go: nebula.proto .FORCE go build github.com/gogo/protobuf/protoc-gen-gogofaster PATH="$(CURDIR):$(PATH)" protoc --gogofaster_out=paths=source_relative:. $< rm protoc-gen-gogofaster cert/cert.pb.go: cert/cert.proto .FORCE $(MAKE) -C cert cert.pb.go service: @echo > $(NULL_FILE) $(eval NEBULA_CMD_PATH := "./cmd/nebula-service") ifeq ($(words $(MAKECMDGOALS)),1) @$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory endif bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert smoke-docker: bin-docker cd .github/workflows/smoke/ && ./build.sh cd .github/workflows/smoke/ && ./smoke.sh smoke-relay-docker: bin-docker cd .github/workflows/smoke/ && ./build-relay.sh cd .github/workflows/smoke/ && ./smoke-relay.sh smoke-docker-race: BUILD_ARGS = -race smoke-docker-race: smoke-docker .FORCE: .PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race .DEFAULT_GOAL := bin nebula-1.6.1+dfsg/README.md000066400000000000000000000145151434072716400152010ustar00rootroot00000000000000## What is Nebula? Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, Windows, iOS, and Android. It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers. Nebula incorporates a number of existing concepts like encryption, security groups, certificates, and tunneling, and each of those individual pieces existed before Nebula in various forms. 
What makes Nebula different to existing offerings is that it brings all of these ideas together, resulting in a sum that is greater than its individual parts. Further documentation can be found [here](https://www.defined.net/nebula/). You can read more about Nebula [here](https://medium.com/p/884110a5579). You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU). ## Supported Platforms #### Desktop and Server Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for downloads or see the [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) section. - Linux - 64 and 32 bit, arm, and others - Windows - MacOS - Freebsd #### Distribution Packages - [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/) ``` $ sudo pacman -S nebula ``` - [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/) ``` $ sudo dnf copr enable jdoss/nebula $ sudo dnf install nebula ``` #### Mobile - [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200) - [Android](https://play.google.com/store/apps/details?id=net.defined.mobile_nebula&pcampaignid=pcampaignidMKT-Other-global-all-co-prtnr-py-PartBadge-Mar2515-1) ## Technical Overview Nebula is a mutually authenticated peer-to-peer software defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/). Nebula uses certificates to assert a node's IP address, name, and membership within user-defined groups. Nebula's user-defined groups allow for provider agnostic traffic filtering between nodes. Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs. 
Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme. Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration. Nebula was created to provide a mechanism for groups of hosts to communicate securely, even across the internet, while enabling expressive firewall definitions similar in style to cloud security groups. ## Getting started (quickly) To set up a Nebula network, you'll need: #### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) or [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use. #### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse. Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $5/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses. Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet. #### 3. A Nebula certificate authority, which will be the root of trust for a particular Nebula network. ``` ./nebula-cert ca -name "Myorganization, Inc" ``` This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption. #### 4. 
Nebula host keys and certificates generated from that certificate authority This assumes you have four nodes, named lighthouse1, laptop, server1, host3. You can name the nodes any way you'd like, including FQDN. You'll also need to choose IP addresses and the associated subnet. In this example, we are creating a nebula network that will use 192.168.100.x/24 as its network range. This example also demonstrates nebula groups, which can later be used to define traffic rules in a nebula network. ``` ./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24" ./nebula-cert sign -name "laptop" -ip "192.168.100.2/24" -groups "laptop,home,ssh" ./nebula-cert sign -name "server1" -ip "192.168.100.9/24" -groups "servers" ./nebula-cert sign -name "host3" -ip "192.168.100.10/24" ``` #### 5. Configuration files for each host Download a copy of the nebula [example configuration](https://github.com/slackhq/nebula/blob/master/examples/config.yml). * On the lighthouse node, you'll need to ensure `am_lighthouse: true` is set. * On the individual hosts, ensure the lighthouse is defined properly in the `static_host_map` section, and is added to the lighthouse `hosts` section. #### 6. Copy nebula credentials, configuration, and binaries to each host For each host, copy the nebula binary to the host, along with `config.yaml` from step 5, and the files `ca.crt`, `{host}.crt`, and `{host}.key` from step 4. **DO NOT COPY `ca.key` TO INDIVIDUAL NODES.** #### 7. Run nebula on each host ``` ./nebula -config /path/to/config.yaml ``` ## Building Nebula from source Download go and clone this repo. Change to the nebula directory. To build nebula for all platforms: `make all` To build nebula for a specific platform (ex, Windows): `make bin-windows` See the [Makefile](Makefile) for more details on build targets ## Credits Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang. 
nebula-1.6.1+dfsg/allow_list.go000066400000000000000000000210321434072716400164120ustar00rootroot00000000000000package nebula import ( "fmt" "net" "regexp" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/iputil" ) type AllowList struct { // The values of this cidrTree are `bool`, signifying allow/deny cidrTree *cidr.Tree6 } type RemoteAllowList struct { AllowList *AllowList // Inside Range Specific, keys of this tree are inside CIDRs and values // are *AllowList insideAllowLists *cidr.Tree6 } type LocalAllowList struct { AllowList *AllowList // To avoid ambiguity, all rules must be true, or all rules must be false. nameRules []AllowListNameRule } type AllowListNameRule struct { Name *regexp.Regexp Allow bool } func NewLocalAllowListFromConfig(c *config.C, k string) (*LocalAllowList, error) { var nameRules []AllowListNameRule handleKey := func(key string, value interface{}) (bool, error) { if key == "interfaces" { var err error nameRules, err = getAllowListInterfaces(k, value) if err != nil { return false, err } return true, nil } return false, nil } al, err := newAllowListFromConfig(c, k, handleKey) if err != nil { return nil, err } return &LocalAllowList{AllowList: al, nameRules: nameRules}, nil } func NewRemoteAllowListFromConfig(c *config.C, k, rangesKey string) (*RemoteAllowList, error) { al, err := newAllowListFromConfig(c, k, nil) if err != nil { return nil, err } remoteAllowRanges, err := getRemoteAllowRanges(c, rangesKey) if err != nil { return nil, err } return &RemoteAllowList{AllowList: al, insideAllowLists: remoteAllowRanges}, nil } // If the handleKey func returns true, the rest of the parsing is skipped // for this key. This allows parsing of special values like `interfaces`. 
func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) { r := c.Get(k) if r == nil { return nil, nil } return newAllowList(k, r, handleKey) } // If the handleKey func returns true, the rest of the parsing is skipped // for this key. This allows parsing of special values like `interfaces`. func newAllowList(k string, raw interface{}, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) { rawMap, ok := raw.(map[interface{}]interface{}) if !ok { return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw) } tree := cidr.NewTree6() // Keep track of the rules we have added for both ipv4 and ipv6 type allowListRules struct { firstValue bool allValuesMatch bool defaultSet bool allValues bool } rules4 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false} rules6 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false} for rawKey, rawValue := range rawMap { rawCIDR, ok := rawKey.(string) if !ok { return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey) } if handleKey != nil { handled, err := handleKey(rawCIDR, rawValue) if err != nil { return nil, err } if handled { continue } } value, ok := rawValue.(bool) if !ok { return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue) } _, ipNet, err := net.ParseCIDR(rawCIDR) if err != nil { return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR) } // TODO: should we error on duplicate CIDRs in the config? 
tree.AddCIDR(ipNet, value) maskBits, maskSize := ipNet.Mask.Size() var rules *allowListRules if maskSize == 32 { rules = &rules4 } else { rules = &rules6 } if rules.firstValue { rules.allValues = value rules.firstValue = false } else { if value != rules.allValues { rules.allValuesMatch = false } } // Check if this is 0.0.0.0/0 or ::/0 if maskBits == 0 { rules.defaultSet = true } } if !rules4.defaultSet { if rules4.allValuesMatch { _, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0") tree.AddCIDR(zeroCIDR, !rules4.allValues) } else { return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k) } } if !rules6.defaultSet { if rules6.allValuesMatch { _, zeroCIDR, _ := net.ParseCIDR("::/0") tree.AddCIDR(zeroCIDR, !rules6.allValues) } else { return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k) } } return &AllowList{cidrTree: tree}, nil } func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) { var nameRules []AllowListNameRule rawRules, ok := v.(map[interface{}]interface{}) if !ok { return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v) } firstEntry := true var allValues bool for rawName, rawAllow := range rawRules { name, ok := rawName.(string) if !ok { return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName) } allow, ok := rawAllow.(bool) if !ok { return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow) } nameRE, err := regexp.Compile("^" + name + "$") if err != nil { return nil, fmt.Errorf("config `%s.interfaces` has invalid key: %s: %v", k, name, err) } nameRules = append(nameRules, AllowListNameRule{ Name: nameRE, Allow: allow, }) if firstEntry { allValues = allow firstEntry = false } else { if allow != allValues { return nil, fmt.Errorf("config `%s.interfaces` values must all be the same true/false value", k) } } } return 
nameRules, nil } func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) { value := c.Get(k) if value == nil { return nil, nil } remoteAllowRanges := cidr.NewTree6() rawMap, ok := value.(map[interface{}]interface{}) if !ok { return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value) } for rawKey, rawValue := range rawMap { rawCIDR, ok := rawKey.(string) if !ok { return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey) } allowList, err := newAllowList(fmt.Sprintf("%s.%s", k, rawCIDR), rawValue, nil) if err != nil { return nil, err } _, ipNet, err := net.ParseCIDR(rawCIDR) if err != nil { return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR) } remoteAllowRanges.AddCIDR(ipNet, allowList) } return remoteAllowRanges, nil } func (al *AllowList) Allow(ip net.IP) bool { if al == nil { return true } result := al.cidrTree.MostSpecificContains(ip) switch v := result.(type) { case bool: return v default: panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result)) } } func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool { if al == nil { return true } result := al.cidrTree.MostSpecificContainsIpV4(ip) switch v := result.(type) { case bool: return v default: panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result)) } } func (al *AllowList) AllowIpV6(hi, lo uint64) bool { if al == nil { return true } result := al.cidrTree.MostSpecificContainsIpV6(hi, lo) switch v := result.(type) { case bool: return v default: panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result)) } } func (al *LocalAllowList) Allow(ip net.IP) bool { if al == nil { return true } return al.AllowList.Allow(ip) } func (al *LocalAllowList) AllowName(name string) bool { if al == nil || len(al.nameRules) == 0 { return true } for _, rule := range al.nameRules { if rule.Name.MatchString(name) { return rule.Allow } } // If no rules match, return the default, which is the inverse of the rules 
return !al.nameRules[0].Allow } func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool { if al == nil { return true } return al.AllowList.Allow(ip) } func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool { if !al.getInsideAllowList(vpnIp).Allow(ip) { return false } return al.AllowList.Allow(ip) } func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool { if al == nil { return true } if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) { return false } return al.AllowList.AllowIpV4(ip) } func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool { if al == nil { return true } if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) { return false } return al.AllowList.AllowIpV6(hi, lo) } func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList { if al.insideAllowLists != nil { inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp) if inside != nil { return inside.(*AllowList) } } return nil } nebula-1.6.1+dfsg/allow_list_test.go000066400000000000000000000107321434072716400174560ustar00rootroot00000000000000package nebula import ( "net" "regexp" "testing" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" ) func TestNewAllowListFromConfig(t *testing.T) { l := test.NewLogger() c := config.NewC(l) c.Settings["allowlist"] = map[interface{}]interface{}{ "192.168.0.0": true, } r, err := newAllowListFromConfig(c, "allowlist", nil) assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0") assert.Nil(t, r) c.Settings["allowlist"] = map[interface{}]interface{}{ "192.168.0.0/16": "abc", } r, err = newAllowListFromConfig(c, "allowlist", nil) assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc") c.Settings["allowlist"] = map[interface{}]interface{}{ "192.168.0.0/16": true, "10.0.0.0/8": false, } r, err = newAllowListFromConfig(c, "allowlist", nil) assert.EqualError(t, 
err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0") c.Settings["allowlist"] = map[interface{}]interface{}{ "0.0.0.0/0": true, "10.0.0.0/8": false, "10.42.42.0/24": true, "fd00::/8": true, "fd00:fd00::/16": false, } r, err = newAllowListFromConfig(c, "allowlist", nil) assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0") c.Settings["allowlist"] = map[interface{}]interface{}{ "0.0.0.0/0": true, "10.0.0.0/8": false, "10.42.42.0/24": true, } r, err = newAllowListFromConfig(c, "allowlist", nil) if assert.NoError(t, err) { assert.NotNil(t, r) } c.Settings["allowlist"] = map[interface{}]interface{}{ "0.0.0.0/0": true, "10.0.0.0/8": false, "10.42.42.0/24": true, "::/0": false, "fd00::/8": true, "fd00:fd00::/16": false, } r, err = newAllowListFromConfig(c, "allowlist", nil) if assert.NoError(t, err) { assert.NotNil(t, r) } // Test interface names c.Settings["allowlist"] = map[interface{}]interface{}{ "interfaces": map[interface{}]interface{}{ `docker.*`: "foo", }, } lr, err := NewLocalAllowListFromConfig(c, "allowlist") assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo") c.Settings["allowlist"] = map[interface{}]interface{}{ "interfaces": map[interface{}]interface{}{ `docker.*`: false, `eth.*`: true, }, } lr, err = NewLocalAllowListFromConfig(c, "allowlist") assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value") c.Settings["allowlist"] = map[interface{}]interface{}{ "interfaces": map[interface{}]interface{}{ `docker.*`: false, }, } lr, err = NewLocalAllowListFromConfig(c, "allowlist") if assert.NoError(t, err) { assert.NotNil(t, lr) } } func TestAllowList_Allow(t *testing.T) { assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1"))) tree := cidr.NewTree6() tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true) tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false) 
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true) tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true) tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true) tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false) tree.AddCIDR(cidr.Parse("::1/128"), true) tree.AddCIDR(cidr.Parse("::2/128"), false) al := &AllowList{cidrTree: tree} assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1"))) assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4"))) assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42"))) assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41"))) assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1"))) assert.Equal(t, true, al.Allow(net.ParseIP("::1"))) assert.Equal(t, false, al.Allow(net.ParseIP("::2"))) } func TestLocalAllowList_AllowName(t *testing.T) { assert.Equal(t, true, ((*LocalAllowList)(nil)).AllowName("docker0")) rules := []AllowListNameRule{ {Name: regexp.MustCompile("^docker.*$"), Allow: false}, {Name: regexp.MustCompile("^tun.*$"), Allow: false}, } al := &LocalAllowList{nameRules: rules} assert.Equal(t, false, al.AllowName("docker0")) assert.Equal(t, false, al.AllowName("tun0")) assert.Equal(t, true, al.AllowName("eth0")) rules = []AllowListNameRule{ {Name: regexp.MustCompile("^eth.*$"), Allow: true}, {Name: regexp.MustCompile("^ens.*$"), Allow: true}, } al = &LocalAllowList{nameRules: rules} assert.Equal(t, false, al.AllowName("docker0")) assert.Equal(t, true, al.AllowName("eth0")) assert.Equal(t, true, al.AllowName("ens5")) } nebula-1.6.1+dfsg/bits.go000066400000000000000000000110441434072716400152040ustar00rootroot00000000000000package nebula import ( "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" ) type Bits struct { length uint64 current uint64 bits []bool firstSeen bool lostCounter metrics.Counter dupeCounter metrics.Counter outOfWindowCounter metrics.Counter } func NewBits(bits uint64) *Bits { return &Bits{ length: bits, bits: make([]bool, bits, bits), current: 0, lostCounter: metrics.GetOrRegisterCounter("network.packets.lost", nil), 
dupeCounter: metrics.GetOrRegisterCounter("network.packets.duplicate", nil), outOfWindowCounter: metrics.GetOrRegisterCounter("network.packets.out_of_window", nil), } } func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool { // If i is the next number, return true. if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) { return true } // If i is within the window, check if it's been set already. The first window will fail this check if i > b.current-b.length { return !b.bits[i%b.length] } // If i is within the first window if i < b.length { return !b.bits[i%b.length] } // Not within the window l.Debugf("rejected a packet (top) %d %d\n", b.current, i) return false } func (b *Bits) Update(l *logrus.Logger, i uint64) bool { // If i is the next number, return true and update current. if i == b.current+1 { // Report missed packets, we can only understand what was missed after the first window has been gone through if i > b.length && b.bits[i%b.length] == false { b.lostCounter.Inc(1) } b.bits[i%b.length] = true b.current = i return true } // If i packet is greater than current but less than the maximum length of our bitmap, // flip everything in between to false and move ahead. if i > b.current && i < b.current+b.length { // In between current and i need to be zero'd to allow those packets to come in later for n := b.current + 1; n < i; n++ { b.bits[n%b.length] = false } b.bits[i%b.length] = true b.current = i //l.Debugf("missed %d packets between %d and %d\n", i-b.current, i, b.current) return true } // If i is greater than the delta between current and the total length of our bitmap, // just flip everything in the map and move ahead. 
if i >= b.current+b.length { // The current window loss will be accounted for later, only record the jump as loss up until then lost := maxInt64(0, int64(i-b.current-b.length)) //TODO: explain this if b.current == 0 { lost++ } for n := range b.bits { // Don't want to count the first window as a loss //TODO: this is likely wrong, we are wanting to track only the bit slots that we aren't going to track anymore and this is marking everything as missed //if b.bits[n] == false { // lost++ //} b.bits[n] = false } b.lostCounter.Inc(lost) if l.Level >= logrus.DebugLevel { l.WithField("receiveWindow", m{"accepted": true, "currentCounter": b.current, "incomingCounter": i, "reason": "window shifting"}). Debug("Receive window") } b.bits[i%b.length] = true b.current = i return true } // Allow for the 0 packet to come in within the first window if i == 0 && b.firstSeen == false && b.current < b.length { b.firstSeen = true b.bits[i%b.length] = true return true } // If i is within the window of current minus length (the total pat window size), // allow it and flip to true but to NOT change current. We also have to account for the first window if ((b.current >= b.length && i > b.current-b.length) || (b.current < b.length && i < b.length)) && i <= b.current { if b.current == i { if l.Level >= logrus.DebugLevel { l.WithField("receiveWindow", m{"accepted": false, "currentCounter": b.current, "incomingCounter": i, "reason": "duplicate"}). Debug("Receive window") } b.dupeCounter.Inc(1) return false } if b.bits[i%b.length] == true { if l.Level >= logrus.DebugLevel { l.WithField("receiveWindow", m{"accepted": false, "currentCounter": b.current, "incomingCounter": i, "reason": "old duplicate"}). Debug("Receive window") } b.dupeCounter.Inc(1) return false } b.bits[i%b.length] = true return true } // In all other cases, fail and don't change current. b.outOfWindowCounter.Inc(1) if l.Level >= logrus.DebugLevel { l.WithField("accepted", false). WithField("currentCounter", b.current). 
WithField("incomingCounter", i). WithField("reason", "nonsense"). Debug("Receive window") } return false } func maxInt64(a, b int64) int64 { if a > b { return a } return b } nebula-1.6.1+dfsg/bits_test.go000066400000000000000000000156141434072716400162520ustar00rootroot00000000000000package nebula import ( "testing" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" ) func TestBits(t *testing.T) { l := test.NewLogger() b := NewBits(10) // make sure it is the right size assert.Len(t, b.bits, 10) // This is initialized to zero - receive one. This should work. assert.True(t, b.Check(l, 1)) u := b.Update(l, 1) assert.True(t, u) assert.EqualValues(t, 1, b.current) g := []bool{false, true, false, false, false, false, false, false, false, false} assert.Equal(t, g, b.bits) // Receive two assert.True(t, b.Check(l, 2)) u = b.Update(l, 2) assert.True(t, u) assert.EqualValues(t, 2, b.current) g = []bool{false, true, true, false, false, false, false, false, false, false} assert.Equal(t, g, b.bits) // Receive two again - it will fail assert.False(t, b.Check(l, 2)) u = b.Update(l, 2) assert.False(t, u) assert.EqualValues(t, 2, b.current) // Jump ahead to 15, which should clear everything and set the 6th element assert.True(t, b.Check(l, 15)) u = b.Update(l, 15) assert.True(t, u) assert.EqualValues(t, 15, b.current) g = []bool{false, false, false, false, false, true, false, false, false, false} assert.Equal(t, g, b.bits) // Mark 14, which is allowed because it is in the window assert.True(t, b.Check(l, 14)) u = b.Update(l, 14) assert.True(t, u) assert.EqualValues(t, 15, b.current) g = []bool{false, false, false, false, true, true, false, false, false, false} assert.Equal(t, g, b.bits) // Mark 5, which is not allowed because it is not in the window assert.False(t, b.Check(l, 5)) u = b.Update(l, 5) assert.False(t, u) assert.EqualValues(t, 15, b.current) g = []bool{false, false, false, false, true, true, false, false, false, false} assert.Equal(t, g, b.bits) // 
make sure we handle wrapping around once to the current position b = NewBits(10) assert.True(t, b.Update(l, 1)) assert.True(t, b.Update(l, 11)) assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits) // Walk through a few windows in order b = NewBits(10) for i := uint64(0); i <= 100; i++ { assert.True(t, b.Check(l, i), "Error while checking %v", i) assert.True(t, b.Update(l, i), "Error while updating %v", i) } } func TestBitsDupeCounter(t *testing.T) { l := test.NewLogger() b := NewBits(10) b.lostCounter.Clear() b.dupeCounter.Clear() b.outOfWindowCounter.Clear() assert.True(t, b.Update(l, 1)) assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.False(t, b.Update(l, 1)) assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.True(t, b.Update(l, 2)) assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.True(t, b.Update(l, 3)) assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.False(t, b.Update(l, 1)) assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(2), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) } func TestBitsOutOfWindowCounter(t *testing.T) { l := test.NewLogger() b := NewBits(10) b.lostCounter.Clear() b.dupeCounter.Clear() b.outOfWindowCounter.Clear() assert.True(t, b.Update(l, 20)) assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.True(t, b.Update(l, 21)) assert.True(t, b.Update(l, 22)) assert.True(t, b.Update(l, 23)) assert.True(t, b.Update(l, 24)) assert.True(t, b.Update(l, 25)) assert.True(t, b.Update(l, 26)) assert.True(t, b.Update(l, 27)) assert.True(t, b.Update(l, 28)) assert.True(t, b.Update(l, 29)) assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.False(t, b.Update(l, 0)) assert.Equal(t, int64(1), b.outOfWindowCounter.Count()) //tODO: make sure lostcounter doesn't increase in orderly increment assert.Equal(t, int64(20), b.lostCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(1), 
b.outOfWindowCounter.Count()) } func TestBitsLostCounter(t *testing.T) { l := test.NewLogger() b := NewBits(10) b.lostCounter.Clear() b.dupeCounter.Clear() b.outOfWindowCounter.Clear() //assert.True(t, b.Update(0)) assert.True(t, b.Update(l, 0)) assert.True(t, b.Update(l, 20)) assert.True(t, b.Update(l, 21)) assert.True(t, b.Update(l, 22)) assert.True(t, b.Update(l, 23)) assert.True(t, b.Update(l, 24)) assert.True(t, b.Update(l, 25)) assert.True(t, b.Update(l, 26)) assert.True(t, b.Update(l, 27)) assert.True(t, b.Update(l, 28)) assert.True(t, b.Update(l, 29)) assert.Equal(t, int64(20), b.lostCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) b = NewBits(10) b.lostCounter.Clear() b.dupeCounter.Clear() b.outOfWindowCounter.Clear() assert.True(t, b.Update(l, 0)) assert.Equal(t, int64(0), b.lostCounter.Count()) assert.True(t, b.Update(l, 9)) assert.Equal(t, int64(0), b.lostCounter.Count()) // 10 will set 0 index, 0 was already set, no lost packets assert.True(t, b.Update(l, 10)) assert.Equal(t, int64(0), b.lostCounter.Count()) // 11 will set 1 index, 1 was missed, we should see 1 packet lost assert.True(t, b.Update(l, 11)) assert.Equal(t, int64(1), b.lostCounter.Count()) // Now let's fill in the window, should end up with 8 lost packets assert.True(t, b.Update(l, 12)) assert.True(t, b.Update(l, 13)) assert.True(t, b.Update(l, 14)) assert.True(t, b.Update(l, 15)) assert.True(t, b.Update(l, 16)) assert.True(t, b.Update(l, 17)) assert.True(t, b.Update(l, 18)) assert.True(t, b.Update(l, 19)) assert.Equal(t, int64(8), b.lostCounter.Count()) // Jump ahead by a window size assert.True(t, b.Update(l, 29)) assert.Equal(t, int64(8), b.lostCounter.Count()) // Now lets walk ahead normally through the window, the missed packets should fill in assert.True(t, b.Update(l, 30)) assert.True(t, b.Update(l, 31)) assert.True(t, b.Update(l, 32)) assert.True(t, b.Update(l, 33)) assert.True(t, b.Update(l, 34)) 
assert.True(t, b.Update(l, 35)) assert.True(t, b.Update(l, 36)) assert.True(t, b.Update(l, 37)) assert.True(t, b.Update(l, 38)) // 39 packets tracked, 22 seen, 17 lost assert.Equal(t, int64(17), b.lostCounter.Count()) // Jump ahead by 2 windows, should have recording 1 full window missing assert.True(t, b.Update(l, 58)) assert.Equal(t, int64(27), b.lostCounter.Count()) // Now lets walk ahead normally through the window, the missed packets should fill in from this window assert.True(t, b.Update(l, 59)) assert.True(t, b.Update(l, 60)) assert.True(t, b.Update(l, 61)) assert.True(t, b.Update(l, 62)) assert.True(t, b.Update(l, 63)) assert.True(t, b.Update(l, 64)) assert.True(t, b.Update(l, 65)) assert.True(t, b.Update(l, 66)) assert.True(t, b.Update(l, 67)) // 68 packets tracked, 32 seen, 36 missed assert.Equal(t, int64(36), b.lostCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) } func BenchmarkBits(b *testing.B) { z := NewBits(10) for n := 0; n < b.N; n++ { for i := range z.bits { z.bits[i] = true } for i := range z.bits { z.bits[i] = false } } } nebula-1.6.1+dfsg/cert.go000066400000000000000000000106341434072716400152040ustar00rootroot00000000000000package nebula import ( "errors" "fmt" "io/ioutil" "strings" "time" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/config" ) type CertState struct { certificate *cert.NebulaCertificate rawCertificate []byte rawCertificateNoKey []byte publicKey []byte privateKey []byte } func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) { // Marshal the certificate to ensure it is valid rawCertificate, err := certificate.Marshal() if err != nil { return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err) } publicKey := certificate.Details.PublicKey cs := &CertState{ rawCertificate: rawCertificate, certificate: certificate, // PublicKey has been set to nil above 
privateKey: privateKey, publicKey: publicKey, } cs.certificate.Details.PublicKey = nil rawCertNoKey, err := cs.certificate.Marshal() if err != nil { return nil, fmt.Errorf("error marshalling certificate no key: %s", err) } cs.rawCertificateNoKey = rawCertNoKey // put public key back cs.certificate.Details.PublicKey = cs.publicKey return cs, nil } func NewCertStateFromConfig(c *config.C) (*CertState, error) { var pemPrivateKey []byte var err error privPathOrPEM := c.GetString("pki.key", "") if privPathOrPEM == "" { return nil, errors.New("no pki.key path or PEM data provided") } if strings.Contains(privPathOrPEM, "-----BEGIN") { pemPrivateKey = []byte(privPathOrPEM) privPathOrPEM = "" } else { pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM) if err != nil { return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err) } } rawKey, _, err := cert.UnmarshalX25519PrivateKey(pemPrivateKey) if err != nil { return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err) } var rawCert []byte pubPathOrPEM := c.GetString("pki.cert", "") if pubPathOrPEM == "" { return nil, errors.New("no pki.cert path or PEM data provided") } if strings.Contains(pubPathOrPEM, "-----BEGIN") { rawCert = []byte(pubPathOrPEM) pubPathOrPEM = "" } else { rawCert, err = ioutil.ReadFile(pubPathOrPEM) if err != nil { return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err) } } nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert) if err != nil { return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err) } if nebulaCert.Expired(time.Now()) { return nil, fmt.Errorf("nebula certificate for this host is expired") } if len(nebulaCert.Details.Ips) == 0 { return nil, fmt.Errorf("no IPs encoded in certificate") } if err = nebulaCert.VerifyPrivateKey(rawKey); err != nil { return nil, fmt.Errorf("private key is not a pair with public key in nebula cert") } return NewCertState(nebulaCert, rawKey) } func 
loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) { var rawCA []byte var err error caPathOrPEM := c.GetString("pki.ca", "") if caPathOrPEM == "" { return nil, errors.New("no pki.ca path or PEM data provided") } if strings.Contains(caPathOrPEM, "-----BEGIN") { rawCA = []byte(caPathOrPEM) } else { rawCA, err = ioutil.ReadFile(caPathOrPEM) if err != nil { return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err) } } CAs, err := cert.NewCAPoolFromBytes(rawCA) if errors.Is(err, cert.ErrExpired) { var expired int for _, cert := range CAs.CAs { if cert.Expired(time.Now()) { expired++ l.WithField("cert", cert).Warn("expired certificate present in CA pool") } } if expired >= len(CAs.CAs) { return nil, errors.New("no valid CA certificates present") } } else if err != nil { return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err) } for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) { l.WithField("fingerprint", fp).Info("Blocklisting cert") CAs.BlocklistFingerprint(fp) } // Support deprecated config for at least one minor release to allow for migrations //TODO: remove in 2022 or later for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) { l.WithField("fingerprint", fp).Info("Blocklisting cert") l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist") CAs.BlocklistFingerprint(fp) } return CAs, nil } nebula-1.6.1+dfsg/cert/000077500000000000000000000000001434072716400146515ustar00rootroot00000000000000nebula-1.6.1+dfsg/cert/Makefile000066400000000000000000000003431434072716400163110ustar00rootroot00000000000000GO111MODULE = on export GO111MODULE cert.pb.go: cert.proto .FORCE go build google.golang.org/protobuf/cmd/protoc-gen-go PATH="$(CURDIR):$(PATH)" protoc --go_out=. 
--go_opt=paths=source_relative $< rm protoc-gen-go .FORCE: nebula-1.6.1+dfsg/cert/README.md000066400000000000000000000005201434072716400161250ustar00rootroot00000000000000## `cert` This is a library for interacting with `nebula` style certificates and authorities. A `protobuf` definition of the certificate format is also included ### Compiling the protobuf definition Make sure you have `protoc` installed. To compile for `go` with the same version of protobuf specified in go.mod: ```bash make ``` nebula-1.6.1+dfsg/cert/ca.go000066400000000000000000000063731434072716400155740ustar00rootroot00000000000000package cert import ( "errors" "fmt" "strings" "time" ) type NebulaCAPool struct { CAs map[string]*NebulaCertificate certBlocklist map[string]struct{} } // NewCAPool creates a CAPool func NewCAPool() *NebulaCAPool { ca := NebulaCAPool{ CAs: make(map[string]*NebulaCertificate), certBlocklist: make(map[string]struct{}), } return &ca } // NewCAPoolFromBytes will create a new CA pool from the provided // input bytes, which must be a PEM-encoded set of nebula certificates. // If the pool contains any expired certificates, an ErrExpired will be // returned along with the pool. The caller must handle any such errors. func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) { pool := NewCAPool() var err error var expired bool for { caPEMs, err = pool.AddCACertificate(caPEMs) if errors.Is(err, ErrExpired) { expired = true err = nil } if err != nil { return nil, err } if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" { break } } if expired { return pool, ErrExpired } return pool, nil } // AddCACertificate verifies a Nebula CA certificate and adds it to the pool // Only the first pem encoded object will be consumed, any remaining bytes are returned. 
// Parsed certificates will be verified and must be a CA func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) { c, pemBytes, err := UnmarshalNebulaCertificateFromPEM(pemBytes) if err != nil { return pemBytes, err } if !c.Details.IsCA { return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotCA) } if !c.CheckSignature(c.Details.PublicKey) { return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotSelfSigned) } sum, err := c.Sha256Sum() if err != nil { return pemBytes, fmt.Errorf("could not calculate shasum for provided CA; error: %s; %s", err, c.Details.Name) } ncp.CAs[sum] = c if c.Expired(time.Now()) { return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrExpired) } return pemBytes, nil } // BlocklistFingerprint adds a cert fingerprint to the blocklist func (ncp *NebulaCAPool) BlocklistFingerprint(f string) { ncp.certBlocklist[f] = struct{}{} } // ResetCertBlocklist removes all previously blocklisted cert fingerprints func (ncp *NebulaCAPool) ResetCertBlocklist() { ncp.certBlocklist = make(map[string]struct{}) } // IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool { h, err := c.Sha256Sum() if err != nil { return true } if _, ok := ncp.certBlocklist[h]; ok { return true } return false } // GetCAForCert attempts to return the signing certificate for the provided certificate. 
// No signature validation is performed
func (ncp *NebulaCAPool) GetCAForCert(c *NebulaCertificate) (*NebulaCertificate, error) {
	// Issuer is the hex encoded sha256 fingerprint of the signing CA; an empty
	// value means the cert never recorded who signed it.
	if c.Details.Issuer == "" {
		return nil, fmt.Errorf("no issuer in certificate")
	}

	signer, ok := ncp.CAs[c.Details.Issuer]
	if ok {
		return signer, nil
	}

	return nil, fmt.Errorf("could not find ca for the certificate")
}

// GetFingerprints returns an array of trusted CA fingerprints
func (ncp *NebulaCAPool) GetFingerprints() []string {
	fp := make([]string, len(ncp.CAs))

	i := 0
	for k := range ncp.CAs {
		fp[i] = k
		i++
	}

	return fp
}
nebula-1.6.1+dfsg/cert/cert.go000066400000000000000000000417661434072716400161500ustar00rootroot00000000000000package cert

import (
	"bytes"
	"crypto"
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"net"
	"time"

	"golang.org/x/crypto/curve25519"
	"golang.org/x/crypto/ed25519"
	"google.golang.org/protobuf/proto"
)

// publicKeyLen is the byte length of both X25519 public and private keys.
const publicKeyLen = 32

// PEM banners used to tag the different key/cert material this package handles.
const (
	CertBanner              = "NEBULA CERTIFICATE"
	X25519PrivateKeyBanner  = "NEBULA X25519 PRIVATE KEY"
	X25519PublicKeyBanner   = "NEBULA X25519 PUBLIC KEY"
	Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
	Ed25519PublicKeyBanner  = "NEBULA ED25519 PUBLIC KEY"
)

// NebulaCertificate is the parsed, usable form of a nebula certificate.
type NebulaCertificate struct {
	Details   NebulaCertificateDetails
	Signature []byte
}

// NebulaCertificateDetails is the signed portion of a certificate.
type NebulaCertificateDetails struct {
	Name    string
	Ips     []*net.IPNet
	Subnets []*net.IPNet
	Groups  []string
	// NotBefore/NotAfter bound the validity window (second resolution).
	NotBefore time.Time
	NotAfter  time.Time
	PublicKey []byte
	IsCA      bool
	// Issuer is the hex sha256 fingerprint of the signing CA; empty when self-signed.
	Issuer string

	// Map of groups for faster lookup
	InvertedGroups map[string]struct{}
}

// m is shorthand for the generic maps built in MarshalJSON.
type m map[string]interface{}

// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert
func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
	if len(b) == 0 {
		return nil, fmt.Errorf("nil byte array")
	}
	var rc RawNebulaCertificate
	err := proto.Unmarshal(b, &rc)
	if err != nil {
		return nil, err
	}

	if rc.Details == nil {
		return nil, fmt.Errorf("encoded Details was nil")
	}

	// Ips/Subnets are serialized as flat uint32 pairs: address then mask.
	if len(rc.Details.Ips)%2 != 0 {
		return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
	}

	if len(rc.Details.Subnets)%2 != 0 {
		return nil, fmt.Errorf("encoded Subnets should be in pairs, an odd number was found")
	}

	nc := NebulaCertificate{
		Details: NebulaCertificateDetails{
			Name:           rc.Details.Name,
			Groups:         make([]string, len(rc.Details.Groups)),
			Ips:            make([]*net.IPNet, len(rc.Details.Ips)/2),
			Subnets:        make([]*net.IPNet, len(rc.Details.Subnets)/2),
			NotBefore:      time.Unix(rc.Details.NotBefore, 0),
			NotAfter:       time.Unix(rc.Details.NotAfter, 0),
			PublicKey:      make([]byte, len(rc.Details.PublicKey)),
			IsCA:           rc.Details.IsCA,
			InvertedGroups: make(map[string]struct{}),
		},
		Signature: make([]byte, len(rc.Signature)),
	}

	// Copy out of the protobuf message so nc owns all of its backing storage.
	copy(nc.Signature, rc.Signature)
	copy(nc.Details.Groups, rc.Details.Groups)
	nc.Details.Issuer = hex.EncodeToString(rc.Details.Issuer)

	if len(rc.Details.PublicKey) < publicKeyLen {
		return nil, fmt.Errorf("Public key was fewer than 32 bytes; %v", len(rc.Details.PublicKey))
	}
	copy(nc.Details.PublicKey, rc.Details.PublicKey)

	// Rebuild net.IPNet values from the (address, mask) uint32 pairs; even
	// indexes carry the address, odd indexes carry the mask for that address.
	for i, rawIp := range rc.Details.Ips {
		if i%2 == 0 {
			nc.Details.Ips[i/2] = &net.IPNet{IP: int2ip(rawIp)}
		} else {
			nc.Details.Ips[i/2].Mask = net.IPMask(int2ip(rawIp))
		}
	}

	for i, rawIp := range rc.Details.Subnets {
		if i%2 == 0 {
			nc.Details.Subnets[i/2] = &net.IPNet{IP: int2ip(rawIp)}
		} else {
			nc.Details.Subnets[i/2].Mask = net.IPMask(int2ip(rawIp))
		}
	}

	// InvertedGroups mirrors Groups for O(1) membership checks (see CheckRootConstrains).
	for _, g := range rc.Details.Groups {
		nc.Details.InvertedGroups[g] = struct{}{}
	}

	return &nc, nil
}

// UnmarshalNebulaCertificateFromPEM will unmarshal the first pem block in a byte array, returning any non consumed data
// or an error on failure
func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, error) {
	p, r := pem.Decode(b)
	if p == nil {
		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	if p.Type != CertBanner {
		return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
	}
	nc, err := UnmarshalNebulaCertificate(p.Bytes)
	return nc,
		r, err
}

// MarshalX25519PrivateKey is a simple helper to PEM encode an X25519 private key
func MarshalX25519PrivateKey(b []byte) []byte {
	return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
}

// MarshalEd25519PrivateKey is a simple helper to PEM encode an Ed25519 private key
func MarshalEd25519PrivateKey(key ed25519.PrivateKey) []byte {
	return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: key})
}

// UnmarshalX25519PrivateKey will try to pem decode an X25519 private key, returning any other bytes b
// or an error on failure
func UnmarshalX25519PrivateKey(b []byte) ([]byte, []byte, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	if k.Type != X25519PrivateKeyBanner {
		return nil, r, fmt.Errorf("bytes did not contain a proper nebula X25519 private key banner")
	}
	// X25519 private keys are 32 bytes, the same length as public keys,
	// so publicKeyLen is reused here.
	if len(k.Bytes) != publicKeyLen {
		return nil, r, fmt.Errorf("key was not 32 bytes, is invalid X25519 private key")
	}

	return k.Bytes, r, nil
}

// UnmarshalEd25519PrivateKey will try to pem decode an Ed25519 private key, returning any other bytes b
// or an error on failure
func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	if k.Type != Ed25519PrivateKeyBanner {
		return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 private key banner")
	}
	if len(k.Bytes) != ed25519.PrivateKeySize {
		return nil, r, fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
	}

	return k.Bytes, r, nil
}

// MarshalX25519PublicKey is a simple helper to PEM encode an X25519 public key
func MarshalX25519PublicKey(b []byte) []byte {
	return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
}

// MarshalEd25519PublicKey is a simple helper to PEM encode an Ed25519 public key
func MarshalEd25519PublicKey(key ed25519.PublicKey) []byte {
	return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: key})
}

// UnmarshalX25519PublicKey will try to pem decode an X25519 public key, returning any other bytes b
// or an error on failure
func UnmarshalX25519PublicKey(b []byte) ([]byte, []byte, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	if k.Type != X25519PublicKeyBanner {
		return nil, r, fmt.Errorf("bytes did not contain a proper nebula X25519 public key banner")
	}
	if len(k.Bytes) != publicKeyLen {
		return nil, r, fmt.Errorf("key was not 32 bytes, is invalid X25519 public key")
	}

	return k.Bytes, r, nil
}

// UnmarshalEd25519PublicKey will try to pem decode an Ed25519 public key, returning any other bytes b
// or an error on failure
func UnmarshalEd25519PublicKey(b []byte) (ed25519.PublicKey, []byte, error) {
	k, r := pem.Decode(b)
	if k == nil {
		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
	}
	if k.Type != Ed25519PublicKeyBanner {
		return nil, r, fmt.Errorf("bytes did not contain a proper nebula Ed25519 public key banner")
	}
	if len(k.Bytes) != ed25519.PublicKeySize {
		return nil, r, fmt.Errorf("key was not 32 bytes, is invalid ed25519 public key")
	}

	return k.Bytes, r, nil
}

// Sign signs a nebula cert with the provided private key
func (nc *NebulaCertificate) Sign(key ed25519.PrivateKey) error {
	// Only the Details portion is signed; the signature is stored alongside it.
	b, err := proto.Marshal(nc.getRawDetails())
	if err != nil {
		return err
	}

	// crypto.Hash(0) selects pure Ed25519: the message is signed directly,
	// without pre-hashing.
	sig, err := key.Sign(rand.Reader, b, crypto.Hash(0))
	if err != nil {
		return err
	}
	nc.Signature = sig
	return nil
}

// CheckSignature verifies the signature against the provided public key
func (nc *NebulaCertificate) CheckSignature(key ed25519.PublicKey) bool {
	b, err := proto.Marshal(nc.getRawDetails())
	if err != nil {
		return false
	}
	return ed25519.Verify(key, b, nc.Signature)
}

// Expired will return true if the nebula cert is too young or too old compared to the provided time, otherwise false
func (nc *NebulaCertificate)
Expired(t time.Time) bool {
	return nc.Details.NotBefore.After(t) || nc.Details.NotAfter.Before(t)
}

// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
	// Blocklist is checked first so a revoked cert fails even if otherwise valid.
	if ncp.IsBlocklisted(nc) {
		return false, fmt.Errorf("certificate has been blocked")
	}

	signer, err := ncp.GetCAForCert(nc)
	if err != nil {
		return false, err
	}

	if signer.Expired(t) {
		return false, fmt.Errorf("root certificate is expired")
	}

	if nc.Expired(t) {
		return false, fmt.Errorf("certificate is expired")
	}

	if !nc.CheckSignature(signer.Details.PublicKey) {
		return false, fmt.Errorf("certificate signature did not match")
	}

	if err := nc.CheckRootConstrains(signer); err != nil {
		return false, err
	}

	return true, nil
}

// CheckRootConstrains returns an error if the certificate violates constraints set on the root (groups, ips, subnets)
func (nc *NebulaCertificate) CheckRootConstrains(signer *NebulaCertificate) error {
	// Make sure this cert does not expire after the root does
	if signer.Details.NotAfter.Before(nc.Details.NotAfter) {
		return fmt.Errorf("certificate expires after signing certificate")
	}

	// Make sure this cert does not become valid before the root does
	if signer.Details.NotBefore.After(nc.Details.NotBefore) {
		return fmt.Errorf("certificate is valid before the signing certificate")
	}

	// If the signer has a limited set of groups make sure the cert only contains a subset
	if len(signer.Details.InvertedGroups) > 0 {
		for _, g := range nc.Details.Groups {
			if _, ok := signer.Details.InvertedGroups[g]; !ok {
				return fmt.Errorf("certificate contained a group not present on the signing ca: %s", g)
			}
		}
	}

	// If the signer has a limited set of ip ranges to issue from make sure the cert only contains a subset
	if len(signer.Details.Ips) > 0 {
		for _, ip := range nc.Details.Ips {
			if !netMatch(ip, signer.Details.Ips) {
				return fmt.Errorf("certificate contained an ip assignment outside the limitations of
 the signing ca: %s", ip.String())
			}
		}
	}

	// If the signer has a limited set of subnet ranges to issue from make sure the cert only contains a subset
	if len(signer.Details.Subnets) > 0 {
		for _, subnet := range nc.Details.Subnets {
			if !netMatch(subnet, signer.Details.Subnets) {
				return fmt.Errorf("certificate contained a subnet assignment outside the limitations of the signing ca: %s", subnet)
			}
		}
	}

	return nil
}

// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
	// CA certs carry an Ed25519 key pair; non-CA certs carry an X25519 key pair.
	if nc.Details.IsCA {
		// the call to PublicKey below will panic slice bounds out of range otherwise
		if len(key) != ed25519.PrivateKeySize {
			return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
		}

		if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
			return fmt.Errorf("public key in cert and private key supplied don't match")
		}
		return nil
	}

	// Derive the X25519 public key from the private key and compare.
	pub, err := curve25519.X25519(key, curve25519.Basepoint)
	if err != nil {
		return err
	}
	if !bytes.Equal(pub, nc.Details.PublicKey) {
		return fmt.Errorf("public key in cert and private key supplied don't match")
	}

	return nil
}

// String will return a pretty printed representation of a nebula cert
func (nc *NebulaCertificate) String() string {
	if nc == nil {
		return "NebulaCertificate {}\n"
	}

	s := "NebulaCertificate {\n"
	s += "\tDetails {\n"
	s += fmt.Sprintf("\t\tName: %v\n", nc.Details.Name)

	if len(nc.Details.Ips) > 0 {
		s += "\t\tIps: [\n"
		for _, ip := range nc.Details.Ips {
			s += fmt.Sprintf("\t\t\t%v\n", ip.String())
		}
		s += "\t\t]\n"
	} else {
		s += "\t\tIps: []\n"
	}

	if len(nc.Details.Subnets) > 0 {
		s += "\t\tSubnets: [\n"
		for _, ip := range nc.Details.Subnets {
			s += fmt.Sprintf("\t\t\t%v\n", ip.String())
		}
		s += "\t\t]\n"
	} else {
		s += "\t\tSubnets: []\n"
	}

	if len(nc.Details.Groups) > 0 {
		s += "\t\tGroups: [\n"
		for _, g := range nc.Details.Groups {
			s += fmt.Sprintf("\t\t\t\"%v\"\n", g)
		}
		s += "\t\t]\n"
	} else {
		s
+= "\t\tGroups: []\n"
	}

	s += fmt.Sprintf("\t\tNot before: %v\n", nc.Details.NotBefore)
	s += fmt.Sprintf("\t\tNot After: %v\n", nc.Details.NotAfter)
	s += fmt.Sprintf("\t\tIs CA: %v\n", nc.Details.IsCA)
	s += fmt.Sprintf("\t\tIssuer: %s\n", nc.Details.Issuer)
	s += fmt.Sprintf("\t\tPublic key: %x\n", nc.Details.PublicKey)
	s += "\t}\n"
	// Fingerprint is best-effort; it is omitted if the cert fails to marshal.
	fp, err := nc.Sha256Sum()
	if err == nil {
		s += fmt.Sprintf("\tFingerprint: %s\n", fp)
	}
	s += fmt.Sprintf("\tSignature: %x\n", nc.Signature)
	s += "}"

	return s
}

// getRawDetails marshals the raw details into protobuf ready struct
func (nc *NebulaCertificate) getRawDetails() *RawNebulaCertificateDetails {
	rd := &RawNebulaCertificateDetails{
		Name:      nc.Details.Name,
		Groups:    nc.Details.Groups,
		NotBefore: nc.Details.NotBefore.Unix(),
		NotAfter:  nc.Details.NotAfter.Unix(),
		PublicKey: make([]byte, len(nc.Details.PublicKey)),
		IsCA:      nc.Details.IsCA,
	}

	// Flatten each IPNet to an (address, mask) uint32 pair, the wire format
	// expected by UnmarshalNebulaCertificate.
	for _, ipNet := range nc.Details.Ips {
		rd.Ips = append(rd.Ips, ip2int(ipNet.IP), ip2int(ipNet.Mask))
	}

	for _, ipNet := range nc.Details.Subnets {
		rd.Subnets = append(rd.Subnets, ip2int(ipNet.IP), ip2int(ipNet.Mask))
	}

	copy(rd.PublicKey, nc.Details.PublicKey[:])

	// I know, this is terrible
	// Issuer is stored hex encoded in memory but raw on the wire.
	rd.Issuer, _ = hex.DecodeString(nc.Details.Issuer)

	return rd
}

// Marshal will marshal a nebula cert into a protobuf byte array
func (nc *NebulaCertificate) Marshal() ([]byte, error) {
	rc := RawNebulaCertificate{
		Details:   nc.getRawDetails(),
		Signature: nc.Signature,
	}

	return proto.Marshal(&rc)
}

// MarshalToPEM will marshal a nebula cert into a protobuf byte array and pem encode the result
func (nc *NebulaCertificate) MarshalToPEM() ([]byte, error) {
	b, err := nc.Marshal()
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: CertBanner, Bytes: b}), nil
}

// Sha256Sum calculates a sha-256 sum of the marshaled certificate
func (nc *NebulaCertificate) Sha256Sum() (string, error) {
	b, err := nc.Marshal()
	if err != nil {
		return "", err
	}

	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}

// MarshalJSON renders the certificate as a JSON object of plain strings,
// with IPs/subnets in CIDR form and keys/signature hex encoded.
func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
	toString := func(ips []*net.IPNet) []string {
		s := []string{}
		for _, ip := range ips {
			s = append(s, ip.String())
		}
		return s
	}

	// Fingerprint errors are intentionally ignored; the field is empty on failure.
	fp, _ := nc.Sha256Sum()
	jc := m{
		"details": m{
			"name":      nc.Details.Name,
			"ips":       toString(nc.Details.Ips),
			"subnets":   toString(nc.Details.Subnets),
			"groups":    nc.Details.Groups,
			"notBefore": nc.Details.NotBefore,
			"notAfter":  nc.Details.NotAfter,
			"publicKey": fmt.Sprintf("%x", nc.Details.PublicKey),
			"isCa":      nc.Details.IsCA,
			"issuer":    nc.Details.Issuer,
		},
		"fingerprint": fp,
		"signature":   fmt.Sprintf("%x", nc.Signature),
	}
	return json.Marshal(jc)
}

//func (nc *NebulaCertificate) Copy() *NebulaCertificate {
//	r, err := nc.Marshal()
//	if err != nil {
//		//TODO
//		return nil
//	}
//
//	c, err := UnmarshalNebulaCertificate(r)
//	return c
//}

// Copy returns a deep copy of the certificate; the result shares no backing
// storage with the receiver.
func (nc *NebulaCertificate) Copy() *NebulaCertificate {
	c := &NebulaCertificate{
		Details: NebulaCertificateDetails{
			Name:           nc.Details.Name,
			Groups:         make([]string, len(nc.Details.Groups)),
			Ips:            make([]*net.IPNet, len(nc.Details.Ips)),
			Subnets:        make([]*net.IPNet, len(nc.Details.Subnets)),
			NotBefore:      nc.Details.NotBefore,
			NotAfter:       nc.Details.NotAfter,
			PublicKey:      make([]byte, len(nc.Details.PublicKey)),
			IsCA:           nc.Details.IsCA,
			Issuer:         nc.Details.Issuer,
			InvertedGroups: make(map[string]struct{}, len(nc.Details.InvertedGroups)),
		},
		Signature: make([]byte, len(nc.Signature)),
	}

	copy(c.Signature, nc.Signature)
	copy(c.Details.Groups, nc.Details.Groups)
	copy(c.Details.PublicKey, nc.Details.PublicKey)

	// IPNet holds slices, so each entry needs its own freshly copied IP and mask.
	for i, p := range nc.Details.Ips {
		c.Details.Ips[i] = &net.IPNet{
			IP:   make(net.IP, len(p.IP)),
			Mask: make(net.IPMask, len(p.Mask)),
		}
		copy(c.Details.Ips[i].IP, p.IP)
		copy(c.Details.Ips[i].Mask, p.Mask)
	}

	for i, p := range nc.Details.Subnets {
		c.Details.Subnets[i] = &net.IPNet{
			IP:   make(net.IP, len(p.IP)),
			Mask: make(net.IPMask, len(p.Mask)),
		}
		copy(c.Details.Subnets[i].IP, p.IP)
		copy(c.Details.Subnets[i].Mask, p.Mask)
	}

	for g := range nc.Details.InvertedGroups {
c.Details.InvertedGroups[g] = struct{}{} } return c } func netMatch(certIp *net.IPNet, rootIps []*net.IPNet) bool { for _, net := range rootIps { if net.Contains(certIp.IP) && maskContains(net.Mask, certIp.Mask) { return true } } return false } func maskContains(caMask, certMask net.IPMask) bool { caM := maskTo4(caMask) cM := maskTo4(certMask) // Make sure forcing to ipv4 didn't nuke us if caM == nil || cM == nil { return false } // Make sure the cert mask is not greater than the ca mask for i := 0; i < len(caMask); i++ { if caM[i] > cM[i] { return false } } return true } func maskTo4(ip net.IPMask) net.IPMask { if len(ip) == net.IPv4len { return ip } if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff { return ip[12:16] } return nil } func isZeros(b []byte) bool { for i := 0; i < len(b); i++ { if b[i] != 0 { return false } } return true } func ip2int(ip []byte) uint32 { if len(ip) == 16 { return binary.BigEndian.Uint32(ip[12:16]) } return binary.BigEndian.Uint32(ip) } func int2ip(nn uint32) net.IP { ip := make(net.IP, net.IPv4len) binary.BigEndian.PutUint32(ip, nn) return ip } nebula-1.6.1+dfsg/cert/cert.pb.go000066400000000000000000000233141434072716400165400ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.0 // protoc v3.20.0 // source: cert.proto package cert import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. 
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) type RawNebulaCertificate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,proto3" json:"Details,omitempty"` Signature []byte `protobuf:"bytes,2,opt,name=Signature,proto3" json:"Signature,omitempty"` } func (x *RawNebulaCertificate) Reset() { *x = RawNebulaCertificate{} if protoimpl.UnsafeEnabled { mi := &file_cert_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RawNebulaCertificate) String() string { return protoimpl.X.MessageStringOf(x) } func (*RawNebulaCertificate) ProtoMessage() {} func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message { mi := &file_cert_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RawNebulaCertificate.ProtoReflect.Descriptor instead. 
func (*RawNebulaCertificate) Descriptor() ([]byte, []int) { return file_cert_proto_rawDescGZIP(), []int{0} } func (x *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails { if x != nil { return x.Details } return nil } func (x *RawNebulaCertificate) GetSignature() []byte { if x != nil { return x.Signature } return nil } type RawNebulaCertificateDetails struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` // Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,proto3" json:"Ips,omitempty"` Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,proto3" json:"Subnets,omitempty"` Groups []string `protobuf:"bytes,4,rep,name=Groups,proto3" json:"Groups,omitempty"` NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"` NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"` PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"` IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"` // sha-256 of the issuer certificate, if this field is blank the cert is self-signed Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"` } func (x *RawNebulaCertificateDetails) Reset() { *x = RawNebulaCertificateDetails{} if protoimpl.UnsafeEnabled { mi := &file_cert_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RawNebulaCertificateDetails) String() string { return protoimpl.X.MessageStringOf(x) } func (*RawNebulaCertificateDetails) ProtoMessage() {} func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message { mi := &file_cert_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RawNebulaCertificateDetails.ProtoReflect.Descriptor instead. func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) { return file_cert_proto_rawDescGZIP(), []int{1} } func (x *RawNebulaCertificateDetails) GetName() string { if x != nil { return x.Name } return "" } func (x *RawNebulaCertificateDetails) GetIps() []uint32 { if x != nil { return x.Ips } return nil } func (x *RawNebulaCertificateDetails) GetSubnets() []uint32 { if x != nil { return x.Subnets } return nil } func (x *RawNebulaCertificateDetails) GetGroups() []string { if x != nil { return x.Groups } return nil } func (x *RawNebulaCertificateDetails) GetNotBefore() int64 { if x != nil { return x.NotBefore } return 0 } func (x *RawNebulaCertificateDetails) GetNotAfter() int64 { if x != nil { return x.NotAfter } return 0 } func (x *RawNebulaCertificateDetails) GetPublicKey() []byte { if x != nil { return x.PublicKey } return nil } func (x *RawNebulaCertificateDetails) GetIsCA() bool { if x != nil { return x.IsCA } return false } func (x *RawNebulaCertificateDetails) GetIssuer() []byte { if x != nil { return x.Issuer } return nil } var File_cert_proto protoreflect.FileDescriptor var file_cert_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x65, 0x72, 0x74, 0x22, 0x71, 0x0a, 0x14, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 
0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x49, 0x70, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65, 0x72, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71, 0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_cert_proto_rawDescOnce sync.Once file_cert_proto_rawDescData = file_cert_proto_rawDesc ) func 
file_cert_proto_rawDescGZIP() []byte { file_cert_proto_rawDescOnce.Do(func() { file_cert_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_proto_rawDescData) }) return file_cert_proto_rawDescData } var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_cert_proto_goTypes = []interface{}{ (*RawNebulaCertificate)(nil), // 0: cert.RawNebulaCertificate (*RawNebulaCertificateDetails)(nil), // 1: cert.RawNebulaCertificateDetails } var file_cert_proto_depIdxs = []int32{ 1, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails 1, // [1:1] is the sub-list for method output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name 1, // [1:1] is the sub-list for extension extendee 0, // [0:1] is the sub-list for field type_name } func init() { file_cert_proto_init() } func file_cert_proto_init() { if File_cert_proto != nil { return } if !protoimpl.UnsafeEnabled { file_cert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RawNebulaCertificate); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_cert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RawNebulaCertificateDetails); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_cert_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_cert_proto_goTypes, DependencyIndexes: file_cert_proto_depIdxs, MessageInfos: file_cert_proto_msgTypes, }.Build() File_cert_proto = out.File file_cert_proto_rawDesc = nil file_cert_proto_goTypes = nil file_cert_proto_depIdxs = nil } 
nebula-1.6.1+dfsg/cert/cert.proto000066400000000000000000000012501434072716400166710ustar00rootroot00000000000000syntax = "proto3"; package cert; option go_package = "github.com/slackhq/nebula/cert"; //import "google/protobuf/timestamp.proto"; message RawNebulaCertificate { RawNebulaCertificateDetails Details = 1; bytes Signature = 2; } message RawNebulaCertificateDetails { string Name = 1; // Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask repeated uint32 Ips = 2; repeated uint32 Subnets = 3; repeated string Groups = 4; int64 NotBefore = 5; int64 NotAfter = 6; bytes PublicKey = 7; bool IsCA = 8; // sha-256 of the issuer certificate, if this field is blank the cert is self-signed bytes Issuer = 9; }nebula-1.6.1+dfsg/cert/cert_test.go000066400000000000000000001033321434072716400171760ustar00rootroot00000000000000package cert import ( "crypto/rand" "fmt" "io" "net" "testing" "time" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" "golang.org/x/crypto/curve25519" "golang.org/x/crypto/ed25519" "google.golang.org/protobuf/proto" ) func TestMarshalingNebulaCertificate(t *testing.T) { before := time.Now().Add(time.Second * -60).Round(time.Second) after := time.Now().Add(time.Second * 60).Round(time.Second) pubKey := []byte("1234567890abcedfghij1234567890ab") nc := NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "testing", Ips: []*net.IPNet{ {IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, {IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, }, Subnets: []*net.IPNet{ {IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, {IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, }, Groups: []string{"test-group1", "test-group2", "test-group3"}, NotBefore: before, NotAfter: 
after, PublicKey: pubKey, IsCA: false, Issuer: "1234567890abcedfghij1234567890ab", }, Signature: []byte("1234567890abcedfghij1234567890ab"), } b, err := nc.Marshal() assert.Nil(t, err) //t.Log("Cert size:", len(b)) nc2, err := UnmarshalNebulaCertificate(b) assert.Nil(t, err) assert.Equal(t, nc.Signature, nc2.Signature) assert.Equal(t, nc.Details.Name, nc2.Details.Name) assert.Equal(t, nc.Details.NotBefore, nc2.Details.NotBefore) assert.Equal(t, nc.Details.NotAfter, nc2.Details.NotAfter) assert.Equal(t, nc.Details.PublicKey, nc2.Details.PublicKey) assert.Equal(t, nc.Details.IsCA, nc2.Details.IsCA) // IP byte arrays can be 4 or 16 in length so we have to go this route assert.Equal(t, len(nc.Details.Ips), len(nc2.Details.Ips)) for i, wIp := range nc.Details.Ips { assert.Equal(t, wIp.String(), nc2.Details.Ips[i].String()) } assert.Equal(t, len(nc.Details.Subnets), len(nc2.Details.Subnets)) for i, wIp := range nc.Details.Subnets { assert.Equal(t, wIp.String(), nc2.Details.Subnets[i].String()) } assert.EqualValues(t, nc.Details.Groups, nc2.Details.Groups) } func TestNebulaCertificate_Sign(t *testing.T) { before := time.Now().Add(time.Second * -60).Round(time.Second) after := time.Now().Add(time.Second * 60).Round(time.Second) pubKey := []byte("1234567890abcedfghij1234567890ab") nc := NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "testing", Ips: []*net.IPNet{ {IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, {IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, }, Subnets: []*net.IPNet{ {IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, {IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, }, Groups: []string{"test-group1", "test-group2", "test-group3"}, NotBefore: before, NotAfter: after, PublicKey: pubKey, 
IsCA: false, Issuer: "1234567890abcedfghij1234567890ab", }, } pub, priv, err := ed25519.GenerateKey(rand.Reader) assert.Nil(t, err) assert.False(t, nc.CheckSignature(pub)) assert.Nil(t, nc.Sign(priv)) assert.True(t, nc.CheckSignature(pub)) _, err = nc.Marshal() assert.Nil(t, err) //t.Log("Cert size:", len(b)) } func TestNebulaCertificate_Expired(t *testing.T) { nc := NebulaCertificate{ Details: NebulaCertificateDetails{ NotBefore: time.Now().Add(time.Second * -60).Round(time.Second), NotAfter: time.Now().Add(time.Second * 60).Round(time.Second), }, } assert.True(t, nc.Expired(time.Now().Add(time.Hour))) assert.True(t, nc.Expired(time.Now().Add(-time.Hour))) assert.False(t, nc.Expired(time.Now())) } func TestNebulaCertificate_MarshalJSON(t *testing.T) { time.Local = time.UTC pubKey := []byte("1234567890abcedfghij1234567890ab") nc := NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "testing", Ips: []*net.IPNet{ {IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, {IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, }, Subnets: []*net.IPNet{ {IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, {IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, }, Groups: []string{"test-group1", "test-group2", "test-group3"}, NotBefore: time.Date(1, 0, 0, 1, 0, 0, 0, time.UTC), NotAfter: time.Date(1, 0, 0, 2, 0, 0, 0, time.UTC), PublicKey: pubKey, IsCA: false, Issuer: "1234567890abcedfghij1234567890ab", }, Signature: []byte("1234567890abcedfghij1234567890ab"), } b, err := nc.MarshalJSON() assert.Nil(t, err) assert.Equal( t, 
"{\"details\":{\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"ips\":[\"10.1.1.1/24\",\"10.1.1.2/16\",\"10.1.1.3/ff00ff00\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"subnets\":[\"9.1.1.1/ff00ff00\",\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"26cb1c30ad7872c804c166b5150fa372f437aa3856b04edb4334b4470ec728e4\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\"}", string(b), ) } func TestNebulaCertificate_Verify(t *testing.T) { ca, _, caKey, err := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) h, err := ca.Sha256Sum() assert.Nil(t, err) caPool := NewCAPool() caPool.CAs[h] = ca f, err := c.Sha256Sum() assert.Nil(t, err) caPool.BlocklistFingerprint(f) v, err := c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate has been blocked") caPool.ResetCertBlocklist() v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) v, err = c.Verify(time.Now().Add(time.Hour*1000), caPool) assert.False(t, v) assert.EqualError(t, err, "root certificate is expired") c, _, _, err = newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) v, err = c.Verify(time.Now().Add(time.Minute*6), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate is expired") // Test group assertion ca, _, caKey, err = newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "test2"}) assert.Nil(t, err) caPem, err := ca.MarshalToPEM() assert.Nil(t, err) caPool = NewCAPool() 
caPool.AddCACertificate(caPem) c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1", "bad"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained a group not present on the signing ca: bad") c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{"test1"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) } func TestNebulaCertificate_Verify_IPs(t *testing.T) { _, caIp1, _ := net.ParseCIDR("10.0.0.0/16") _, caIp2, _ := net.ParseCIDR("192.168.0.0/24") ca, _, caKey, err := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{caIp1, caIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) caPem, err := ca.MarshalToPEM() assert.Nil(t, err) caPool := NewCAPool() caPool.AddCACertificate(caPem) // ip is outside the network cIp1 := &net.IPNet{IP: net.ParseIP("10.1.0.0"), Mask: []byte{255, 255, 255, 0}} cIp2 := &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 0, 0}} c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{cIp1, cIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err := c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained an ip assignment outside the limitations of the signing ca: 10.1.0.0/24") // ip is outside the network reversed order of above cIp1 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("10.1.0.0"), Mask: []byte{255, 255, 255, 0}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{cIp1, cIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained an ip 
assignment outside the limitations of the signing ca: 10.1.0.0/24") // ip is within the network but mask is outside cIp1 = &net.IPNet{IP: net.ParseIP("10.0.1.0"), Mask: []byte{255, 254, 0, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 0}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{cIp1, cIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained an ip assignment outside the limitations of the signing ca: 10.0.1.0/15") // ip is within the network but mask is outside reversed order of above cIp1 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("10.0.1.0"), Mask: []byte{255, 254, 0, 0}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{cIp1, cIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained an ip assignment outside the limitations of the signing ca: 10.0.1.0/15") // ip and mask are within the network cIp1 = &net.IPNet{IP: net.ParseIP("10.0.1.0"), Mask: []byte{255, 255, 0, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 128}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{cIp1, cIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) // Exact matches c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{caIp1, caIp2}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) // Exact matches reversed c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), 
[]*net.IPNet{caIp2, caIp1}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) // Exact matches reversed with just 1 c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{caIp1}, []*net.IPNet{}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) } func TestNebulaCertificate_Verify_Subnets(t *testing.T) { _, caIp1, _ := net.ParseCIDR("10.0.0.0/16") _, caIp2, _ := net.ParseCIDR("192.168.0.0/24") ca, _, caKey, err := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{caIp1, caIp2}, []string{"test"}) assert.Nil(t, err) caPem, err := ca.MarshalToPEM() assert.Nil(t, err) caPool := NewCAPool() caPool.AddCACertificate(caPem) // ip is outside the network cIp1 := &net.IPNet{IP: net.ParseIP("10.1.0.0"), Mask: []byte{255, 255, 255, 0}} cIp2 := &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 0, 0}} c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{cIp1, cIp2}, []string{"test"}) assert.Nil(t, err) v, err := c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained a subnet assignment outside the limitations of the signing ca: 10.1.0.0/24") // ip is outside the network reversed order of above cIp1 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("10.1.0.0"), Mask: []byte{255, 255, 255, 0}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{cIp1, cIp2}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained a subnet assignment outside the limitations of the signing ca: 10.1.0.0/24") // ip is within the network but mask is outside cIp1 = 
&net.IPNet{IP: net.ParseIP("10.0.1.0"), Mask: []byte{255, 254, 0, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 0}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{cIp1, cIp2}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained a subnet assignment outside the limitations of the signing ca: 10.0.1.0/15") // ip is within the network but mask is outside reversed order of above cIp1 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("10.0.1.0"), Mask: []byte{255, 254, 0, 0}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{cIp1, cIp2}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.False(t, v) assert.EqualError(t, err, "certificate contained a subnet assignment outside the limitations of the signing ca: 10.0.1.0/15") // ip and mask are within the network cIp1 = &net.IPNet{IP: net.ParseIP("10.0.1.0"), Mask: []byte{255, 255, 0, 0}} cIp2 = &net.IPNet{IP: net.ParseIP("192.168.0.1"), Mask: []byte{255, 255, 255, 128}} c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{cIp1, cIp2}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) // Exact matches c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{caIp1, caIp2}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) // Exact matches reversed c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{caIp2, caIp1}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, 
v) assert.Nil(t, err) // Exact matches reversed with just 1 c, _, _, err = newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{caIp1}, []string{"test"}) assert.Nil(t, err) v, err = c.Verify(time.Now(), caPool) assert.True(t, v) assert.Nil(t, err) } func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) { ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) err = ca.VerifyPrivateKey(caKey) assert.Nil(t, err) _, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) err = ca.VerifyPrivateKey(caKey2) assert.NotNil(t, err) c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) err = c.VerifyPrivateKey(priv) assert.Nil(t, err) _, priv2 := x25519Keypair() err = c.VerifyPrivateKey(priv2) assert.NotNil(t, err) } func TestNewCAPoolFromBytes(t *testing.T) { noNewLines := ` # Current provisional, Remove once everything moves over to the real root. -----BEGIN NEBULA CERTIFICATE----- CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB -----END NEBULA CERTIFICATE----- # root-ca01 -----BEGIN NEBULA CERTIFICATE----- CkMKEW5lYnVsYSByb290IGNhIDAxKJL2u9EFMJL86+cGOiDPXMH4oU6HZTk/CqTG BVG+oJpAoqokUBbI4U0N8CSfpUABEkB/Pm5A2xyH/nc8mg/wvGUWG3pZ7nHzaDMf 8/phAUt+FLzqTECzQKisYswKvE3pl9mbEYKbOdIHrxdIp95mo4sF -----END NEBULA CERTIFICATE----- ` withNewLines := ` # Current provisional, Remove once everything moves over to the real root. 
-----BEGIN NEBULA CERTIFICATE----- CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB -----END NEBULA CERTIFICATE----- # root-ca01 -----BEGIN NEBULA CERTIFICATE----- CkMKEW5lYnVsYSByb290IGNhIDAxKJL2u9EFMJL86+cGOiDPXMH4oU6HZTk/CqTG BVG+oJpAoqokUBbI4U0N8CSfpUABEkB/Pm5A2xyH/nc8mg/wvGUWG3pZ7nHzaDMf 8/phAUt+FLzqTECzQKisYswKvE3pl9mbEYKbOdIHrxdIp95mo4sF -----END NEBULA CERTIFICATE----- ` expired := ` # expired certificate -----BEGIN NEBULA CERTIFICATE----- CjkKB2V4cGlyZWQouPmWjQYwufmWjQY6ILCRaoCkJlqHgv5jfDN4lzLHBvDzaQm4 vZxfu144hmgjQAESQG4qlnZi8DncvD/LDZnLgJHOaX1DWCHHEh59epVsC+BNgTie WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs= -----END NEBULA CERTIFICATE----- ` rootCA := NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "nebula root ca", }, } rootCA01 := NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "nebula root ca 01", }, } p, err := NewCAPoolFromBytes([]byte(noNewLines)) assert.Nil(t, err) assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, p.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) pp, err := NewCAPoolFromBytes([]byte(withNewLines)) assert.Nil(t, err) assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) // expired cert, no valid certs ppp, err := NewCAPoolFromBytes([]byte(expired)) assert.Equal(t, ErrExpired, err) assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired") // expired cert, with valid certs pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...)) 
assert.Equal(t, ErrExpired, err) assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired") assert.Equal(t, len(pppp.CAs), 3) } func appendByteSlices(b ...[]byte) []byte { retSlice := []byte{} for _, v := range b { retSlice = append(retSlice, v...) } return retSlice } func TestUnmrshalCertPEM(t *testing.T) { goodCert := []byte(` # A good cert -----BEGIN NEBULA CERTIFICATE----- CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB -----END NEBULA CERTIFICATE----- `) badBanner := []byte(`# A bad banner -----BEGIN NOT A NEBULA CERTIFICATE----- CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB -----END NOT A NEBULA CERTIFICATE----- `) invalidPem := []byte(`# Not a valid PEM format -BEGIN NEBULA CERTIFICATE----- CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB -END NEBULA CERTIFICATE----`) certBundle := appendByteSlices(goodCert, badBanner, invalidPem) // Success test case cert, rest, err := UnmarshalNebulaCertificateFromPEM(certBundle) assert.NotNil(t, cert) assert.Equal(t, rest, append(badBanner, invalidPem...)) assert.Nil(t, err) // Fail due to invalid banner. 
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest) assert.Nil(t, cert) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "bytes did not contain a proper nebula certificate banner") // Fail due to ivalid PEM format, because // it's missing the requisite pre-encapsulation boundary. cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest) assert.Nil(t, cert) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "input did not contain a valid PEM encoded block") } func TestUnmarshalEd25519PrivateKey(t *testing.T) { privKey := []byte(`# A good key -----BEGIN NEBULA ED25519 PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== -----END NEBULA ED25519 PRIVATE KEY----- `) shortKey := []byte(`# A short key -----BEGIN NEBULA ED25519 PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -----END NEBULA ED25519 PRIVATE KEY----- `) invalidBanner := []byte(`# Invalid banner -----BEGIN NOT A NEBULA PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== -----END NOT A NEBULA PRIVATE KEY----- `) invalidPem := []byte(`# Not a valid PEM format -BEGIN NEBULA ED25519 PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== -END NEBULA ED25519 PRIVATE KEY-----`) keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem) // Success test case k, rest, err := UnmarshalEd25519PrivateKey(keyBundle) assert.Len(t, k, 64) assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem)) assert.Nil(t, err) // Fail due to short key k, rest, err = UnmarshalEd25519PrivateKey(rest) assert.Nil(t, k) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key") // Fail due to invalid banner k, rest, err = UnmarshalEd25519PrivateKey(rest) assert.Nil(t, k) 
assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 private key banner") // Fail due to ivalid PEM format, because // it's missing the requisite pre-encapsulation boundary. k, rest, err = UnmarshalEd25519PrivateKey(rest) assert.Nil(t, k) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "input did not contain a valid PEM encoded block") } func TestUnmarshalX25519PrivateKey(t *testing.T) { privKey := []byte(`# A good key -----BEGIN NEBULA X25519 PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -----END NEBULA X25519 PRIVATE KEY----- `) shortKey := []byte(`# A short key -----BEGIN NEBULA X25519 PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== -----END NEBULA X25519 PRIVATE KEY----- `) invalidBanner := []byte(`# Invalid banner -----BEGIN NOT A NEBULA PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -----END NOT A NEBULA PRIVATE KEY----- `) invalidPem := []byte(`# Not a valid PEM format -BEGIN NEBULA X25519 PRIVATE KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -END NEBULA X25519 PRIVATE KEY-----`) keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem) // Success test case k, rest, err := UnmarshalX25519PrivateKey(keyBundle) assert.Len(t, k, 32) assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem)) assert.Nil(t, err) // Fail due to short key k, rest, err = UnmarshalX25519PrivateKey(rest) assert.Nil(t, k) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 private key") // Fail due to invalid banner k, rest, err = UnmarshalX25519PrivateKey(rest) assert.Nil(t, k) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 private key banner") // Fail due to ivalid PEM format, because // it's missing the requisite pre-encapsulation boundary. 
k, rest, err = UnmarshalX25519PrivateKey(rest) assert.Nil(t, k) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "input did not contain a valid PEM encoded block") } func TestUnmarshalEd25519PublicKey(t *testing.T) { pubKey := []byte(`# A good key -----BEGIN NEBULA ED25519 PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -----END NEBULA ED25519 PUBLIC KEY----- `) shortKey := []byte(`# A short key -----BEGIN NEBULA ED25519 PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== -----END NEBULA ED25519 PUBLIC KEY----- `) invalidBanner := []byte(`# Invalid banner -----BEGIN NOT A NEBULA PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -----END NOT A NEBULA PUBLIC KEY----- `) invalidPem := []byte(`# Not a valid PEM format -BEGIN NEBULA ED25519 PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -END NEBULA ED25519 PUBLIC KEY-----`) keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem) // Success test case k, rest, err := UnmarshalEd25519PublicKey(keyBundle) assert.Equal(t, len(k), 32) assert.Nil(t, err) assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem)) // Fail due to short key k, rest, err = UnmarshalEd25519PublicKey(rest) assert.Nil(t, k) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.EqualError(t, err, "key was not 32 bytes, is invalid ed25519 public key") // Fail due to invalid banner k, rest, err = UnmarshalEd25519PublicKey(rest) assert.Nil(t, k) assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 public key banner") assert.Equal(t, rest, invalidPem) // Fail due to ivalid PEM format, because // it's missing the requisite pre-encapsulation boundary. 
k, rest, err = UnmarshalEd25519PublicKey(rest) assert.Nil(t, k) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "input did not contain a valid PEM encoded block") } func TestUnmarshalX25519PublicKey(t *testing.T) { pubKey := []byte(`# A good key -----BEGIN NEBULA X25519 PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -----END NEBULA X25519 PUBLIC KEY----- `) shortKey := []byte(`# A short key -----BEGIN NEBULA X25519 PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== -----END NEBULA X25519 PUBLIC KEY----- `) invalidBanner := []byte(`# Invalid banner -----BEGIN NOT A NEBULA PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -----END NOT A NEBULA PUBLIC KEY----- `) invalidPem := []byte(`# Not a valid PEM format -BEGIN NEBULA X25519 PUBLIC KEY----- AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= -END NEBULA X25519 PUBLIC KEY-----`) keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem) // Success test case k, rest, err := UnmarshalX25519PublicKey(keyBundle) assert.Equal(t, len(k), 32) assert.Nil(t, err) assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem)) // Fail due to short key k, rest, err = UnmarshalX25519PublicKey(rest) assert.Nil(t, k) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 public key") // Fail due to invalid banner k, rest, err = UnmarshalX25519PublicKey(rest) assert.Nil(t, k) assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 public key banner") assert.Equal(t, rest, invalidPem) // Fail due to ivalid PEM format, because // it's missing the requisite pre-encapsulation boundary. 
k, rest, err = UnmarshalX25519PublicKey(rest) assert.Nil(t, k) assert.Equal(t, rest, invalidPem) assert.EqualError(t, err, "input did not contain a valid PEM encoded block") } // Ensure that upgrading the protobuf library does not change how certificates // are marshalled, since this would break signature verification func TestMarshalingNebulaCertificateConsistency(t *testing.T) { before := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) after := time.Date(2017, time.January, 18, 28, 40, 0, 0, time.UTC) pubKey := []byte("1234567890abcedfghij1234567890ab") nc := NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "testing", Ips: []*net.IPNet{ {IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, {IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, }, Subnets: []*net.IPNet{ {IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, {IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, }, Groups: []string{"test-group1", "test-group2", "test-group3"}, NotBefore: before, NotAfter: after, PublicKey: pubKey, IsCA: false, Issuer: "1234567890abcedfghij1234567890ab", }, Signature: []byte("1234567890abcedfghij1234567890ab"), } b, err := nc.Marshal() assert.Nil(t, err) //t.Log("Cert size:", len(b)) assert.Equal(t, "0aa2010a0774657374696e67121b8182845080feffff0f828284508080fcff0f8382845080fe83f80f1a1b8182844880fe83f80f8282844880feffff0f838284488080fcff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328f0e0e7d70430a08681c4053a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf1220313233343536373839306162636564666768696a313233343536373839306162", fmt.Sprintf("%x", b)) b, err = proto.Marshal(nc.getRawDetails()) assert.Nil(t, err) //t.Log("Raw cert size:", 
len(b)) assert.Equal(t, "0a0774657374696e67121b8182845080feffff0f828284508080fcff0f8382845080fe83f80f1a1b8182844880fe83f80f8282844880feffff0f838284488080fcff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328f0e0e7d70430a08681c4053a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b)) } func TestNebulaCertificate_Copy(t *testing.T) { ca, _, caKey, err := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) assert.Nil(t, err) cc := c.Copy() test.AssertDeepCopyEqual(t, c, cc) } func TestUnmarshalNebulaCertificate(t *testing.T) { // Test that we don't panic with an invalid certificate (#332) data := []byte("\x98\x00\x00") _, err := UnmarshalNebulaCertificate(data) assert.EqualError(t, err, "encoded Details was nil") } func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) { pub, priv, err := ed25519.GenerateKey(rand.Reader) if before.IsZero() { before = time.Now().Add(time.Second * -60).Round(time.Second) } if after.IsZero() { after = time.Now().Add(time.Second * 60).Round(time.Second) } nc := &NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "test ca", NotBefore: time.Unix(before.Unix(), 0), NotAfter: time.Unix(after.Unix(), 0), PublicKey: pub, IsCA: true, InvertedGroups: make(map[string]struct{}), }, } if len(ips) > 0 { nc.Details.Ips = ips } if len(subnets) > 0 { nc.Details.Subnets = subnets } if len(groups) > 0 { nc.Details.Groups = groups } err = nc.Sign(priv) if err != nil { return nil, nil, nil, err } return nc, pub, priv, nil } func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) { 
issuer, err := ca.Sha256Sum() if err != nil { return nil, nil, nil, err } if before.IsZero() { before = time.Now().Add(time.Second * -60).Round(time.Second) } if after.IsZero() { after = time.Now().Add(time.Second * 60).Round(time.Second) } if len(groups) == 0 { groups = []string{"test-group1", "test-group2", "test-group3"} } if len(ips) == 0 { ips = []*net.IPNet{ {IP: net.ParseIP("10.1.1.1").To4(), Mask: net.IPMask(net.ParseIP("255.255.255.0").To4())}, {IP: net.ParseIP("10.1.1.2").To4(), Mask: net.IPMask(net.ParseIP("255.255.0.0").To4())}, {IP: net.ParseIP("10.1.1.3").To4(), Mask: net.IPMask(net.ParseIP("255.0.255.0").To4())}, } } if len(subnets) == 0 { subnets = []*net.IPNet{ {IP: net.ParseIP("9.1.1.1").To4(), Mask: net.IPMask(net.ParseIP("255.0.255.0").To4())}, {IP: net.ParseIP("9.1.1.2").To4(), Mask: net.IPMask(net.ParseIP("255.255.255.0").To4())}, {IP: net.ParseIP("9.1.1.3").To4(), Mask: net.IPMask(net.ParseIP("255.255.0.0").To4())}, } } pub, rawPriv := x25519Keypair() nc := &NebulaCertificate{ Details: NebulaCertificateDetails{ Name: "testing", Ips: ips, Subnets: subnets, Groups: groups, NotBefore: time.Unix(before.Unix(), 0), NotAfter: time.Unix(after.Unix(), 0), PublicKey: pub, IsCA: false, Issuer: issuer, InvertedGroups: make(map[string]struct{}), }, } err = nc.Sign(key) if err != nil { return nil, nil, nil, err } return nc, pub, rawPriv, nil } func x25519Keypair() ([]byte, []byte) { privkey := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, privkey); err != nil { panic(err) } pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint) if err != nil { panic(err) } return pubkey, privkey } nebula-1.6.1+dfsg/cert/errors.go000066400000000000000000000003331434072716400165130ustar00rootroot00000000000000package cert import "errors" var ( ErrExpired = errors.New("certificate is expired") ErrNotCA = errors.New("certificate is not a CA") ErrNotSelfSigned = errors.New("certificate is not self-signed") ) 
nebula-1.6.1+dfsg/cidr/000077500000000000000000000000001434072716400146355ustar00rootroot00000000000000nebula-1.6.1+dfsg/cidr/parse.go000066400000000000000000000004031434072716400162730ustar00rootroot00000000000000package cidr import "net" // Parse is a convenience function that returns only the IPNet // This function ignores errors since it is primarily a test helper, the result could be nil func Parse(s string) *net.IPNet { _, c, _ := net.ParseCIDR(s) return c } nebula-1.6.1+dfsg/cidr/tree4.go000066400000000000000000000041221434072716400162060ustar00rootroot00000000000000package cidr import ( "net" "github.com/slackhq/nebula/iputil" ) type Node struct { left *Node right *Node parent *Node value interface{} } type Tree4 struct { root *Node } const ( startbit = iputil.VpnIp(0x80000000) ) func NewTree4() *Tree4 { tree := new(Tree4) tree.root = &Node{} return tree } func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) { bit := startbit node := tree.root next := tree.root ip := iputil.Ip2VpnIp(cidr.IP) mask := iputil.Ip2VpnIp(cidr.Mask) // Find our last ancestor in the tree for bit&mask != 0 { if ip&bit != 0 { next = node.right } else { next = node.left } if next == nil { break } bit = bit >> 1 node = next } // We already have this range so update the value if next != nil { node.value = val return } // Build up the rest of the tree we don't already have for bit&mask != 0 { next = &Node{} next.parent = node if ip&bit != 0 { node.right = next } else { node.left = next } bit >>= 1 node = next } // Final node marks our cidr, set the value node.value = val } // Finds the first match, which may be the least specific func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) { bit := startbit node := tree.root for node != nil { if node.value != nil { return node.value } if ip&bit != 0 { node = node.right } else { node = node.left } bit >>= 1 } return value } // Finds the most specific match func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value 
interface{}) { bit := startbit node := tree.root for node != nil { if node.value != nil { value = node.value } if ip&bit != 0 { node = node.right } else { node = node.left } bit >>= 1 } return value } // Finds the most specific match func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) { bit := startbit node := tree.root lastNode := node for node != nil { lastNode = node if ip&bit != 0 { node = node.right } else { node = node.left } bit >>= 1 } if bit == 0 && lastNode != nil { value = lastNode.value } return value } nebula-1.6.1+dfsg/cidr/tree4_test.go000066400000000000000000000074511434072716400172550ustar00rootroot00000000000000package cidr import ( "net" "testing" "github.com/slackhq/nebula/iputil" "github.com/stretchr/testify/assert" ) func TestCIDRTree_Contains(t *testing.T) { tree := NewTree4() tree.AddCIDR(Parse("1.0.0.0/8"), "1") tree.AddCIDR(Parse("2.1.0.0/16"), "2") tree.AddCIDR(Parse("3.1.1.0/24"), "3") tree.AddCIDR(Parse("4.1.1.0/24"), "4a") tree.AddCIDR(Parse("4.1.1.1/32"), "4b") tree.AddCIDR(Parse("4.1.2.1/32"), "4c") tree.AddCIDR(Parse("254.0.0.0/4"), "5") tests := []struct { Result interface{} IP string }{ {"1", "1.0.0.0"}, {"1", "1.255.255.255"}, {"2", "2.1.0.0"}, {"2", "2.1.255.255"}, {"3", "3.1.1.0"}, {"3", "3.1.1.255"}, {"4a", "4.1.1.255"}, {"4a", "4.1.1.1"}, {"5", "240.0.0.0"}, {"5", "255.255.255.255"}, {nil, "239.0.0.0"}, {nil, "4.1.2.2"}, } for _, tt := range tests { assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))) } tree = NewTree4() tree.AddCIDR(Parse("1.1.1.1/0"), "cool") assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))) assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))) } func TestCIDRTree_MostSpecificContains(t *testing.T) { tree := NewTree4() tree.AddCIDR(Parse("1.0.0.0/8"), "1") tree.AddCIDR(Parse("2.1.0.0/16"), "2") tree.AddCIDR(Parse("3.1.1.0/24"), "3") tree.AddCIDR(Parse("4.1.1.0/24"), "4a") tree.AddCIDR(Parse("4.1.1.0/30"), 
"4b") tree.AddCIDR(Parse("4.1.1.1/32"), "4c") tree.AddCIDR(Parse("254.0.0.0/4"), "5") tests := []struct { Result interface{} IP string }{ {"1", "1.0.0.0"}, {"1", "1.255.255.255"}, {"2", "2.1.0.0"}, {"2", "2.1.255.255"}, {"3", "3.1.1.0"}, {"3", "3.1.1.255"}, {"4a", "4.1.1.255"}, {"4b", "4.1.1.2"}, {"4c", "4.1.1.1"}, {"5", "240.0.0.0"}, {"5", "255.255.255.255"}, {nil, "239.0.0.0"}, {nil, "4.1.2.2"}, } for _, tt := range tests { assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))) } tree = NewTree4() tree.AddCIDR(Parse("1.1.1.1/0"), "cool") assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))) assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))) } func TestCIDRTree_Match(t *testing.T) { tree := NewTree4() tree.AddCIDR(Parse("4.1.1.0/32"), "1a") tree.AddCIDR(Parse("4.1.1.1/32"), "1b") tests := []struct { Result interface{} IP string }{ {"1a", "4.1.1.0"}, {"1b", "4.1.1.1"}, } for _, tt := range tests { assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))) } tree = NewTree4() tree.AddCIDR(Parse("1.1.1.1/0"), "cool") assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))) assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))) } func BenchmarkCIDRTree_Contains(b *testing.B) { tree := NewTree4() tree.AddCIDR(Parse("1.1.0.0/16"), "1") tree.AddCIDR(Parse("1.2.1.1/32"), "1") tree.AddCIDR(Parse("192.2.1.1/32"), "1") tree.AddCIDR(Parse("172.2.1.1/32"), "1") ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1")) b.Run("found", func(b *testing.B) { for i := 0; i < b.N; i++ { tree.Contains(ip) } }) ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255")) b.Run("not found", func(b *testing.B) { for i := 0; i < b.N; i++ { tree.Contains(ip) } }) } func BenchmarkCIDRTree_Match(b *testing.B) { tree := NewTree4() tree.AddCIDR(Parse("1.1.0.0/16"), "1") tree.AddCIDR(Parse("1.2.1.1/32"), "1") 
tree.AddCIDR(Parse("192.2.1.1/32"), "1") tree.AddCIDR(Parse("172.2.1.1/32"), "1") ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1")) b.Run("found", func(b *testing.B) { for i := 0; i < b.N; i++ { tree.Match(ip) } }) ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255")) b.Run("not found", func(b *testing.B) { for i := 0; i < b.N; i++ { tree.Match(ip) } }) } nebula-1.6.1+dfsg/cidr/tree6.go000066400000000000000000000052561434072716400162210ustar00rootroot00000000000000package cidr import ( "net" "github.com/slackhq/nebula/iputil" ) const startbit6 = uint64(1 << 63) type Tree6 struct { root4 *Node root6 *Node } func NewTree6() *Tree6 { tree := new(Tree6) tree.root4 = &Node{} tree.root6 = &Node{} return tree } func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) { var node, next *Node cidrIP, ipv4 := isIPV4(cidr.IP) if ipv4 { node = tree.root4 next = tree.root4 } else { node = tree.root6 next = tree.root6 } for i := 0; i < len(cidrIP); i += 4 { ip := iputil.Ip2VpnIp(cidrIP[i : i+4]) mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4]) bit := startbit // Find our last ancestor in the tree for bit&mask != 0 { if ip&bit != 0 { next = node.right } else { next = node.left } if next == nil { break } bit = bit >> 1 node = next } // Build up the rest of the tree we don't already have for bit&mask != 0 { next = &Node{} next.parent = node if ip&bit != 0 { node.right = next } else { node.left = next } bit >>= 1 node = next } } // Final node marks our cidr, set the value node.value = val } // Finds the most specific match func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) { var node *Node wholeIP, ipv4 := isIPV4(ip) if ipv4 { node = tree.root4 } else { node = tree.root6 } for i := 0; i < len(wholeIP); i += 4 { ip := iputil.Ip2VpnIp(wholeIP[i : i+4]) bit := startbit for node != nil { if node.value != nil { value = node.value } if bit == 0 { break } if ip&bit != 0 { node = node.right } else { node = node.left } bit >>= 1 } } return value } func (tree *Tree6) 
MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) { bit := startbit node := tree.root4 for node != nil { if node.value != nil { value = node.value } if ip&bit != 0 { node = node.right } else { node = node.left } bit >>= 1 } return value } func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) { ip := hi node := tree.root6 for i := 0; i < 2; i++ { bit := startbit6 for node != nil { if node.value != nil { value = node.value } if bit == 0 { break } if ip&bit != 0 { node = node.right } else { node = node.left } bit >>= 1 } ip = lo } return value } func isIPV4(ip net.IP) (net.IP, bool) { if len(ip) == net.IPv4len { return ip, true } if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff { return ip[12:16], true } return ip, false } func isZeros(p net.IP) bool { for i := 0; i < len(p); i++ { if p[i] != 0 { return false } } return true } nebula-1.6.1+dfsg/cidr/tree6_test.go000066400000000000000000000041701434072716400172520ustar00rootroot00000000000000package cidr import ( "encoding/binary" "net" "testing" "github.com/stretchr/testify/assert" ) func TestCIDR6Tree_MostSpecificContains(t *testing.T) { tree := NewTree6() tree.AddCIDR(Parse("1.0.0.0/8"), "1") tree.AddCIDR(Parse("2.1.0.0/16"), "2") tree.AddCIDR(Parse("3.1.1.0/24"), "3") tree.AddCIDR(Parse("4.1.1.1/24"), "4a") tree.AddCIDR(Parse("4.1.1.1/30"), "4b") tree.AddCIDR(Parse("4.1.1.1/32"), "4c") tree.AddCIDR(Parse("254.0.0.0/4"), "5") tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a") tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b") tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c") tests := []struct { Result interface{} IP string }{ {"1", "1.0.0.0"}, {"1", "1.255.255.255"}, {"2", "2.1.0.0"}, {"2", "2.1.255.255"}, {"3", "3.1.1.0"}, {"3", "3.1.1.255"}, {"4a", "4.1.1.255"}, {"4b", "4.1.1.2"}, {"4c", "4.1.1.1"}, {"5", "240.0.0.0"}, {"5", "255.255.255.255"}, {"6a", "1:2:0:4:1:1:1:1"}, {"6b", "1:2:0:4:5:1:1:1"}, {"6c", "1:2:0:4:5:0:0:0"}, {nil, "239.0.0.0"}, {nil, 
"4.1.2.2"}, } for _, tt := range tests { assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP))) } tree = NewTree6() tree.AddCIDR(Parse("1.1.1.1/0"), "cool") tree.AddCIDR(Parse("::/0"), "cool6") assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0"))) assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255"))) assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::"))) assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8"))) } func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) { tree := NewTree6() tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a") tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b") tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c") tests := []struct { Result interface{} IP string }{ {"6a", "1:2:0:4:1:1:1:1"}, {"6b", "1:2:0:4:5:1:1:1"}, {"6c", "1:2:0:4:5:0:0:0"}, } for _, tt := range tests { ip := net.ParseIP(tt.IP) hi := binary.BigEndian.Uint64(ip[:8]) lo := binary.BigEndian.Uint64(ip[8:]) assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo)) } } nebula-1.6.1+dfsg/cmd/000077500000000000000000000000001434072716400144575ustar00rootroot00000000000000nebula-1.6.1+dfsg/cmd/nebula-cert/000077500000000000000000000000001434072716400166605ustar00rootroot00000000000000nebula-1.6.1+dfsg/cmd/nebula-cert/ca.go000066400000000000000000000116721434072716400176010ustar00rootroot00000000000000package main import ( "crypto/rand" "flag" "fmt" "io" "io/ioutil" "net" "os" "strings" "time" "github.com/skip2/go-qrcode" "github.com/slackhq/nebula/cert" "golang.org/x/crypto/ed25519" ) type caFlags struct { set *flag.FlagSet name *string duration *time.Duration outKeyPath *string outCertPath *string outQRPath *string groups *string ips *string subnets *string } func newCaFlags() *caFlags { cf := caFlags{set: flag.NewFlagSet("ca", flag.ContinueOnError)} cf.set.Usage = func() {} cf.name = cf.set.String("name", "", "Required: name of the certificate authority") cf.duration 
= cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"") cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to") cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to") cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate") cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use") cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses") cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets") return &cf } func ca(args []string, out io.Writer, errOut io.Writer) error { cf := newCaFlags() err := cf.set.Parse(args) if err != nil { return err } if err := mustFlagString("name", cf.name); err != nil { return err } if err := mustFlagString("out-key", cf.outKeyPath); err != nil { return err } if err := mustFlagString("out-crt", cf.outCertPath); err != nil { return err } if *cf.duration <= 0 { return &helpError{"-duration must be greater than 0"} } var groups []string if *cf.groups != "" { for _, rg := range strings.Split(*cf.groups, ",") { g := strings.TrimSpace(rg) if g != "" { groups = append(groups, g) } } } var ips []*net.IPNet if *cf.ips != "" { for _, rs := range strings.Split(*cf.ips, ",") { rs := strings.Trim(rs, " ") if rs != "" { ip, ipNet, err := net.ParseCIDR(rs) if err != nil { return newHelpErrorf("invalid ip definition: %s", err) } if ip.To4() == nil { return newHelpErrorf("invalid ip definition: can only be ipv4, have 
%s", rs) } ipNet.IP = ip ips = append(ips, ipNet) } } } var subnets []*net.IPNet if *cf.subnets != "" { for _, rs := range strings.Split(*cf.subnets, ",") { rs := strings.Trim(rs, " ") if rs != "" { _, s, err := net.ParseCIDR(rs) if err != nil { return newHelpErrorf("invalid subnet definition: %s", err) } if s.IP.To4() == nil { return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs) } subnets = append(subnets, s) } } } pub, rawPriv, err := ed25519.GenerateKey(rand.Reader) if err != nil { return fmt.Errorf("error while generating ed25519 keys: %s", err) } nc := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: *cf.name, Groups: groups, Ips: ips, Subnets: subnets, NotBefore: time.Now(), NotAfter: time.Now().Add(*cf.duration), PublicKey: pub, IsCA: true, }, } if _, err := os.Stat(*cf.outKeyPath); err == nil { return fmt.Errorf("refusing to overwrite existing CA key: %s", *cf.outKeyPath) } if _, err := os.Stat(*cf.outCertPath); err == nil { return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath) } err = nc.Sign(rawPriv) if err != nil { return fmt.Errorf("error while signing: %s", err) } err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalEd25519PrivateKey(rawPriv), 0600) if err != nil { return fmt.Errorf("error while writing out-key: %s", err) } b, err := nc.MarshalToPEM() if err != nil { return fmt.Errorf("error while marshalling certificate: %s", err) } err = ioutil.WriteFile(*cf.outCertPath, b, 0600) if err != nil { return fmt.Errorf("error while writing out-crt: %s", err) } if *cf.outQRPath != "" { b, err = qrcode.Encode(string(b), qrcode.Medium, -5) if err != nil { return fmt.Errorf("error while generating qr code: %s", err) } err = ioutil.WriteFile(*cf.outQRPath, b, 0600) if err != nil { return fmt.Errorf("error while writing out-qr: %s", err) } } return nil } func caSummary() string { return "ca : create a self signed certificate authority" } func caHelp(out io.Writer) { cf := newCaFlags() 
out.Write([]byte("Usage of " + os.Args[0] + " " + caSummary() + "\n")) cf.set.SetOutput(out) cf.set.PrintDefaults() } nebula-1.6.1+dfsg/cmd/nebula-cert/ca_test.go000066400000000000000000000130431434072716400206320ustar00rootroot00000000000000//go:build !windows // +build !windows package main import ( "bytes" "io/ioutil" "os" "testing" "time" "github.com/slackhq/nebula/cert" "github.com/stretchr/testify/assert" ) //TODO: test file permissions func Test_caSummary(t *testing.T) { assert.Equal(t, "ca : create a self signed certificate authority", caSummary()) } func Test_caHelp(t *testing.T) { ob := &bytes.Buffer{} caHelp(ob) assert.Equal( t, "Usage of "+os.Args[0]+" ca : create a self signed certificate authority\n"+ " -duration duration\n"+ " \tOptional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\" (default 8760h0m0s)\n"+ " -groups string\n"+ " \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+ " -ips string\n"+ " \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses\n"+ " -name string\n"+ " \tRequired: name of the certificate authority\n"+ " -out-crt string\n"+ " \tOptional: path to write the certificate to (default \"ca.crt\")\n"+ " -out-key string\n"+ " \tOptional: path to write the private key to (default \"ca.key\")\n"+ " -out-qr string\n"+ " \tOptional: output a qr code image (png) of the certificate\n"+ " -subnets string\n"+ " \tOptional: comma separated list of ipv4 address and network in CIDR notation. 
This will limit which ipv4 addresses and networks subordinate certs can use in subnets\n", ob.String(), ) } func Test_ca(t *testing.T) { ob := &bytes.Buffer{} eb := &bytes.Buffer{} // required args assertHelpError(t, ca([]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb), "-name is required") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // ipv4 only ips assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // ipv4 only subnets assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // failed key write ob.Reset() eb.Reset() args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"} assert.EqualError(t, ca(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // create temp key file keyF, err := ioutil.TempFile("", "test.key") assert.Nil(t, err) os.Remove(keyF.Name()) // failed cert write ob.Reset() eb.Reset() args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()} assert.EqualError(t, ca(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // create temp cert file crtF, err := ioutil.TempFile("", "test.crt") assert.Nil(t, err) os.Remove(crtF.Name()) os.Remove(keyF.Name()) // test proper cert with removed empty groups and subnets ob.Reset() eb.Reset() args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), 
"-out-key", keyF.Name()} assert.Nil(t, ca(args, ob, eb)) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // read cert and key files rb, _ := ioutil.ReadFile(keyF.Name()) lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Len(t, lKey, 64) rb, _ = ioutil.ReadFile(crtF.Name()) lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Equal(t, "test", lCrt.Details.Name) assert.Len(t, lCrt.Details.Ips, 0) assert.True(t, lCrt.Details.IsCA) assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Details.Groups) assert.Len(t, lCrt.Details.Subnets, 0) assert.Len(t, lCrt.Details.PublicKey, 32) assert.Equal(t, time.Duration(time.Minute*100), lCrt.Details.NotAfter.Sub(lCrt.Details.NotBefore)) assert.Equal(t, "", lCrt.Details.Issuer) assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey)) // create valid cert/key for overwrite tests os.Remove(keyF.Name()) os.Remove(crtF.Name()) ob.Reset() eb.Reset() args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} assert.Nil(t, ca(args, ob, eb)) // test that we won't overwrite existing certificate file ob.Reset() eb.Reset() args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA key: "+keyF.Name()) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // test that we won't overwrite existing key file os.Remove(keyF.Name()) ob.Reset() eb.Reset() args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()} assert.EqualError(t, ca(args, ob, eb), "refusing to overwrite existing CA cert: "+crtF.Name()) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) os.Remove(keyF.Name()) } 
nebula-1.6.1+dfsg/cmd/nebula-cert/keygen.go000066400000000000000000000030161434072716400204710ustar00rootroot00000000000000package main import ( "flag" "fmt" "io" "io/ioutil" "os" "github.com/slackhq/nebula/cert" ) type keygenFlags struct { set *flag.FlagSet outKeyPath *string outPubPath *string } func newKeygenFlags() *keygenFlags { cf := keygenFlags{set: flag.NewFlagSet("keygen", flag.ContinueOnError)} cf.set.Usage = func() {} cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to") cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to") return &cf } func keygen(args []string, out io.Writer, errOut io.Writer) error { cf := newKeygenFlags() err := cf.set.Parse(args) if err != nil { return err } if err := mustFlagString("out-key", cf.outKeyPath); err != nil { return err } if err := mustFlagString("out-pub", cf.outPubPath); err != nil { return err } pub, rawPriv := x25519Keypair() err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600) if err != nil { return fmt.Errorf("error while writing out-key: %s", err) } err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalX25519PublicKey(pub), 0600) if err != nil { return fmt.Errorf("error while writing out-pub: %s", err) } return nil } func keygenSummary() string { return "keygen : create a public/private key pair. the public key can be passed to `nebula-cert sign`" } func keygenHelp(out io.Writer) { cf := newKeygenFlags() out.Write([]byte("Usage of " + os.Args[0] + " " + keygenSummary() + "\n")) cf.set.SetOutput(out) cf.set.PrintDefaults() } nebula-1.6.1+dfsg/cmd/nebula-cert/keygen_test.go000066400000000000000000000051141434072716400215310ustar00rootroot00000000000000package main import ( "bytes" "io/ioutil" "os" "testing" "github.com/slackhq/nebula/cert" "github.com/stretchr/testify/assert" ) //TODO: test file permissions func Test_keygenSummary(t *testing.T) { assert.Equal(t, "keygen : create a public/private key pair. 
the public key can be passed to `nebula-cert sign`", keygenSummary()) } func Test_keygenHelp(t *testing.T) { ob := &bytes.Buffer{} keygenHelp(ob) assert.Equal( t, "Usage of "+os.Args[0]+" keygen : create a public/private key pair. the public key can be passed to `nebula-cert sign`\n"+ " -out-key string\n"+ " \tRequired: path to write the private key to\n"+ " -out-pub string\n"+ " \tRequired: path to write the public key to\n", ob.String(), ) } func Test_keygen(t *testing.T) { ob := &bytes.Buffer{} eb := &bytes.Buffer{} // required args assertHelpError(t, keygen([]string{"-out-pub", "nope"}, ob, eb), "-out-key is required") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assertHelpError(t, keygen([]string{"-out-key", "nope"}, ob, eb), "-out-pub is required") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // failed key write ob.Reset() eb.Reset() args := []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", "/do/not/write/pleasekey"} assert.EqualError(t, keygen(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // create temp key file keyF, err := ioutil.TempFile("", "test.key") assert.Nil(t, err) defer os.Remove(keyF.Name()) // failed pub write ob.Reset() eb.Reset() args = []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", keyF.Name()} assert.EqualError(t, keygen(args, ob, eb), "error while writing out-pub: open /do/not/write/pleasepub: "+NoSuchDirError) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // create temp pub file pubF, err := ioutil.TempFile("", "test.pub") assert.Nil(t, err) defer os.Remove(pubF.Name()) // test proper keygen ob.Reset() eb.Reset() args = []string{"-out-pub", pubF.Name(), "-out-key", keyF.Name()} assert.Nil(t, keygen(args, ob, eb)) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // read cert and key files rb, _ := ioutil.ReadFile(keyF.Name()) 
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Len(t, lKey, 32) rb, _ = ioutil.ReadFile(pubF.Name()) lPub, b, err := cert.UnmarshalX25519PublicKey(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Len(t, lPub, 32) } nebula-1.6.1+dfsg/cmd/nebula-cert/main.go000066400000000000000000000051161434072716400201360ustar00rootroot00000000000000package main import ( "flag" "fmt" "io" "os" ) var Build string type helpError struct { s string } func (he *helpError) Error() string { return he.s } func newHelpErrorf(s string, v ...interface{}) error { return &helpError{s: fmt.Sprintf(s, v...)} } func main() { flag.Usage = func() { help("", os.Stderr) os.Exit(1) } printVersion := flag.Bool("version", false, "Print version") flagHelp := flag.Bool("help", false, "Print command line usage") flagH := flag.Bool("h", false, "Print command line usage") printUsage := false flag.Parse() if *flagH || *flagHelp { printUsage = true } args := flag.Args() if *printVersion { fmt.Printf("Version: %v\n", Build) os.Exit(0) } if len(args) < 1 { if printUsage { help("", os.Stderr) os.Exit(0) } help("No mode was provided", os.Stderr) os.Exit(1) } else if printUsage { handleError(args[0], &helpError{}, os.Stderr) os.Exit(0) } var err error switch args[0] { case "ca": err = ca(args[1:], os.Stdout, os.Stderr) case "keygen": err = keygen(args[1:], os.Stdout, os.Stderr) case "sign": err = signCert(args[1:], os.Stdout, os.Stderr) case "print": err = printCert(args[1:], os.Stdout, os.Stderr) case "verify": err = verify(args[1:], os.Stdout, os.Stderr) default: err = fmt.Errorf("unknown mode: %s", args[0]) } if err != nil { os.Exit(handleError(args[0], err, os.Stderr)) } } func handleError(mode string, e error, out io.Writer) int { code := 1 // Handle -help, -h flags properly if e == flag.ErrHelp { code = 0 e = &helpError{} } else if e != nil && e.Error() != "" { fmt.Fprintln(out, "Error:", e) } switch e.(type) { case *helpError: switch mode { case "ca": 
caHelp(out) case "keygen": keygenHelp(out) case "sign": signHelp(out) case "print": printHelp(out) case "verify": verifyHelp(out) } } return code } func help(err string, out io.Writer) { if err != "" { fmt.Fprintln(out, "Error:", err) fmt.Fprintln(out, "") } fmt.Fprintf(out, "Usage of %s :\n", os.Args[0]) fmt.Fprintln(out, " Global flags:") fmt.Fprintln(out, " -version: Prints the version") fmt.Fprintln(out, " -h, -help: Prints this help message") fmt.Fprintln(out, "") fmt.Fprintln(out, " Modes:") fmt.Fprintln(out, " "+caSummary()) fmt.Fprintln(out, " "+keygenSummary()) fmt.Fprintln(out, " "+signSummary()) fmt.Fprintln(out, " "+printSummary()) fmt.Fprintln(out, " "+verifySummary()) } func mustFlagString(name string, val *string) error { if *val == "" { return newHelpErrorf("-%s is required", name) } return nil } nebula-1.6.1+dfsg/cmd/nebula-cert/main_test.go000066400000000000000000000032731434072716400211770ustar00rootroot00000000000000package main import ( "bytes" "errors" "io" "os" "testing" "github.com/stretchr/testify/assert" ) //TODO: all flag parsing continueOnError will print to stderr on its own currently func Test_help(t *testing.T) { expected := "Usage of " + os.Args[0] + " :\n" + " Global flags:\n" + " -version: Prints the version\n" + " -h, -help: Prints this help message\n\n" + " Modes:\n" + " " + caSummary() + "\n" + " " + keygenSummary() + "\n" + " " + signSummary() + "\n" + " " + printSummary() + "\n" + " " + verifySummary() + "\n" ob := &bytes.Buffer{} // No error test help("", ob) assert.Equal( t, expected, ob.String(), ) // Error test ob.Reset() help("test error", ob) assert.Equal( t, "Error: test error\n\n"+expected, ob.String(), ) } func Test_handleError(t *testing.T) { ob := &bytes.Buffer{} // normal error handleError("", errors.New("test error"), ob) assert.Equal(t, "Error: test error\n", ob.String()) // unknown mode help error ob.Reset() handleError("", newHelpErrorf("test %s", "error"), ob) assert.Equal(t, "Error: test error\n", 
ob.String()) // test all modes with help error modes := map[string]func(io.Writer){"ca": caHelp, "print": printHelp, "sign": signHelp, "verify": verifyHelp} eb := &bytes.Buffer{} for mode, fn := range modes { ob.Reset() eb.Reset() fn(eb) handleError(mode, newHelpErrorf("test %s", "error"), ob) assert.Equal(t, "Error: test error\n"+eb.String(), ob.String()) } } func assertHelpError(t *testing.T, err error, msg string) { switch err.(type) { case *helpError: // good default: t.Fatal("err was not a helpError") } assert.EqualError(t, err, msg) } nebula-1.6.1+dfsg/cmd/nebula-cert/print.go000066400000000000000000000044121434072716400203440ustar00rootroot00000000000000package main import ( "encoding/json" "flag" "fmt" "io" "io/ioutil" "os" "strings" "github.com/skip2/go-qrcode" "github.com/slackhq/nebula/cert" ) type printFlags struct { set *flag.FlagSet json *bool outQRPath *string path *string } func newPrintFlags() *printFlags { pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)} pf.set.Usage = func() {} pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format") pf.outQRPath = pf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate") pf.path = pf.set.String("path", "", "Required: path to the certificate") return &pf } func printCert(args []string, out io.Writer, errOut io.Writer) error { pf := newPrintFlags() err := pf.set.Parse(args) if err != nil { return err } if err := mustFlagString("path", pf.path); err != nil { return err } rawCert, err := ioutil.ReadFile(*pf.path) if err != nil { return fmt.Errorf("unable to read cert; %s", err) } var c *cert.NebulaCertificate var qrBytes []byte part := 0 for { c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert) if err != nil { return fmt.Errorf("error while unmarshaling cert: %s", err) } if *pf.json { b, _ := json.Marshal(c) out.Write(b) out.Write([]byte("\n")) } else { out.Write([]byte(c.String())) out.Write([]byte("\n")) } if 
*pf.outQRPath != "" { b, err := c.MarshalToPEM() if err != nil { return fmt.Errorf("error while marshalling cert to PEM: %s", err) } qrBytes = append(qrBytes, b...) } if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" { break } part++ } if *pf.outQRPath != "" { b, err := qrcode.Encode(string(qrBytes), qrcode.Medium, -5) if err != nil { return fmt.Errorf("error while generating qr code: %s", err) } err = ioutil.WriteFile(*pf.outQRPath, b, 0600) if err != nil { return fmt.Errorf("error while writing out-qr: %s", err) } } return nil } func printSummary() string { return "print : prints details about a certificate" } func printHelp(out io.Writer) { pf := newPrintFlags() out.Write([]byte("Usage of " + os.Args[0] + " " + printSummary() + "\n")) pf.set.SetOutput(out) pf.set.PrintDefaults() } nebula-1.6.1+dfsg/cmd/nebula-cert/print_test.go000066400000000000000000000133151434072716400214050ustar00rootroot00000000000000package main import ( "bytes" "io/ioutil" "os" "testing" "time" "github.com/slackhq/nebula/cert" "github.com/stretchr/testify/assert" ) func Test_printSummary(t *testing.T) { assert.Equal(t, "print : prints details about a certificate", printSummary()) } func Test_printHelp(t *testing.T) { ob := &bytes.Buffer{} printHelp(ob) assert.Equal( t, "Usage of "+os.Args[0]+" print : prints details about a certificate\n"+ " -json\n"+ " \tOptional: outputs certificates in json format\n"+ " -out-qr string\n"+ " \tOptional: output a qr code image (png) of the certificate\n"+ " -path string\n"+ " \tRequired: path to the certificate\n", ob.String(), ) } func Test_printCert(t *testing.T) { // Orient our local time and avoid headaches time.Local = time.UTC ob := &bytes.Buffer{} eb := &bytes.Buffer{} // no path err := printCert([]string{}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assertHelpError(t, err, "-path is required") // no cert at path ob.Reset() eb.Reset() err = printCert([]string{"-path", 
"does_not_exist"}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "unable to read cert; open does_not_exist: "+NoSuchFileError) // invalid cert at path ob.Reset() eb.Reset() tf, err := ioutil.TempFile("", "print-cert") assert.Nil(t, err) defer os.Remove(tf.Name()) tf.WriteString("-----BEGIN NOPE-----") err = printCert([]string{"-path", tf.Name()}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "error while unmarshaling cert: input did not contain a valid PEM encoded block") // test multiple certs ob.Reset() eb.Reset() tf.Truncate(0) tf.Seek(0, 0) c := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "test", Groups: []string{"hi"}, PublicKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2}, }, Signature: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2}, } p, _ := c.MarshalToPEM() tf.Write(p) tf.Write(p) tf.Write(p) err = printCert([]string{"-path", tf.Name()}, ob, eb) assert.Nil(t, err) assert.Equal( t, "NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: 
cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n", ob.String(), ) assert.Equal(t, "", eb.String()) // test json ob.Reset() eb.Reset() tf.Truncate(0) tf.Seek(0, 0) c = cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "test", Groups: []string{"hi"}, PublicKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2}, }, Signature: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2}, } p, _ = c.MarshalToPEM() tf.Write(p) tf.Write(p) tf.Write(p) err = printCert([]string{"-json", "-path", tf.Name()}, ob, eb) assert.Nil(t, err) assert.Equal( t, 
"{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n", ob.String(), ) assert.Equal(t, "", eb.String()) } nebula-1.6.1+dfsg/cmd/nebula-cert/sign.go000066400000000000000000000156441434072716400201610ustar00rootroot00000000000000package main import ( "crypto/rand" "flag" "fmt" "io" "io/ioutil" "net" "os" "strings" "time" "github.com/skip2/go-qrcode" "github.com/slackhq/nebula/cert" "golang.org/x/crypto/curve25519" ) type signFlags struct { set *flag.FlagSet caKeyPath *string caCertPath *string name *string ip *string duration *time.Duration inPubPath *string outKeyPath *string outCertPath *string outQRPath *string groups *string subnets *string } func newSignFlags() *signFlags { sf := signFlags{set: flag.NewFlagSet("sign", 
flag.ContinueOnError)} sf.set.Usage = func() {} sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key") sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert") sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname") sf.ip = sf.set.String("ip", "", "Required: ipv4 address and network in CIDR notation to assign the cert") sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"") sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key") sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to") sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to") sf.outQRPath = sf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate") sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups") sf.subnets = sf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. 
Subnets this cert can serve for") return &sf } func signCert(args []string, out io.Writer, errOut io.Writer) error { sf := newSignFlags() err := sf.set.Parse(args) if err != nil { return err } if err := mustFlagString("ca-key", sf.caKeyPath); err != nil { return err } if err := mustFlagString("ca-crt", sf.caCertPath); err != nil { return err } if err := mustFlagString("name", sf.name); err != nil { return err } if err := mustFlagString("ip", sf.ip); err != nil { return err } if *sf.inPubPath != "" && *sf.outKeyPath != "" { return newHelpErrorf("cannot set both -in-pub and -out-key") } rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath) if err != nil { return fmt.Errorf("error while reading ca-key: %s", err) } caKey, _, err := cert.UnmarshalEd25519PrivateKey(rawCAKey) if err != nil { return fmt.Errorf("error while parsing ca-key: %s", err) } rawCACert, err := ioutil.ReadFile(*sf.caCertPath) if err != nil { return fmt.Errorf("error while reading ca-crt: %s", err) } caCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCACert) if err != nil { return fmt.Errorf("error while parsing ca-crt: %s", err) } if err := caCert.VerifyPrivateKey(caKey); err != nil { return fmt.Errorf("refusing to sign, root certificate does not match private key") } issuer, err := caCert.Sha256Sum() if err != nil { return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err) } if caCert.Expired(time.Now()) { return fmt.Errorf("ca certificate is expired") } // if no duration is given, expire one second before the root expires if *sf.duration <= 0 { *sf.duration = time.Until(caCert.Details.NotAfter) - time.Second*1 } ip, ipNet, err := net.ParseCIDR(*sf.ip) if err != nil { return newHelpErrorf("invalid ip definition: %s", err) } if ip.To4() == nil { return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", *sf.ip) } ipNet.IP = ip groups := []string{} if *sf.groups != "" { for _, rg := range strings.Split(*sf.groups, ",") { g := strings.TrimSpace(rg) if g != "" { groups = 
append(groups, g) } } } subnets := []*net.IPNet{} if *sf.subnets != "" { for _, rs := range strings.Split(*sf.subnets, ",") { rs := strings.Trim(rs, " ") if rs != "" { _, s, err := net.ParseCIDR(rs) if err != nil { return newHelpErrorf("invalid subnet definition: %s", err) } if s.IP.To4() == nil { return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs) } subnets = append(subnets, s) } } } var pub, rawPriv []byte if *sf.inPubPath != "" { rawPub, err := ioutil.ReadFile(*sf.inPubPath) if err != nil { return fmt.Errorf("error while reading in-pub: %s", err) } pub, _, err = cert.UnmarshalX25519PublicKey(rawPub) if err != nil { return fmt.Errorf("error while parsing in-pub: %s", err) } } else { pub, rawPriv = x25519Keypair() } nc := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: *sf.name, Ips: []*net.IPNet{ipNet}, Groups: groups, Subnets: subnets, NotBefore: time.Now(), NotAfter: time.Now().Add(*sf.duration), PublicKey: pub, IsCA: false, Issuer: issuer, }, } if err := nc.CheckRootConstrains(caCert); err != nil { return fmt.Errorf("refusing to sign, root certificate constraints violated: %s", err) } if *sf.outKeyPath == "" { *sf.outKeyPath = *sf.name + ".key" } if *sf.outCertPath == "" { *sf.outCertPath = *sf.name + ".crt" } if _, err := os.Stat(*sf.outCertPath); err == nil { return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath) } err = nc.Sign(caKey) if err != nil { return fmt.Errorf("error while signing: %s", err) } if *sf.inPubPath == "" { if _, err := os.Stat(*sf.outKeyPath); err == nil { return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath) } err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalX25519PrivateKey(rawPriv), 0600) if err != nil { return fmt.Errorf("error while writing out-key: %s", err) } } b, err := nc.MarshalToPEM() if err != nil { return fmt.Errorf("error while marshalling certificate: %s", err) } err = ioutil.WriteFile(*sf.outCertPath, b, 0600) if err != nil { 
return fmt.Errorf("error while writing out-crt: %s", err) } if *sf.outQRPath != "" { b, err = qrcode.Encode(string(b), qrcode.Medium, -5) if err != nil { return fmt.Errorf("error while generating qr code: %s", err) } err = ioutil.WriteFile(*sf.outQRPath, b, 0600) if err != nil { return fmt.Errorf("error while writing out-qr: %s", err) } } return nil } func x25519Keypair() ([]byte, []byte) { privkey := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, privkey); err != nil { panic(err) } pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint) if err != nil { panic(err) } return pubkey, privkey } func signSummary() string { return "sign : create and sign a certificate" } func signHelp(out io.Writer) { sf := newSignFlags() out.Write([]byte("Usage of " + os.Args[0] + " " + signSummary() + "\n")) sf.set.SetOutput(out) sf.set.PrintDefaults() } nebula-1.6.1+dfsg/cmd/nebula-cert/sign_test.go000066400000000000000000000330341434072716400212110ustar00rootroot00000000000000//go:build !windows // +build !windows package main import ( "bytes" "crypto/rand" "io/ioutil" "os" "testing" "time" "github.com/slackhq/nebula/cert" "github.com/stretchr/testify/assert" "golang.org/x/crypto/ed25519" ) //TODO: test file permissions func Test_signSummary(t *testing.T) { assert.Equal(t, "sign : create and sign a certificate", signSummary()) } func Test_signHelp(t *testing.T) { ob := &bytes.Buffer{} signHelp(ob) assert.Equal( t, "Usage of "+os.Args[0]+" sign : create and sign a certificate\n"+ " -ca-crt string\n"+ " \tOptional: path to the signing CA cert (default \"ca.crt\")\n"+ " -ca-key string\n"+ " \tOptional: path to the signing CA key (default \"ca.key\")\n"+ " -duration duration\n"+ " \tOptional: how long the cert should be valid for. The default is 1 second before the signing cert expires. 
Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"\n"+ " -groups string\n"+ " \tOptional: comma separated list of groups\n"+ " -in-pub string\n"+ " \tOptional (if out-key not set): path to read a previously generated public key\n"+ " -ip string\n"+ " \tRequired: ipv4 address and network in CIDR notation to assign the cert\n"+ " -name string\n"+ " \tRequired: name of the cert, usually a hostname\n"+ " -out-crt string\n"+ " \tOptional: path to write the certificate to\n"+ " -out-key string\n"+ " \tOptional (if in-pub not set): path to write the private key to\n"+ " -out-qr string\n"+ " \tOptional: output a qr code image (png) of the certificate\n"+ " -subnets string\n"+ " \tOptional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for\n", ob.String(), ) } func Test_signCert(t *testing.T) { ob := &bytes.Buffer{} eb := &bytes.Buffer{} // required args assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-name is required") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-ip is required") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // cannot set -in-pub and -out-key assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb), "cannot set both -in-pub and -out-key") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // failed to read key ob.Reset() eb.Reset() args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-key: open ./nope: 
"+NoSuchFileError) // failed to unmarshal key ob.Reset() eb.Reset() caKeyF, err := ioutil.TempFile("", "sign-cert.key") assert.Nil(t, err) defer os.Remove(caKeyF.Name()) args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-key: input did not contain a valid PEM encoded block") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // Write a proper ca key for later ob.Reset() eb.Reset() caPub, caPriv, _ := ed25519.GenerateKey(rand.Reader) caKeyF.Write(cert.MarshalEd25519PrivateKey(caPriv)) // failed to read cert args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} assert.EqualError(t, signCert(args, ob, eb), "error while reading ca-crt: open ./nope: "+NoSuchFileError) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // failed to unmarshal cert ob.Reset() eb.Reset() caCrtF, err := ioutil.TempFile("", "sign-cert.crt") assert.Nil(t, err) defer os.Remove(caCrtF.Name()) args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} assert.EqualError(t, signCert(args, ob, eb), "error while parsing ca-crt: input did not contain a valid PEM encoded block") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // write a proper ca cert for later ca := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "ca", NotBefore: time.Now(), NotAfter: time.Now().Add(time.Minute * 200), PublicKey: caPub, IsCA: true, }, } b, _ := ca.MarshalToPEM() caCrtF.Write(b) // failed to read pub args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"} assert.EqualError(t, 
signCert(args, ob, eb), "error while reading in-pub: open ./nope: "+NoSuchFileError) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // failed to unmarshal pub ob.Reset() eb.Reset() inPubF, err := ioutil.TempFile("", "in.pub") assert.Nil(t, err) defer os.Remove(inPubF.Name()) args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"} assert.EqualError(t, signCert(args, ob, eb), "error while parsing in-pub: input did not contain a valid PEM encoded block") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // write a proper pub for later ob.Reset() eb.Reset() inPub, _ := x25519Keypair() inPubF.Write(cert.MarshalX25519PublicKey(inPub)) // bad ip cidr ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: invalid CIDR address: a1.1.1.1/24") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"} assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // bad subnet cidr ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"} assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: invalid CIDR address: a") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", 
"1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"} assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // mismatched ca key _, caPriv2, _ := ed25519.GenerateKey(rand.Reader) caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key") assert.Nil(t, err) defer os.Remove(caKeyF2.Name()) caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2)) ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"} assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate does not match private key") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // failed key write ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"} assert.EqualError(t, signCert(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // create temp key file keyF, err := ioutil.TempFile("", "test.key") assert.Nil(t, err) os.Remove(keyF.Name()) // failed cert write ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"} assert.EqualError(t, signCert(args, ob, eb), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) os.Remove(keyF.Name()) // create temp cert file crtF, err := ioutil.TempFile("", "test.crt") assert.Nil(t, err) 
os.Remove(crtF.Name()) // test proper cert with removed empty groups and subnets ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} assert.Nil(t, signCert(args, ob, eb)) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // read cert and key files rb, _ := ioutil.ReadFile(keyF.Name()) lKey, b, err := cert.UnmarshalX25519PrivateKey(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Len(t, lKey, 32) rb, _ = ioutil.ReadFile(crtF.Name()) lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Equal(t, "test", lCrt.Details.Name) assert.Equal(t, "1.1.1.1/24", lCrt.Details.Ips[0].String()) assert.Len(t, lCrt.Details.Ips, 1) assert.False(t, lCrt.Details.IsCA) assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Details.Groups) assert.Len(t, lCrt.Details.Subnets, 3) assert.Len(t, lCrt.Details.PublicKey, 32) assert.Equal(t, time.Duration(time.Minute*100), lCrt.Details.NotAfter.Sub(lCrt.Details.NotBefore)) sns := []string{} for _, sn := range lCrt.Details.Subnets { sns = append(sns, sn.String()) } assert.Equal(t, []string{"10.1.1.1/32", "10.2.2.2/32", "10.5.5.5/32"}, sns) issuer, _ := ca.Sha256Sum() assert.Equal(t, issuer, lCrt.Details.Issuer) assert.True(t, lCrt.CheckSignature(caPub)) // test proper cert with in-pub os.Remove(keyF.Name()) os.Remove(crtF.Name()) ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"} assert.Nil(t, signCert(args, ob, eb)) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // read cert file and check pub key matches in-pub rb, _ = ioutil.ReadFile(crtF.Name()) lCrt, b, err = 
cert.UnmarshalNebulaCertificateFromPEM(rb) assert.Len(t, b, 0) assert.Nil(t, err) assert.Equal(t, lCrt.Details.PublicKey, inPub) // test refuse to sign cert with duration beyond root ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate") assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // create valid cert/key for overwrite tests os.Remove(keyF.Name()) os.Remove(crtF.Name()) args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} assert.Nil(t, signCert(args, ob, eb)) // test that we won't overwrite existing key file os.Remove(crtF.Name()) ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing key: "+keyF.Name()) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) // create valid cert/key for overwrite tests os.Remove(keyF.Name()) os.Remove(crtF.Name()) args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} assert.Nil(t, signCert(args, ob, eb)) 
// test that we won't overwrite existing certificate file os.Remove(keyF.Name()) ob.Reset() eb.Reset() args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"} assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name()) assert.Empty(t, ob.String()) assert.Empty(t, eb.String()) } nebula-1.6.1+dfsg/cmd/nebula-cert/test_darwin.go000066400000000000000000000001651434072716400215340ustar00rootroot00000000000000package main const NoSuchFileError = "no such file or directory" const NoSuchDirError = "no such file or directory" nebula-1.6.1+dfsg/cmd/nebula-cert/test_linux.go000066400000000000000000000001651434072716400214070ustar00rootroot00000000000000package main const NoSuchFileError = "no such file or directory" const NoSuchDirError = "no such file or directory" nebula-1.6.1+dfsg/cmd/nebula-cert/test_windows.go000066400000000000000000000002271434072716400217410ustar00rootroot00000000000000package main const NoSuchFileError = "The system cannot find the file specified." const NoSuchDirError = "The system cannot find the path specified." 
nebula-1.6.1+dfsg/cmd/nebula-cert/verify.go000066400000000000000000000036261434072716400205220ustar00rootroot00000000000000package main import ( "flag" "fmt" "io" "io/ioutil" "os" "strings" "time" "github.com/slackhq/nebula/cert" ) type verifyFlags struct { set *flag.FlagSet caPath *string certPath *string } func newVerifyFlags() *verifyFlags { vf := verifyFlags{set: flag.NewFlagSet("verify", flag.ContinueOnError)} vf.set.Usage = func() {} vf.caPath = vf.set.String("ca", "", "Required: path to a file containing one or more ca certificates") vf.certPath = vf.set.String("crt", "", "Required: path to a file containing a single certificate") return &vf } func verify(args []string, out io.Writer, errOut io.Writer) error { vf := newVerifyFlags() err := vf.set.Parse(args) if err != nil { return err } if err := mustFlagString("ca", vf.caPath); err != nil { return err } if err := mustFlagString("crt", vf.certPath); err != nil { return err } rawCACert, err := ioutil.ReadFile(*vf.caPath) if err != nil { return fmt.Errorf("error while reading ca: %s", err) } caPool := cert.NewCAPool() for { rawCACert, err = caPool.AddCACertificate(rawCACert) if err != nil { return fmt.Errorf("error while adding ca cert to pool: %s", err) } if rawCACert == nil || len(rawCACert) == 0 || strings.TrimSpace(string(rawCACert)) == "" { break } } rawCert, err := ioutil.ReadFile(*vf.certPath) if err != nil { return fmt.Errorf("unable to read crt; %s", err) } c, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert) if err != nil { return fmt.Errorf("error while parsing crt: %s", err) } good, err := c.Verify(time.Now(), caPool) if !good { return err } return nil } func verifySummary() string { return "verify : verifies a certificate isn't expired and was signed by a trusted authority." 
} func verifyHelp(out io.Writer) { vf := newVerifyFlags() out.Write([]byte("Usage of " + os.Args[0] + " " + verifySummary() + "\n")) vf.set.SetOutput(out) vf.set.PrintDefaults() } nebula-1.6.1+dfsg/cmd/nebula-cert/verify_test.go000066400000000000000000000100241434072716400215470ustar00rootroot00000000000000package main import ( "bytes" "crypto/rand" "io/ioutil" "os" "testing" "time" "github.com/slackhq/nebula/cert" "github.com/stretchr/testify/assert" "golang.org/x/crypto/ed25519" ) func Test_verifySummary(t *testing.T) { assert.Equal(t, "verify : verifies a certificate isn't expired and was signed by a trusted authority.", verifySummary()) } func Test_verifyHelp(t *testing.T) { ob := &bytes.Buffer{} verifyHelp(ob) assert.Equal( t, "Usage of "+os.Args[0]+" verify : verifies a certificate isn't expired and was signed by a trusted authority.\n"+ " -ca string\n"+ " \tRequired: path to a file containing one or more ca certificates\n"+ " -crt string\n"+ " \tRequired: path to a file containing a single certificate\n", ob.String(), ) } func Test_verify(t *testing.T) { time.Local = time.UTC ob := &bytes.Buffer{} eb := &bytes.Buffer{} // required args assertHelpError(t, verify([]string{"-ca", "derp"}, ob, eb), "-crt is required") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assertHelpError(t, verify([]string{"-crt", "derp"}, ob, eb), "-ca is required") assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) // no ca at path ob.Reset() eb.Reset() err := verify([]string{"-ca", "does_not_exist", "-crt", "does_not_exist"}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "error while reading ca: open does_not_exist: "+NoSuchFileError) // invalid ca at path ob.Reset() eb.Reset() caFile, err := ioutil.TempFile("", "verify-ca") assert.Nil(t, err) defer os.Remove(caFile.Name()) caFile.WriteString("-----BEGIN NOPE-----") err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb) 
assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "error while adding ca cert to pool: input did not contain a valid PEM encoded block") // make a ca for later caPub, caPriv, _ := ed25519.GenerateKey(rand.Reader) ca := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "test-ca", NotBefore: time.Now().Add(time.Hour * -1), NotAfter: time.Now().Add(time.Hour * 2), PublicKey: caPub, IsCA: true, }, } ca.Sign(caPriv) b, _ := ca.MarshalToPEM() caFile.Truncate(0) caFile.Seek(0, 0) caFile.Write(b) // no crt at path err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "unable to read crt; open does_not_exist: "+NoSuchFileError) // invalid crt at path ob.Reset() eb.Reset() certFile, err := ioutil.TempFile("", "verify-cert") assert.Nil(t, err) defer os.Remove(certFile.Name()) certFile.WriteString("-----BEGIN NOPE-----") err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "error while parsing crt: input did not contain a valid PEM encoded block") // unverifiable cert at path _, badPriv, _ := ed25519.GenerateKey(rand.Reader) certPub, _ := x25519Keypair() signer, _ := ca.Sha256Sum() crt := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "test-cert", NotBefore: time.Now().Add(time.Hour * -1), NotAfter: time.Now().Add(time.Hour), PublicKey: certPub, IsCA: false, Issuer: signer, }, } crt.Sign(badPriv) b, _ = crt.MarshalToPEM() certFile.Truncate(0) certFile.Seek(0, 0) certFile.Write(b) err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.EqualError(t, err, "certificate signature did not match") // verified cert at path crt.Sign(caPriv) b, _ = crt.MarshalToPEM() 
certFile.Truncate(0) certFile.Seek(0, 0) certFile.Write(b) err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb) assert.Equal(t, "", ob.String()) assert.Equal(t, "", eb.String()) assert.Nil(t, err) } nebula-1.6.1+dfsg/cmd/nebula-service/000077500000000000000000000000001434072716400173635ustar00rootroot00000000000000nebula-1.6.1+dfsg/cmd/nebula-service/logs_generic.go000066400000000000000000000002641434072716400223540ustar00rootroot00000000000000//go:build !windows // +build !windows package main import "github.com/sirupsen/logrus" func HookLogger(l *logrus.Logger) { // Do nothing, let the logs flow to stdout/stderr } nebula-1.6.1+dfsg/cmd/nebula-service/logs_windows.go000066400000000000000000000020661434072716400224340ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "os" "github.com/kardianos/service" "github.com/sirupsen/logrus" ) // HookLogger routes the logrus logs through the service logger so that they end up in the Windows Event Viewer // logrus output will be discarded func HookLogger(l *logrus.Logger) { l.AddHook(newLogHook(logger)) l.SetOutput(ioutil.Discard) } type logHook struct { sl service.Logger } func newLogHook(sl service.Logger) *logHook { return &logHook{sl: sl} } func (h *logHook) Fire(entry *logrus.Entry) error { line, err := entry.String() if err != nil { fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) return err } switch entry.Level { case logrus.PanicLevel: return h.sl.Error(line) case logrus.FatalLevel: return h.sl.Error(line) case logrus.ErrorLevel: return h.sl.Error(line) case logrus.WarnLevel: return h.sl.Warning(line) case logrus.InfoLevel: return h.sl.Info(line) case logrus.DebugLevel: return h.sl.Info(line) default: return nil } } func (h *logHook) Levels() []logrus.Level { return logrus.AllLevels } nebula-1.6.1+dfsg/cmd/nebula-service/main.go000066400000000000000000000030111434072716400206310ustar00rootroot00000000000000package main import ( "flag" "fmt" "os" 
"github.com/sirupsen/logrus" "github.com/slackhq/nebula" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/util" ) // A version string that can be set with // // -ldflags "-X main.Build=SOMEVERSION" // // at compile-time. var Build string func main() { serviceFlag := flag.String("service", "", "Control the system service.") configPath := flag.String("config", "", "Path to either a file or directory to load configuration from") configTest := flag.Bool("test", false, "Test the config and print the end result. Non zero exit indicates a faulty config") printVersion := flag.Bool("version", false, "Print version") printUsage := flag.Bool("help", false, "Print command line usage") flag.Parse() if *printVersion { fmt.Printf("Version: %s\n", Build) os.Exit(0) } if *printUsage { flag.Usage() os.Exit(0) } if *serviceFlag != "" { doService(configPath, configTest, Build, serviceFlag) os.Exit(1) } if *configPath == "" { fmt.Println("-config flag must be set") flag.Usage() os.Exit(1) } l := logrus.New() l.Out = os.Stdout c := config.NewC(l) err := c.Load(*configPath) if err != nil { fmt.Printf("failed to load config: %s", err) os.Exit(1) } ctrl, err := nebula.Main(c, *configTest, Build, l, nil) switch v := err.(type) { case util.ContextualError: v.Log(l) os.Exit(1) case error: l.WithError(err).Error("Failed to start") os.Exit(1) } if !*configTest { ctrl.Start() ctrl.ShutdownBlock() } os.Exit(0) } nebula-1.6.1+dfsg/cmd/nebula-service/service.go000066400000000000000000000047161434072716400213620ustar00rootroot00000000000000package main import ( "fmt" "log" "os" "path/filepath" "github.com/kardianos/service" "github.com/sirupsen/logrus" "github.com/slackhq/nebula" "github.com/slackhq/nebula/config" ) var logger service.Logger type program struct { configPath *string configTest *bool build string control *nebula.Control } func (p *program) Start(s service.Service) error { // Start should not block. 
logger.Info("Nebula service starting.") l := logrus.New() HookLogger(l) c := config.NewC(l) err := c.Load(*p.configPath) if err != nil { return fmt.Errorf("failed to load config: %s", err) } p.control, err = nebula.Main(c, *p.configTest, Build, l, nil) if err != nil { return err } p.control.Start() return nil } func (p *program) Stop(s service.Service) error { logger.Info("Nebula service stopping.") p.control.Stop() return nil } func doService(configPath *string, configTest *bool, build string, serviceFlag *string) { if *configPath == "" { ex, err := os.Executable() if err != nil { panic(err) } *configPath = filepath.Dir(ex) + "/config.yaml" } svcConfig := &service.Config{ Name: "Nebula", DisplayName: "Nebula Network Service", Description: "Nebula network connectivity daemon for encrypted communications", Arguments: []string{"-service", "run", "-config", *configPath}, } prg := &program{ configPath: configPath, configTest: configTest, build: build, } // Here are what the different loggers are doing: // - `log` is the standard go log utility, meant to be used while the process is still attached to stdout/stderr // - `logger` is the service log utility that may be attached to a special place depending on OS (Windows will have it attached to the event log) // - above, in `Run` we create a `logrus.Logger` which is what nebula expects to use s, err := service.New(prg, svcConfig) if err != nil { log.Fatal(err) } errs := make(chan error, 5) logger, err = s.Logger(errs) if err != nil { log.Fatal(err) } go func() { for { err := <-errs if err != nil { // Route any errors from the system logger to stdout as a best effort to notice issues there log.Print(err) } } }() switch *serviceFlag { case "run": err = s.Run() if err != nil { // Route any errors to the system logger logger.Error(err) } default: err := service.Control(s, *serviceFlag) if err != nil { log.Printf("Valid actions: %q\n", service.ControlAction) log.Fatal(err) } return } } 
nebula-1.6.1+dfsg/cmd/nebula/000077500000000000000000000000001434072716400157255ustar00rootroot00000000000000nebula-1.6.1+dfsg/cmd/nebula/main.go000066400000000000000000000025351434072716400172050ustar00rootroot00000000000000package main import ( "flag" "fmt" "os" "github.com/sirupsen/logrus" "github.com/slackhq/nebula" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/util" ) // A version string that can be set with // // -ldflags "-X main.Build=SOMEVERSION" // // at compile-time. var Build string func main() { configPath := flag.String("config", "", "Path to either a file or directory to load configuration from") configTest := flag.Bool("test", false, "Test the config and print the end result. Non zero exit indicates a faulty config") printVersion := flag.Bool("version", false, "Print version") printUsage := flag.Bool("help", false, "Print command line usage") flag.Parse() if *printVersion { fmt.Printf("Version: %s\n", Build) os.Exit(0) } if *printUsage { flag.Usage() os.Exit(0) } if *configPath == "" { fmt.Println("-config flag must be set") flag.Usage() os.Exit(1) } l := logrus.New() l.Out = os.Stdout c := config.NewC(l) err := c.Load(*configPath) if err != nil { fmt.Printf("failed to load config: %s", err) os.Exit(1) } ctrl, err := nebula.Main(c, *configTest, Build, l, nil) switch v := err.(type) { case util.ContextualError: v.Log(l) os.Exit(1) case error: l.WithError(err).Error("Failed to start") os.Exit(1) } if !*configTest { ctrl.Start() ctrl.ShutdownBlock() } os.Exit(0) } nebula-1.6.1+dfsg/config/000077500000000000000000000000001434072716400151615ustar00rootroot00000000000000nebula-1.6.1+dfsg/config/config.go000066400000000000000000000173541434072716400167670ustar00rootroot00000000000000package config import ( "context" "errors" "fmt" "io/ioutil" "os" "os/signal" "path/filepath" "sort" "strconv" "strings" "sync" "syscall" "time" "github.com/imdario/mergo" "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) type C struct { path string files 
[]string Settings map[interface{}]interface{} oldSettings map[interface{}]interface{} callbacks []func(*C) l *logrus.Logger reloadLock sync.Mutex } func NewC(l *logrus.Logger) *C { return &C{ Settings: make(map[interface{}]interface{}), l: l, } } // Load will find all yaml files within path and load them in lexical order func (c *C) Load(path string) error { c.path = path c.files = make([]string, 0) err := c.resolve(path, true) if err != nil { return err } if len(c.files) == 0 { return fmt.Errorf("no config files found at %s", path) } sort.Strings(c.files) err = c.parse() if err != nil { return err } return nil } func (c *C) LoadString(raw string) error { if raw == "" { return errors.New("Empty configuration") } return c.parseRaw([]byte(raw)) } // RegisterReloadCallback stores a function to be called when a config reload is triggered. The functions registered // here should decide if they need to make a change to the current process before making the change. HasChanged can be // used to help decide if a change is necessary. // These functions should return quickly or spawn their own go routine if they will take a while func (c *C) RegisterReloadCallback(f func(*C)) { c.callbacks = append(c.callbacks, f) } // InitialLoad returns true if this is the first load of the config, and ReloadConfig has not been called yet. func (c *C) InitialLoad() bool { return c.oldSettings == nil } // HasChanged checks if the underlying structure of the provided key has changed after a config reload. The value of // k in both the old and new settings will be serialized, the result of the string comparison is returned. // If k is an empty string the entire config is tested. // It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating // there is change when there actually wasn't any. 
func (c *C) HasChanged(k string) bool { if c.oldSettings == nil { return false } var ( nv interface{} ov interface{} ) if k == "" { nv = c.Settings ov = c.oldSettings k = "all settings" } else { nv = c.get(k, c.Settings) ov = c.get(k, c.oldSettings) } newVals, err := yaml.Marshal(nv) if err != nil { c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config") } oldVals, err := yaml.Marshal(ov) if err != nil { c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config") } return string(newVals) != string(oldVals) } // CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the // original path provided to Load. The old settings are shallow copied for change detection after the reload. func (c *C) CatchHUP(ctx context.Context) { ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGHUP) go func() { for { select { case <-ctx.Done(): signal.Stop(ch) close(ch) return case <-ch: c.l.Info("Caught HUP, reloading config") c.ReloadConfig() } } }() } func (c *C) ReloadConfig() { c.reloadLock.Lock() defer c.reloadLock.Unlock() c.oldSettings = make(map[interface{}]interface{}) for k, v := range c.Settings { c.oldSettings[k] = v } err := c.Load(c.path) if err != nil { c.l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config") return } for _, v := range c.callbacks { v(c) } } func (c *C) ReloadConfigString(raw string) error { c.reloadLock.Lock() defer c.reloadLock.Unlock() c.oldSettings = make(map[interface{}]interface{}) for k, v := range c.Settings { c.oldSettings[k] = v } err := c.LoadString(raw) if err != nil { return err } for _, v := range c.callbacks { v(c) } return nil } // GetString will get the string for k or return the default d if not found or invalid func (c *C) GetString(k, d string) string { r := c.Get(k) if r == nil { return d } return fmt.Sprintf("%v", r) } // GetStringSlice will get the slice of strings for k or return the 
default d if not found or invalid func (c *C) GetStringSlice(k string, d []string) []string { r := c.Get(k) if r == nil { return d } rv, ok := r.([]interface{}) if !ok { return d } v := make([]string, len(rv)) for i := 0; i < len(v); i++ { v[i] = fmt.Sprintf("%v", rv[i]) } return v } // GetMap will get the map for k or return the default d if not found or invalid func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} { r := c.Get(k) if r == nil { return d } v, ok := r.(map[interface{}]interface{}) if !ok { return d } return v } // GetInt will get the int for k or return the default d if not found or invalid func (c *C) GetInt(k string, d int) int { r := c.GetString(k, strconv.Itoa(d)) v, err := strconv.Atoi(r) if err != nil { return d } return v } // GetBool will get the bool for k or return the default d if not found or invalid func (c *C) GetBool(k string, d bool) bool { r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d))) v, err := strconv.ParseBool(r) if err != nil { switch r { case "y", "yes": return true case "n", "no": return false } return d } return v } // GetDuration will get the duration for k or return the default d if not found or invalid func (c *C) GetDuration(k string, d time.Duration) time.Duration { r := c.GetString(k, "") v, err := time.ParseDuration(r) if err != nil { return d } return v } func (c *C) Get(k string) interface{} { return c.get(k, c.Settings) } func (c *C) IsSet(k string) bool { return c.get(k, c.Settings) != nil } func (c *C) get(k string, v interface{}) interface{} { parts := strings.Split(k, ".") for _, p := range parts { m, ok := v.(map[interface{}]interface{}) if !ok { return nil } v, ok = m[p] if !ok { return nil } } return v } // direct signifies if this is the config path directly specified by the user, // versus a file/dir found by recursing into that path func (c *C) resolve(path string, direct bool) error { i, err := os.Stat(path) if err != nil { return nil } if !i.IsDir() { 
c.addFile(path, direct) return nil } paths, err := readDirNames(path) if err != nil { return fmt.Errorf("problem while reading directory %s: %s", path, err) } for _, p := range paths { err := c.resolve(filepath.Join(path, p), false) if err != nil { return err } } return nil } func (c *C) addFile(path string, direct bool) error { ext := filepath.Ext(path) if !direct && ext != ".yaml" && ext != ".yml" { return nil } ap, err := filepath.Abs(path) if err != nil { return err } c.files = append(c.files, ap) return nil } func (c *C) parseRaw(b []byte) error { var m map[interface{}]interface{} err := yaml.Unmarshal(b, &m) if err != nil { return err } c.Settings = m return nil } func (c *C) parse() error { var m map[interface{}]interface{} for _, path := range c.files { b, err := ioutil.ReadFile(path) if err != nil { return err } var nm map[interface{}]interface{} err = yaml.Unmarshal(b, &nm) if err != nil { return err } // We need to use WithAppendSlice so that firewall rules in separate // files are appended together err = mergo.Merge(&nm, m, mergo.WithAppendSlice) m = nm if err != nil { return err } } c.Settings = m return nil } func readDirNames(path string) ([]string, error) { f, err := os.Open(path) if err != nil { return nil, err } paths, err := f.Readdirnames(-1) f.Close() if err != nil { return nil, err } sort.Strings(paths) return paths, nil } nebula-1.6.1+dfsg/config/config_test.go000066400000000000000000000074501434072716400200220ustar00rootroot00000000000000package config import ( "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" ) func TestConfig_Load(t *testing.T) { l := test.NewLogger() dir, err := ioutil.TempDir("", "config-test") // invalid yaml c := NewC(l) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644) assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}") // simple 
multi config merge c = NewC(l) os.RemoveAll(dir) os.Mkdir(dir, 0755) assert.Nil(t, err) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644) ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644) assert.Nil(t, c.Load(dir)) expected := map[interface{}]interface{}{ "outer": map[interface{}]interface{}{ "inner": "override", }, "new": "hi", } assert.Equal(t, expected, c.Settings) //TODO: test symlinked file //TODO: test symlinked directory } func TestConfig_Get(t *testing.T) { l := test.NewLogger() // test simple type c := NewC(l) c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"} assert.Equal(t, "hi", c.Get("firewall.outbound")) // test complex type inner := []map[interface{}]interface{}{{"port": "1", "code": "2"}} c.Settings["firewall"] = map[interface{}]interface{}{"outbound": inner} assert.EqualValues(t, inner, c.Get("firewall.outbound")) // test missing assert.Nil(t, c.Get("firewall.nope")) } func TestConfig_GetStringSlice(t *testing.T) { l := test.NewLogger() c := NewC(l) c.Settings["slice"] = []interface{}{"one", "two"} assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{})) } func TestConfig_GetBool(t *testing.T) { l := test.NewLogger() c := NewC(l) c.Settings["bool"] = true assert.Equal(t, true, c.GetBool("bool", false)) c.Settings["bool"] = "true" assert.Equal(t, true, c.GetBool("bool", false)) c.Settings["bool"] = false assert.Equal(t, false, c.GetBool("bool", true)) c.Settings["bool"] = "false" assert.Equal(t, false, c.GetBool("bool", true)) c.Settings["bool"] = "Y" assert.Equal(t, true, c.GetBool("bool", false)) c.Settings["bool"] = "yEs" assert.Equal(t, true, c.GetBool("bool", false)) c.Settings["bool"] = "N" assert.Equal(t, false, c.GetBool("bool", true)) c.Settings["bool"] = "nO" assert.Equal(t, false, c.GetBool("bool", true)) } func TestConfig_HasChanged(t *testing.T) { l := test.NewLogger() // No reload has occurred, return false c := 
NewC(l) c.Settings["test"] = "hi" assert.False(t, c.HasChanged("")) // Test key change c = NewC(l) c.Settings["test"] = "hi" c.oldSettings = map[interface{}]interface{}{"test": "no"} assert.True(t, c.HasChanged("test")) assert.True(t, c.HasChanged("")) // No key change c = NewC(l) c.Settings["test"] = "hi" c.oldSettings = map[interface{}]interface{}{"test": "hi"} assert.False(t, c.HasChanged("test")) assert.False(t, c.HasChanged("")) } func TestConfig_ReloadConfig(t *testing.T) { l := test.NewLogger() done := make(chan bool, 1) dir, err := ioutil.TempDir("", "config-test") assert.Nil(t, err) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644) c := NewC(l) assert.Nil(t, c.Load(dir)) assert.False(t, c.HasChanged("outer.inner")) assert.False(t, c.HasChanged("outer")) assert.False(t, c.HasChanged("")) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644) c.RegisterReloadCallback(func(c *C) { done <- true }) c.ReloadConfig() assert.True(t, c.HasChanged("outer.inner")) assert.True(t, c.HasChanged("outer")) assert.True(t, c.HasChanged("")) // Make sure we call the callbacks select { case <-done: case <-time.After(1 * time.Second): panic("timeout") } } nebula-1.6.1+dfsg/connection_manager.go000066400000000000000000000174771434072716400201140ustar00rootroot00000000000000package nebula import ( "context" "sync" "time" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" ) // TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet // and something like every 10 packets we could lock, send 10, then unlock for a moment type connectionManager struct { hostMap *HostMap in map[iputil.VpnIp]struct{} inLock *sync.RWMutex inCount int out map[iputil.VpnIp]struct{} outLock *sync.RWMutex outCount int TrafficTimer *SystemTimerWheel intf *Interface pendingDeletion map[iputil.VpnIp]int pendingDeletionLock *sync.RWMutex pendingDeletionTimer 
*SystemTimerWheel checkInterval int pendingDeletionInterval int l *logrus.Logger // I wanted to call one matLock } func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager { nc := &connectionManager{ hostMap: intf.hostMap, in: make(map[iputil.VpnIp]struct{}), inLock: &sync.RWMutex{}, inCount: 0, out: make(map[iputil.VpnIp]struct{}), outLock: &sync.RWMutex{}, outCount: 0, TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), intf: intf, pendingDeletion: make(map[iputil.VpnIp]int), pendingDeletionLock: &sync.RWMutex{}, pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), checkInterval: checkInterval, pendingDeletionInterval: pendingDeletionInterval, l: l, } nc.Start(ctx) return nc } func (n *connectionManager) In(ip iputil.VpnIp) { n.inLock.RLock() // If this already exists, return if _, ok := n.in[ip]; ok { n.inLock.RUnlock() return } n.inLock.RUnlock() n.inLock.Lock() n.in[ip] = struct{}{} n.inLock.Unlock() } func (n *connectionManager) Out(ip iputil.VpnIp) { n.outLock.RLock() // If this already exists, return if _, ok := n.out[ip]; ok { n.outLock.RUnlock() return } n.outLock.RUnlock() n.outLock.Lock() // double check since we dropped the lock temporarily if _, ok := n.out[ip]; ok { n.outLock.Unlock() return } n.out[ip] = struct{}{} n.AddTrafficWatch(ip, n.checkInterval) n.outLock.Unlock() } func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool { n.inLock.RLock() if _, ok := n.in[vpnIp]; ok { n.inLock.RUnlock() return true } n.inLock.RUnlock() return false } func (n *connectionManager) ClearIP(ip iputil.VpnIp) { n.inLock.Lock() n.outLock.Lock() delete(n.in, ip) delete(n.out, ip) n.inLock.Unlock() n.outLock.Unlock() } func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) { n.pendingDeletionLock.Lock() delete(n.pendingDeletion, ip) n.pendingDeletionLock.Unlock() } func (n *connectionManager) AddPendingDeletion(ip 
iputil.VpnIp) { n.pendingDeletionLock.Lock() if _, ok := n.pendingDeletion[ip]; ok { n.pendingDeletion[ip] += 1 } else { n.pendingDeletion[ip] = 0 } n.pendingDeletionTimer.Add(ip, time.Second*time.Duration(n.pendingDeletionInterval)) n.pendingDeletionLock.Unlock() } func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool { n.pendingDeletionLock.RLock() if _, ok := n.pendingDeletion[ip]; ok { n.pendingDeletionLock.RUnlock() return true } n.pendingDeletionLock.RUnlock() return false } func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) { n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds)) } func (n *connectionManager) Start(ctx context.Context) { go n.Run(ctx) } func (n *connectionManager) Run(ctx context.Context) { clockSource := time.NewTicker(500 * time.Millisecond) defer clockSource.Stop() p := []byte("") nb := make([]byte, 12, 12) out := make([]byte, mtu) for { select { case <-ctx.Done(): return case now := <-clockSource.C: n.HandleMonitorTick(now, p, nb, out) n.HandleDeletionTick(now) } } } func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) { n.TrafficTimer.advance(now) for { ep := n.TrafficTimer.Purge() if ep == nil { break } vpnIp := ep.(iputil.VpnIp) // Check for traffic coming back in from this host. traf := n.CheckIn(vpnIp) hostinfo, err := n.hostMap.QueryVpnIp(vpnIp) if err != nil { n.l.Debugf("Not found in hostmap: %s", vpnIp) n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) continue } if n.handleInvalidCertificate(now, vpnIp, hostinfo) { continue } // If we saw an incoming packets from this ip and peer's certificate is not // expired, just ignore. if traf { if n.l.Level >= logrus.DebugLevel { n.l.WithField("vpnIp", vpnIp). WithField("tunnelCheck", m{"state": "alive", "method": "passive"}). Debug("Tunnel status") } n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) continue } hostinfo.logger(n.l). WithField("tunnelCheck", m{"state": "testing", "method": "active"}). 
Debug("Tunnel status") if hostinfo != nil && hostinfo.ConnectionState != nil { // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out) } else { hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp) } n.AddPendingDeletion(vpnIp) } } func (n *connectionManager) HandleDeletionTick(now time.Time) { n.pendingDeletionTimer.advance(now) for { ep := n.pendingDeletionTimer.Purge() if ep == nil { break } vpnIp := ep.(iputil.VpnIp) hostinfo, err := n.hostMap.QueryVpnIp(vpnIp) if err != nil { n.l.Debugf("Not found in hostmap: %s", vpnIp) n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) continue } if n.handleInvalidCertificate(now, vpnIp, hostinfo) { continue } // If we saw an incoming packets from this ip and peer's certificate is not // expired, just ignore. traf := n.CheckIn(vpnIp) if traf { n.l.WithField("vpnIp", vpnIp). WithField("tunnelCheck", m{"state": "alive", "method": "active"}). Debug("Tunnel status") n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) continue } // If it comes around on deletion wheel and hasn't resolved itself, delete if n.checkPendingDeletion(vpnIp) { cn := "" if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil { cn = hostinfo.ConnectionState.peerCert.Details.Name } hostinfo.logger(n.l). WithField("tunnelCheck", m{"state": "dead", "method": "active"}). WithField("certName", cn). Info("Tunnel status") n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) // TODO: This is only here to let tests work. 
Should do proper mocking if n.intf.lightHouse != nil { n.intf.lightHouse.DeleteVpnIp(vpnIp) } n.hostMap.DeleteHostInfo(hostinfo) } else { n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) } } } // handleInvalidCertificates will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool { if !n.intf.disconnectInvalid { return false } remoteCert := hostinfo.GetCert() if remoteCert == nil { return false } valid, err := remoteCert.Verify(now, n.intf.caPool) if valid { return false } fingerprint, _ := remoteCert.Sha256Sum() n.l.WithField("vpnIp", vpnIp).WithError(err). WithField("certName", remoteCert.Details.Name). WithField("fingerprint", fingerprint). Info("Remote certificate is no longer valid, tearing down the tunnel") // Inform the remote and close the tunnel locally n.intf.sendCloseTunnel(hostinfo) n.intf.closeTunnel(hostinfo) n.ClearIP(vpnIp) n.ClearPendingDeletion(vpnIp) return true } nebula-1.6.1+dfsg/connection_manager_test.go000066400000000000000000000175101434072716400211370ustar00rootroot00000000000000package nebula import ( "context" "crypto/ed25519" "crypto/rand" "net" "testing" "time" "github.com/flynn/noise" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/test" "github.com/slackhq/nebula/udp" "github.com/stretchr/testify/assert" ) var vpnIp iputil.VpnIp func Test_NewConnectionManagerTest(t *testing.T) { l := test.NewLogger() //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24") vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2")) preferredRanges := []*net.IPNet{localrange} // Very incomplete mock objects hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) cs := &CertState{ rawCertificate: []byte{}, privateKey: []byte{}, certificate: &cert.NebulaCertificate{}, 
rawCertificateNoKey: []byte{}, } lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})} ifce := &Interface{ hostMap: hostMap, inside: &test.NoopTun{}, outside: &udp.Conn{}, certState: cs, firewall: &Firewall{}, lightHouse: lh, handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), l: l, } now := time.Now() // Create manager ctx, cancel := context.WithCancel(context.Background()) defer cancel() nc := newConnectionManager(ctx, l, ifce, 5, 10) p := []byte("") nb := make([]byte, 12, 12) out := make([]byte, mtu) nc.HandleMonitorTick(now, p, nb, out) // Add an ip we have established a connection w/ to hostmap hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil) hostinfo.ConnectionState = &ConnectionState{ certState: cs, H: &noise.HandshakeState{}, } // We saw traffic out to vpnIp nc.Out(vpnIp) assert.NotContains(t, nc.pendingDeletion, vpnIp) assert.Contains(t, nc.hostMap.Hosts, vpnIp) // Move ahead 5s. Nothing should happen next_tick := now.Add(5 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // Move ahead 6s. 
We haven't heard back next_tick = now.Add(6 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // This host should now be up for deletion assert.Contains(t, nc.pendingDeletion, vpnIp) assert.Contains(t, nc.hostMap.Hosts, vpnIp) // Move ahead some more next_tick = now.Add(45 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // The host should be evicted assert.NotContains(t, nc.pendingDeletion, vpnIp) assert.NotContains(t, nc.hostMap.Hosts, vpnIp) } func Test_NewConnectionManagerTest2(t *testing.T) { l := test.NewLogger() //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24") preferredRanges := []*net.IPNet{localrange} // Very incomplete mock objects hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) cs := &CertState{ rawCertificate: []byte{}, privateKey: []byte{}, certificate: &cert.NebulaCertificate{}, rawCertificateNoKey: []byte{}, } lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})} ifce := &Interface{ hostMap: hostMap, inside: &test.NoopTun{}, outside: &udp.Conn{}, certState: cs, firewall: &Firewall{}, lightHouse: lh, handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), l: l, } now := time.Now() // Create manager ctx, cancel := context.WithCancel(context.Background()) defer cancel() nc := newConnectionManager(ctx, l, ifce, 5, 10) p := []byte("") nb := make([]byte, 12, 12) out := make([]byte, mtu) nc.HandleMonitorTick(now, p, nb, out) // Add an ip we have established a connection w/ to hostmap hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil) hostinfo.ConnectionState = &ConnectionState{ certState: cs, H: &noise.HandshakeState{}, } // We saw traffic out to vpnIp nc.Out(vpnIp) assert.NotContains(t, nc.pendingDeletion, vpnIp) assert.Contains(t, 
nc.hostMap.Hosts, vpnIp) // Move ahead 5s. Nothing should happen next_tick := now.Add(5 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // Move ahead 6s. We haven't heard back next_tick = now.Add(6 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // This host should now be up for deletion assert.Contains(t, nc.pendingDeletion, vpnIp) assert.Contains(t, nc.hostMap.Hosts, vpnIp) // We heard back this time nc.In(vpnIp) // Move ahead some more next_tick = now.Add(45 * time.Second) nc.HandleMonitorTick(next_tick, p, nb, out) nc.HandleDeletionTick(next_tick) // The host should be evicted assert.NotContains(t, nc.pendingDeletion, vpnIp) assert.Contains(t, nc.hostMap.Hosts, vpnIp) } // Check if we can disconnect the peer. // Validate if the peer's certificate is invalid (expired, etc.) // Disconnect only if disconnectInvalid: true is set. func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) { now := time.Now() l := test.NewLogger() ipNet := net.IPNet{ IP: net.IPv4(172, 1, 1, 2), Mask: net.IPMask{255, 255, 255, 0}, } _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24") preferredRanges := []*net.IPNet{localrange} hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) // Generate keys for CA and peer's cert. 
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader) caCert := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "ca", NotBefore: now, NotAfter: now.Add(1 * time.Hour), IsCA: true, PublicKey: pubCA, }, } caCert.Sign(privCA) ncp := &cert.NebulaCAPool{ CAs: cert.NewCAPool().CAs, } ncp.CAs["ca"] = &caCert pubCrt, _, _ := ed25519.GenerateKey(rand.Reader) peerCert := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host", Ips: []*net.IPNet{&ipNet}, Subnets: []*net.IPNet{}, NotBefore: now, NotAfter: now.Add(60 * time.Second), PublicKey: pubCrt, IsCA: false, Issuer: "ca", }, } peerCert.Sign(privCA) cs := &CertState{ rawCertificate: []byte{}, privateKey: []byte{}, certificate: &cert.NebulaCertificate{}, rawCertificateNoKey: []byte{}, } lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})} ifce := &Interface{ hostMap: hostMap, inside: &test.NoopTun{}, outside: &udp.Conn{}, certState: cs, firewall: &Firewall{}, lightHouse: lh, handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), l: l, disconnectInvalid: true, caPool: ncp, } // Create manager ctx, cancel := context.WithCancel(context.Background()) defer cancel() nc := newConnectionManager(ctx, l, ifce, 5, 10) ifce.connectionManager = nc hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil) hostinfo.ConnectionState = &ConnectionState{ certState: cs, peerCert: &peerCert, H: &noise.HandshakeState{}, } // Move ahead 45s. // Check if to disconnect with invalid certificate. // Should be alive. nextTick := now.Add(45 * time.Second) destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo) assert.False(t, destroyed) // Move ahead 61s. // Check if to disconnect with invalid certificate. // Should be disconnected. 
nextTick = now.Add(61 * time.Second) destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo) assert.True(t, destroyed) } nebula-1.6.1+dfsg/connection_state.go000066400000000000000000000040501434072716400176010ustar00rootroot00000000000000package nebula import ( "crypto/rand" "encoding/json" "sync" "sync/atomic" "github.com/flynn/noise" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" ) const ReplayWindow = 1024 type ConnectionState struct { eKey *NebulaCipherState dKey *NebulaCipherState H *noise.HandshakeState certState *CertState peerCert *cert.NebulaCertificate initiator bool atomicMessageCounter uint64 window *Bits queueLock sync.Mutex writeLock sync.Mutex ready bool } func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState { cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256) if f.cipher == "chachapoly" { cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256) } curCertState := f.certState static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey} b := NewBits(ReplayWindow) // Clear out bit 0, we never transmit it and we don't want it showing as packet loss b.Update(l, 0) hs, err := noise.NewHandshakeState(noise.Config{ CipherSuite: cs, Random: rand.Reader, Pattern: pattern, Initiator: initiator, StaticKeypair: static, PresharedKey: psk, PresharedKeyPlacement: pskStage, }) if err != nil { return nil } // The queue and ready params prevent a counter race that would happen when // sending stored packets and simultaneously accepting new traffic. 
ci := &ConnectionState{ H: hs, initiator: initiator, window: b, ready: false, certState: curCertState, } return ci } func (cs *ConnectionState) MarshalJSON() ([]byte, error) { return json.Marshal(m{ "certificate": cs.peerCert, "initiator": cs.initiator, "message_counter": atomic.LoadUint64(&cs.atomicMessageCounter), "ready": cs.ready, }) } nebula-1.6.1+dfsg/control.go000066400000000000000000000155261434072716400157340ustar00rootroot00000000000000package nebula import ( "context" "net" "os" "os/signal" "sync/atomic" "syscall" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) // Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching // core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc type Control struct { f *Interface l *logrus.Logger cancel context.CancelFunc sshStart func() statsStart func() dnsStart func() } type ControlHostInfo struct { VpnIp net.IP `json:"vpnIp"` LocalIndex uint32 `json:"localIndex"` RemoteIndex uint32 `json:"remoteIndex"` RemoteAddrs []*udp.Addr `json:"remoteAddrs"` CachedPackets int `json:"cachedPackets"` Cert *cert.NebulaCertificate `json:"cert"` MessageCounter uint64 `json:"messageCounter"` CurrentRemote *udp.Addr `json:"currentRemote"` CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"` CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"` } // Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock() func (c *Control) Start() { // Activate the interface c.f.activate() // Call all the delayed funcs that waited patiently for the interface to be created. if c.sshStart != nil { go c.sshStart() } if c.statsStart != nil { go c.statsStart() } if c.dnsStart != nil { go c.dnsStart() } // Start reading packets. 
c.f.run() } // Stop signals nebula to shutdown, returns after the shutdown is complete func (c *Control) Stop() { // Stop the handshakeManager (and other serivces), to prevent new tunnels from // being created while we're shutting them all down. c.cancel() c.CloseAllTunnels(false) if err := c.f.Close(); err != nil { c.l.WithError(err).Error("Close interface failed") } c.l.Info("Goodbye") } // ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled func (c *Control) ShutdownBlock() { sigChan := make(chan os.Signal) signal.Notify(sigChan, syscall.SIGTERM) signal.Notify(sigChan, syscall.SIGINT) rawSig := <-sigChan sig := rawSig.String() c.l.WithField("signal", sig).Info("Caught signal, shutting down") c.Stop() } // RebindUDPServer asks the UDP listener to rebind it's listener. Mainly used on mobile clients when interfaces change func (c *Control) RebindUDPServer() { _ = c.f.outside.Rebind() // Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0 c.f.lightHouse.SendUpdate(c.f) // Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes c.f.rebindCount++ } // ListHostmap returns details about the actual or pending (handshaking) hostmap func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo { if pendingMap { return listHostMap(c.f.handshakeManager.pendingHostMap) } else { return listHostMap(c.f.hostMap) } } // GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo { var hm *HostMap if pending { hm = c.f.handshakeManager.pendingHostMap } else { hm = c.f.hostMap } h, err := hm.QueryVpnIp(vpnIp) if err != nil { return nil } ch := copyHostInfo(h, c.f.hostMap.preferredRanges) return &ch } // SetRemoteForTunnel forces a tunnel to use a specific remote func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr 
udp.Addr) *ControlHostInfo { hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp) if err != nil { return nil } hostInfo.SetRemote(addr.Copy()) ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges) return &ch } // CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well. func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool { hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp) if err != nil { return false } if !localOnly { c.f.send( header.CloseTunnel, 0, hostInfo.ConnectionState, hostInfo, []byte{}, make([]byte, 12, 12), make([]byte, mtu), ) } c.f.closeTunnel(hostInfo) return true } // CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down, optionally you can avoid shutting down lighthouse tunnels // the int returned is a count of tunnels closed func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) { //TODO: this is probably better as a function in ConnectionManager or HostMap directly lighthouses := c.f.lightHouse.GetLighthouses() shutdown := func(h *HostInfo) { if excludeLighthouses { if _, ok := lighthouses[h.vpnIp]; ok { return } } c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu)) c.f.closeTunnel(h) c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote). Debug("Sending close tunnel message") closed++ } // Learn which hosts are being used as relays, so we can shut them down last. 
relayingHosts := map[iputil.VpnIp]*HostInfo{} // Grab the hostMap lock to access the Relays map c.f.hostMap.Lock() for _, relayingHost := range c.f.hostMap.Relays { relayingHosts[relayingHost.vpnIp] = relayingHost } c.f.hostMap.Unlock() hostInfos := []*HostInfo{} // Grab the hostMap lock to access the Hosts map c.f.hostMap.Lock() for _, relayHost := range c.f.hostMap.Hosts { if _, ok := relayingHosts[relayHost.vpnIp]; !ok { hostInfos = append(hostInfos, relayHost) } } c.f.hostMap.Unlock() for _, h := range hostInfos { shutdown(h) } for _, h := range relayingHosts { shutdown(h) } return } func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo { chi := ControlHostInfo{ VpnIp: h.vpnIp.ToIP(), LocalIndex: h.localIndexId, RemoteIndex: h.remoteIndexId, RemoteAddrs: h.remotes.CopyAddrs(preferredRanges), CachedPackets: len(h.packetStore), CurrentRelaysToMe: h.relayState.CopyRelayIps(), CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(), } if h.ConnectionState != nil { chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter) } if c := h.GetCert(); c != nil { chi.Cert = c.Copy() } if h.remote != nil { chi.CurrentRemote = h.remote.Copy() } return chi } func listHostMap(hm *HostMap) []ControlHostInfo { hm.RLock() hosts := make([]ControlHostInfo, len(hm.Hosts)) i := 0 for _, v := range hm.Hosts { hosts[i] = copyHostInfo(v, hm.preferredRanges) i++ } hm.RUnlock() return hosts } nebula-1.6.1+dfsg/control_test.go000066400000000000000000000072631434072716400167720ustar00rootroot00000000000000package nebula import ( "net" "reflect" "testing" "time" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/test" "github.com/slackhq/nebula/udp" "github.com/stretchr/testify/assert" ) func TestControl_GetHostInfoByVpnIp(t *testing.T) { l := test.NewLogger() // Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object 
// To properly ensure we are not exposing core memory to the caller hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0)) remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444) remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444) ipNet := net.IPNet{ IP: net.IPv4(1, 2, 3, 4), Mask: net.IPMask{255, 255, 255, 0}, } ipNet2 := net.IPNet{ IP: net.ParseIP("1:2:3:4:5:6:7:8"), Mask: net.IPMask{255, 255, 255, 0}, } crt := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "test", Ips: []*net.IPNet{&ipNet}, Subnets: []*net.IPNet{}, Groups: []string{"default-group"}, NotBefore: time.Unix(1, 0), NotAfter: time.Unix(2, 0), PublicKey: []byte{5, 6, 7, 8}, IsCA: false, Issuer: "the-issuer", InvertedGroups: map[string]struct{}{"default-group": {}}, }, Signature: []byte{1, 2, 1, 2, 1, 3}, } remotes := NewRemoteList() remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port))) remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port))) hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{ remote: remote1, remotes: remotes, ConnectionState: &ConnectionState{ peerCert: crt, }, remoteIndexId: 200, localIndexId: 201, vpnIp: iputil.Ip2VpnIp(ipNet.IP), relayState: RelayState{ relays: map[iputil.VpnIp]struct{}{}, relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIdx: map[uint32]*Relay{}, }, }) hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{ remote: remote1, remotes: remotes, ConnectionState: &ConnectionState{ peerCert: nil, }, remoteIndexId: 200, localIndexId: 201, vpnIp: iputil.Ip2VpnIp(ipNet2.IP), relayState: RelayState{ relays: map[iputil.VpnIp]struct{}{}, relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIdx: map[uint32]*Relay{}, }, }) c := Control{ f: &Interface{ hostMap: hm, }, l: logrus.New(), } thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false) expectedInfo := ControlHostInfo{ VpnIp: net.IPv4(1, 2, 3, 4).To4(), LocalIndex: 201, RemoteIndex: 200, RemoteAddrs: []*udp.Addr{remote2, remote1}, CachedPackets: 0, 
Cert: crt.Copy(), MessageCounter: 0, CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444), CurrentRelaysToMe: []iputil.VpnIp{}, CurrentRelaysThroughMe: []iputil.VpnIp{}, } // Make sure we don't have any unexpected fields assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi) test.AssertDeepCopyEqual(t, &expectedInfo, thi) // Make sure we don't panic if the host info doesn't have a cert yet assert.NotPanics(t, func() { thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false) }) } func assertFields(t *testing.T, expected []string, actualStruct interface{}) { val := reflect.ValueOf(actualStruct).Elem() fields := make([]string, val.NumField()) for i := 0; i < val.NumField(); i++ { fields[i] = val.Type().Field(i).Name } assert.Equal(t, expected, fields) } nebula-1.6.1+dfsg/control_tester.go000066400000000000000000000106121434072716400173110ustar00rootroot00000000000000//go:build e2e_testing // +build e2e_testing package nebula import ( "net" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/overlay" "github.com/slackhq/nebula/udp" ) // WaitForTypeByIndex will pipe all messages from this control device into the pipeTo control device // returning after a message matching the criteria has been piped func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) { h := &header.H{} for { p := c.f.outside.Get(true) if err := h.Parse(p.Data); err != nil { panic(err) } pipeTo.InjectUDPPacket(p) if h.Type == msgType && h.Subtype == subType { return } } } // WaitForTypeByIndex is similar to WaitForType except it adds an index check // Useful if you have many nodes communicating and want to wait to find a specific nodes packet func (c *Control) WaitForTypeByIndex(toIndex uint32, 
msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) { h := &header.H{} for { p := c.f.outside.Get(true) if err := h.Parse(p.Data); err != nil { panic(err) } pipeTo.InjectUDPPacket(p) if h.RemoteIndex == toIndex && h.Type == msgType && h.Subtype == subType { return } } } // InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp // This is necessary if you did not configure static hosts or are not running a lighthouse func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) { c.f.lightHouse.Lock() remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp)) remoteList.Lock() defer remoteList.Unlock() c.f.lightHouse.Unlock() iVpnIp := iputil.Ip2VpnIp(vpnIp) if v4 := toAddr.IP.To4(); v4 != nil { remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port))) } else { remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port))) } } // InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp // This is necessary to inform an initiator of possible relays for communicating with a responder func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) { c.f.lightHouse.Lock() remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp)) remoteList.Lock() defer remoteList.Unlock() c.f.lightHouse.Unlock() iVpnIp := iputil.Ip2VpnIp(vpnIp) uVpnIp := []uint32{} for _, rVPnIp := range relayVpnIps { uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp))) } remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp) } // GetFromTun will pull a packet off the tun side of nebula func (c *Control) GetFromTun(block bool) []byte { return c.f.inside.(*overlay.TestTun).Get(block) } // GetFromUDP will pull a udp packet off the udp side of nebula func (c *Control) GetFromUDP(block bool) *udp.Packet { return c.f.outside.Get(block) } func (c *Control) GetUDPTxChan() <-chan *udp.Packet { return c.f.outside.TxPackets } func (c *Control) 
GetTunTxChan() <-chan []byte { return c.f.inside.(*overlay.TestTun).TxPackets } // InjectUDPPacket will inject a packet into the udp side of nebula func (c *Control) InjectUDPPacket(p *udp.Packet) { c.f.outside.Send(p) } // InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) { ip := layers.IPv4{ Version: 4, TTL: 64, Protocol: layers.IPProtocolUDP, SrcIP: c.f.inside.Cidr().IP, DstIP: toIp, } udp := layers.UDP{ SrcPort: layers.UDPPort(fromPort), DstPort: layers.UDPPort(toPort), } err := udp.SetNetworkLayerForChecksum(&ip) if err != nil { panic(err) } buffer := gopacket.NewSerializeBuffer() opt := gopacket.SerializeOptions{ ComputeChecksums: true, FixLengths: true, } err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload(data)) if err != nil { panic(err) } c.f.inside.(*overlay.TestTun).Send(buffer.Bytes()) } func (c *Control) GetVpnIp() iputil.VpnIp { return c.f.myVpnIp } func (c *Control) GetUDPAddr() string { return c.f.outside.Addr.String() } func (c *Control) KillPendingTunnel(vpnIp net.IP) bool { hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)] if !ok { return false } c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo) return true } nebula-1.6.1+dfsg/dist/000077500000000000000000000000001434072716400146575ustar00rootroot00000000000000nebula-1.6.1+dfsg/dist/arch/000077500000000000000000000000001434072716400155745ustar00rootroot00000000000000nebula-1.6.1+dfsg/dist/arch/nebula.service000066400000000000000000000004571434072716400204320ustar00rootroot00000000000000[Unit] Description=nebula Wants=basic.target network-online.target After=basic.target network.target network-online.target [Service] SyslogIdentifier=nebula ExecReload=/bin/kill -HUP $MAINPID ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml Restart=always [Install] WantedBy=multi-user.target 
nebula-1.6.1+dfsg/dist/fedora/000077500000000000000000000000001434072716400161175ustar00rootroot00000000000000nebula-1.6.1+dfsg/dist/fedora/nebula.service000066400000000000000000000005341434072716400207510ustar00rootroot00000000000000[Unit] Description=Nebula overlay networking tool After=basic.target network.target network-online.target Before=sshd.service Wants=basic.target network-online.target [Service] ExecReload=/bin/kill -HUP $MAINPID ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml Restart=always SyslogIdentifier=nebula [Install] WantedBy=multi-user.target nebula-1.6.1+dfsg/dist/wireshark/000077500000000000000000000000001434072716400166565ustar00rootroot00000000000000nebula-1.6.1+dfsg/dist/wireshark/nebula.lua000066400000000000000000000107061434072716400206330ustar00rootroot00000000000000local nebula = Proto("nebula", "nebula") local default_settings = { port = 4242, all_ports = false, } nebula.prefs.port = Pref.uint("Port number", default_settings.port, "The UDP port number for Nebula") nebula.prefs.all_ports = Pref.bool("All ports", default_settings.all_ports, "Assume nebula packets on any port, useful when dealing with hole punching") local pf_version = ProtoField.new("version", "nebula.version", ftypes.UINT8, nil, base.DEC, 0xF0) local pf_type = ProtoField.new("type", "nebula.type", ftypes.UINT8, { [0] = "handshake", [1] = "message", [2] = "recvError", [3] = "lightHouse", [4] = "test", [5] = "closeTunnel", }, base.DEC, 0x0F) local pf_subtype = ProtoField.new("subtype", "nebula.subtype", ftypes.UINT8, nil, base.DEC) local pf_subtype_test = ProtoField.new("subtype", "nebula.subtype", ftypes.UINT8, { [0] = "request", [1] = "reply", }, base.DEC) local pf_subtype_handshake = ProtoField.new("subtype", "nebula.subtype", ftypes.UINT8, { [0] = "ix_psk0", }, base.DEC) local pf_reserved = ProtoField.new("reserved", "nebula.reserved", ftypes.UINT16, nil, base.HEX) local pf_remote_index = ProtoField.new("remote index", "nebula.remote_index", ftypes.UINT32, 
nil, base.DEC) local pf_message_counter = ProtoField.new("counter", "nebula.counter", ftypes.UINT64, nil, base.DEC) local pf_payload = ProtoField.new("payload", "nebula.payload", ftypes.BYTES, nil, base.NONE) nebula.fields = { pf_version, pf_type, pf_subtype, pf_subtype_handshake, pf_subtype_test, pf_reserved, pf_remote_index, pf_message_counter, pf_payload } local ef_holepunch = ProtoExpert.new("nebula.holepunch.expert", "Nebula hole punch packet", expert.group.PROTOCOL, expert.severity.NOTE) local ef_punchy = ProtoExpert.new("nebula.punchy.expert", "Nebula punchy keepalive packet", expert.group.PROTOCOL, expert.severity.NOTE) nebula.experts = { ef_holepunch, ef_punchy } local type_field = Field.new("nebula.type") local subtype_field = Field.new("nebula.subtype") function nebula.dissector(tvbuf, pktinfo, root) -- set the protocol column to show our protocol name pktinfo.cols.protocol:set("NEBULA") local pktlen = tvbuf:reported_length_remaining() local tree = root:add(nebula, tvbuf:range(0,pktlen)) if pktlen == 0 then tree:add_proto_expert_info(ef_holepunch) pktinfo.cols.info:append(" (holepunch)") return elseif pktlen == 1 then tree:add_proto_expert_info(ef_punchy) pktinfo.cols.info:append(" (punchy)") return end tree:add(pf_version, tvbuf:range(0,1)) local type = tree:add(pf_type, tvbuf:range(0,1)) local nebula_type = bit32.band(tvbuf:range(0,1):uint(), 0x0F) if nebula_type == 0 then local stage = tvbuf(8,8):uint64() tree:add(pf_subtype_handshake, tvbuf:range(1,1)) type:append_text(" stage " .. stage) pktinfo.cols.info:append(" (" .. type_field().display .. ", stage " .. stage .. ", " .. subtype_field().display .. ")") elseif nebula_type == 4 then tree:add(pf_subtype_test, tvbuf:range(1,1)) pktinfo.cols.info:append(" (" .. type_field().display .. ", " .. subtype_field().display .. ")") else tree:add(pf_subtype, tvbuf:range(1,1)) pktinfo.cols.info:append(" (" .. type_field().display .. 
")") end tree:add(pf_reserved, tvbuf:range(2,2)) tree:add(pf_remote_index, tvbuf:range(4,4)) tree:add(pf_message_counter, tvbuf:range(8,8)) tree:add(pf_payload, tvbuf:range(16,tvbuf:len() - 16)) end function nebula.prefs_changed() if default_settings.all_ports == nebula.prefs.all_ports and default_settings.port == nebula.prefs.port then -- Nothing changed, bail return end -- Remove our old dissector DissectorTable.get("udp.port"):remove_all(nebula) if nebula.prefs.all_ports and default_settings.all_ports ~= nebula.prefs.all_ports then default_settings.all_port = nebula.prefs.all_ports for i=0, 65535 do DissectorTable.get("udp.port"):add(i, nebula) end -- no need to establish again on specific ports return end if default_settings.all_ports ~= nebula.prefs.all_ports then -- Add our new port dissector default_settings.port = nebula.prefs.port DissectorTable.get("udp.port"):add(default_settings.port, nebula) end end DissectorTable.get("udp.port"):add(default_settings.port, nebula) nebula-1.6.1+dfsg/dns_server.go000066400000000000000000000067041434072716400164240ustar00rootroot00000000000000package nebula import ( "fmt" "net" "strconv" "sync" "github.com/miekg/dns" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/iputil" ) // This whole thing should be rewritten to use context var dnsR *dnsRecords var dnsServer *dns.Server var dnsAddr string type dnsRecords struct { sync.RWMutex dnsMap map[string]string hostMap *HostMap } func newDnsRecords(hostMap *HostMap) *dnsRecords { return &dnsRecords{ dnsMap: make(map[string]string), hostMap: hostMap, } } func (d *dnsRecords) Query(data string) string { d.RLock() if r, ok := d.dnsMap[data]; ok { d.RUnlock() return r } d.RUnlock() return "" } func (d *dnsRecords) QueryCert(data string) string { ip := net.ParseIP(data[:len(data)-1]) if ip == nil { return "" } iip := iputil.Ip2VpnIp(ip) hostinfo, err := d.hostMap.QueryVpnIp(iip) if err != nil { return "" } q := hostinfo.GetCert() if q == nil 
{ return "" } cert := q.Details c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAFter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer) return c } func (d *dnsRecords) Add(host, data string) { d.Lock() d.dnsMap[host] = data d.Unlock() } func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) { for _, q := range m.Question { switch q.Qtype { case dns.TypeA: l.Debugf("Query for A %s", q.Name) ip := dnsR.Query(q.Name) if ip != "" { rr, err := dns.NewRR(fmt.Sprintf("%s A %s", q.Name, ip)) if err == nil { m.Answer = append(m.Answer, rr) } } case dns.TypeTXT: a, _, _ := net.SplitHostPort(w.RemoteAddr().String()) b := net.ParseIP(a) // We don't answer these queries from non nebula nodes or localhost //l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR) if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" { return } l.Debugf("Query for TXT %s", q.Name) ip := dnsR.QueryCert(q.Name) if ip != "" { rr, err := dns.NewRR(fmt.Sprintf("%s TXT %s", q.Name, ip)) if err == nil { m.Answer = append(m.Answer, rr) } } } } } func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) m.SetReply(r) m.Compress = false switch r.Opcode { case dns.OpcodeQuery: parseQuery(l, m, w) } w.WriteMsg(m) } func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() { dnsR = newDnsRecords(hostMap) // attach request handler func dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) { handleDnsRequest(l, w, r) }) c.RegisterReloadCallback(func(c *config.C) { reloadDns(l, c) }) return func() { startDns(l, c) } } func getDnsServerAddr(c *config.C) string { return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)) } func startDns(l *logrus.Logger, c *config.C) { dnsAddr = getDnsServerAddr(c) dnsServer = &dns.Server{Addr: dnsAddr, Net: 
"udp"} l.WithField("dnsListener", dnsAddr).Info("Starting DNS responder") err := dnsServer.ListenAndServe() defer dnsServer.Shutdown() if err != nil { l.Errorf("Failed to start server: %s\n ", err.Error()) } } func reloadDns(l *logrus.Logger, c *config.C) { if dnsAddr == getDnsServerAddr(c) { l.Debug("No DNS server config change detected") return } l.Debug("Restarting DNS server") dnsServer.Shutdown() go startDns(l, c) } nebula-1.6.1+dfsg/dns_server_test.go000066400000000000000000000004701434072716400174550ustar00rootroot00000000000000package nebula import ( "testing" "github.com/miekg/dns" ) func TestParsequery(t *testing.T) { //TODO: This test is basically pointless hostMap := &HostMap{} ds := newDnsRecords(hostMap) ds.Add("test.com.com", "1.2.3.4") m := new(dns.Msg) m.SetQuestion("test.com.com", dns.TypeA) //parseQuery(m) } nebula-1.6.1+dfsg/e2e/000077500000000000000000000000001434072716400143675ustar00rootroot00000000000000nebula-1.6.1+dfsg/e2e/doc.go000066400000000000000000000001721434072716400154630ustar00rootroot00000000000000package e2e // This file exists to allow `go fmt` to traverse here on its own. 
The build tags were keeping it out before nebula-1.6.1+dfsg/e2e/handshakes_test.go000066400000000000000000000227001434072716400200670ustar00rootroot00000000000000//go:build e2e_testing // +build e2e_testing package e2e import ( "net" "testing" "time" "github.com/slackhq/nebula" "github.com/slackhq/nebula/e2e/router" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" "github.com/stretchr/testify/assert" ) func BenchmarkHotPath(b *testing.B) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) myControl, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) // Put their info in our lighthouse myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) // Start the servers myControl.Start() theirControl.Start() r := router.NewR(b, myControl, theirControl) r.CancelFlowLogs() for n := 0; n < b.N; n++ { myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) _ = r.RouteForAllUntilTxTun(theirControl) } myControl.Stop() theirControl.Stop() } func TestGoodHandshake(t *testing.T) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) // Put their info in our lighthouse myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) // Start the servers myControl.Start() theirControl.Start() t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side") myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) t.Log("Have them consume my stage 0 packet. 
They have a tunnel now") theirControl.InjectUDPPacket(myControl.GetFromUDP(true)) t.Log("Get their stage 1 packet so that we can play with it") stage1Packet := theirControl.GetFromUDP(true) t.Log("I consume a garbage packet with a proper nebula header for our tunnel") // this should log a statement and get ignored, allowing the real handshake packet to complete the tunnel badPacket := stage1Packet.Copy() badPacket.Data = badPacket.Data[:len(badPacket.Data)-header.Len] myControl.InjectUDPPacket(badPacket) t.Log("Have me consume their real stage 1 packet. I have a tunnel now") myControl.InjectUDPPacket(stage1Packet) t.Log("Wait until we see my cached packet come through") myControl.WaitForType(1, 0, theirControl) t.Log("Make sure our host infos are correct") assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl) t.Log("Get that cached packet and make sure it looks right") myCachedPacket := theirControl.GetFromTun(true) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) t.Log("Do a bidirectional tunnel test") r := router.NewR(t, myControl, theirControl) defer r.RenderFlow() assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) myControl.Stop() theirControl.Stop() //TODO: assert hostmaps } func TestWrongResponderHandshake(t *testing.T) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) // The IPs here are chosen on purpose: // The current remote handling will sort by preference, public, and then lexically. 
// So we need them to have a higher address than evil (we could apply a preference though) myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil) theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil) evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil) // Add their real udp addr, which should be tried after evil. myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) // Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse. myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr) // Build a router so we don't have to reason who gets which packet r := router.NewR(t, myControl, theirControl, evilControl) defer r.RenderFlow() // Start the servers myControl.Start() theirControl.Start() evilControl.Start() t.Log("Start the handshake process, we will route until we see our cached packet get sent to them") myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType { h := &header.H{} err := h.Parse(p.Data) if err != nil { panic(err) } if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 { return router.RouteAndExit } return router.KeepRouting }) //TODO: Assert pending hostmap - I should have a correct hostinfo for them now t.Log("My cached packet should be received by them") myCachedPacket := theirControl.GetFromTun(true) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) t.Log("Test the tunnel with them") assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl) assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) t.Log("Flush all packets from all controllers") r.FlushAll() t.Log("Ensure ensure I don't have any hostinfo artifacts from evil") assert.Nil(t, 
myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil") assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil") //NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete //TODO: assert hostmaps for everyone t.Log("Success!") myControl.Stop() theirControl.Stop() } func Test_Case1_Stage1Race(t *testing.T) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) // Put their info in our lighthouse and vice versa myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr) // Build a router so we don't have to reason who gets which packet r := router.NewR(t, myControl, theirControl) defer r.RenderFlow() // Start the servers myControl.Start() theirControl.Start() t.Log("Trigger a handshake to start on both me and them") myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them")) t.Log("Get both stage 1 handshake packets") myHsForThem := myControl.GetFromUDP(true) theirHsForMe := theirControl.GetFromUDP(true) r.Log("Now inject both stage 1 handshake packets") r.InjectUDPPacket(theirControl, myControl, theirHsForMe) r.InjectUDPPacket(myControl, theirControl, myHsForThem) //TODO: they should win, grab their index for me and make sure I use it in the end. 
r.Log("They should not have a stage 2 (won the race) but I should send one") r.InjectUDPPacket(myControl, theirControl, myControl.GetFromUDP(true)) r.Log("Route for me until I send a message packet to them") r.RouteForAllUntilAfterMsgTypeTo(theirControl, header.Message, header.MessageNone) t.Log("My cached packet should be received by them") myCachedPacket := theirControl.GetFromTun(true) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80) t.Log("Route for them until I send a message packet to me") theirControl.WaitForType(1, 0, myControl) t.Log("Their cached packet should be received by me") theirCachedPacket := myControl.GetFromTun(true) assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80) t.Log("Do a bidirectional tunnel test") assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r) myControl.Stop() theirControl.Stop() //TODO: assert hostmaps } func TestRelays(t *testing.T) { ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) myControl, myVpnIp, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) relayControl, relayVpnIp, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) // Teach my how to get to the relay and that their can be reached via the relay myControl.InjectLightHouseAddr(relayVpnIp, relayUdpAddr) myControl.InjectRelays(theirVpnIp, []net.IP{relayVpnIp}) relayControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr) // Build a router so we don't have to reason who gets which packet r := router.NewR(t, myControl, relayControl, theirControl) defer r.RenderFlow() // Start the servers myControl.Start() relayControl.Start() theirControl.Start() t.Log("Trigger a handshake from me to 
them via the relay") myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me")) p := r.RouteForAllUntilTxTun(theirControl) assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80) //TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it } //TODO: add a test with many lies nebula-1.6.1+dfsg/e2e/helpers_test.go000066400000000000000000000210701434072716400174170ustar00rootroot00000000000000//go:build e2e_testing // +build e2e_testing package e2e import ( "crypto/rand" "fmt" "io" "net" "os" "testing" "time" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/imdario/mergo" "github.com/sirupsen/logrus" "github.com/slackhq/nebula" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/e2e/router" "github.com/slackhq/nebula/iputil" "github.com/stretchr/testify/assert" "golang.org/x/crypto/curve25519" "golang.org/x/crypto/ed25519" "gopkg.in/yaml.v2" ) type m map[string]interface{} // newSimpleServer creates a nebula instance with many assumptions func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, net.IP, *net.UDPAddr) { l := NewTestLogger() vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}} copy(vpnIpNet.IP, udpIp) vpnIpNet.IP[1] += 128 udpAddr := net.UDPAddr{ IP: udpIp, Port: 4242, } _, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{}) caB, err := caCrt.MarshalToPEM() if err != nil { panic(err) } mc := m{ "pki": m{ "ca": string(caB), "cert": string(myPEM), "key": string(myPrivKey), }, //"tun": m{"disabled": true}, "firewall": m{ "outbound": []m{{ "proto": "any", "port": "any", "host": "any", }}, "inbound": []m{{ "proto": "any", "port": "any", "host": "any", }}, }, //"handshakes": m{ // "try_interval": "1s", //}, "listen": m{ "host": 
udpAddr.IP.String(), "port": udpAddr.Port, }, "logging": m{ "timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name), "level": l.Level.String(), }, } if overrides != nil { err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice) if err != nil { panic(err) } mc = overrides } cb, err := yaml.Marshal(mc) if err != nil { panic(err) } c := config.NewC(l) c.LoadString(string(cb)) control, err := nebula.Main(c, false, "e2e-test", l, nil) if err != nil { panic(err) } return control, vpnIpNet.IP, &udpAddr } // newTestCaCert will generate a CA cert func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) { pub, priv, err := ed25519.GenerateKey(rand.Reader) if before.IsZero() { before = time.Now().Add(time.Second * -60).Round(time.Second) } if after.IsZero() { after = time.Now().Add(time.Second * 60).Round(time.Second) } nc := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "test ca", NotBefore: time.Unix(before.Unix(), 0), NotAfter: time.Unix(after.Unix(), 0), PublicKey: pub, IsCA: true, InvertedGroups: make(map[string]struct{}), }, } if len(ips) > 0 { nc.Details.Ips = ips } if len(subnets) > 0 { nc.Details.Subnets = subnets } if len(groups) > 0 { nc.Details.Groups = groups } err = nc.Sign(priv) if err != nil { panic(err) } pem, err := nc.MarshalToPEM() if err != nil { panic(err) } return nc, pub, priv, pem } // newTestCert will generate a signed certificate with the provided details. 
// Expiry times are defaulted if you do not pass them in func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) { issuer, err := ca.Sha256Sum() if err != nil { panic(err) } if before.IsZero() { before = time.Now().Add(time.Second * -60).Round(time.Second) } if after.IsZero() { after = time.Now().Add(time.Second * 60).Round(time.Second) } pub, rawPriv := x25519Keypair() nc := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: name, Ips: []*net.IPNet{ip}, Subnets: subnets, Groups: groups, NotBefore: time.Unix(before.Unix(), 0), NotAfter: time.Unix(after.Unix(), 0), PublicKey: pub, IsCA: false, Issuer: issuer, InvertedGroups: make(map[string]struct{}), }, } err = nc.Sign(key) if err != nil { panic(err) } pem, err := nc.MarshalToPEM() if err != nil { panic(err) } return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem } func x25519Keypair() ([]byte, []byte) { privkey := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, privkey); err != nil { panic(err) } pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint) if err != nil { panic(err) } return pubkey, privkey } type doneCb func() func deadline(t *testing.T, seconds time.Duration) doneCb { timeout := time.After(seconds * time.Second) done := make(chan bool) go func() { select { case <-timeout: t.Fatal("Test did not finish in time") case <-done: } }() return func() { done <- true } } func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) { // Send a packet from them to me controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B")) bPacket := r.RouteUntilTxTun(controlB, controlA) assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80) // And once more from me to them controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A")) aPacket := r.RouteUntilTxTun(controlA, 
controlB) assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80) } func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) { // Get both host infos hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false) assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA") hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false) assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB") // Check that both vpn and real addr are correct assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A") assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B") assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A") assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B") assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A") assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B") // Check that our indexes match assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index") assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index") //TODO: Would be nice to assert this memory //checkIndexes := func(name string, hm *HostMap, hi *HostInfo) { // hBbyIndex := hmA.Indexes[hBinA.localIndexId] // assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name) // assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name) // // //TODO: remote indexes are susceptible to collision // hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId] // assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name) // assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point 
to the right host info", name) //} // //// Check hostmap indexes too //checkIndexes("hmA", hmA, hBinA) //checkIndexes("hmB", hmB, hAinB) } func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) { packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy) v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) assert.NotNil(t, v4, "No ipv4 data found") assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect") assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect") udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP) assert.NotNil(t, udp, "No udp data found") assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect") assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect") data := packet.ApplicationLayer() assert.NotNil(t, data) assert.Equal(t, expected, data.Payload(), "Data was incorrect") } func NewTestLogger() *logrus.Logger { l := logrus.New() v := os.Getenv("TEST_LOGS") if v == "" { l.SetOutput(io.Discard) l.SetLevel(logrus.PanicLevel) return l } switch v { case "2": l.SetLevel(logrus.DebugLevel) case "3": l.SetLevel(logrus.TraceLevel) default: l.SetLevel(logrus.InfoLevel) } return l } nebula-1.6.1+dfsg/e2e/router/000077500000000000000000000000001434072716400157075ustar00rootroot00000000000000nebula-1.6.1+dfsg/e2e/router/doc.go000066400000000000000000000001751434072716400170060ustar00rootroot00000000000000package router // This file exists to allow `go fmt` to traverse here on its own. 
The build tags were keeping it out before nebula-1.6.1+dfsg/e2e/router/router.go000066400000000000000000000404301434072716400175570ustar00rootroot00000000000000//go:build e2e_testing // +build e2e_testing package router import ( "context" "fmt" "net" "os" "path/filepath" "reflect" "strconv" "strings" "sync" "testing" "time" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/slackhq/nebula" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) type R struct { // Simple map of the ip:port registered on a control to the control // Basically a router, right? controls map[string]*nebula.Control // A map for inbound packets for a control that doesn't know about this address inNat map[string]*nebula.Control // A last used map, if an inbound packet hit the inNat map then // all return packets should use the same last used inbound address for the outbound sender // map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver outNat map[string]net.UDPAddr // A map of vpn ip to the nebula control it belongs to vpnControls map[iputil.VpnIp]*nebula.Control flow []flowEntry // All interactions are locked to help serialize behavior sync.Mutex fn string cancelRender context.CancelFunc t testing.TB } type flowEntry struct { note string packet *packet } type packet struct { from *nebula.Control to *nebula.Control packet *udp.Packet tun bool // a packet pulled off a tun device rx bool // the packet was received by a udp device } func (p *packet) WasReceived() { if p != nil { p.rx = true } } type ExitType int const ( // KeepRouting the function will get called again on the next packet KeepRouting ExitType = 0 // ExitNow does not route this packet and exits immediately ExitNow ExitType = 1 // RouteAndExit routes this packet and exits immediately afterwards RouteAndExit ExitType = 2 ) type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType // NewR creates a new 
router to pass packets in a controlled fashion between the provided controllers. // The packet flow will be recorded in a file within the mermaid directory under the same name as the test. // Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made func NewR(t testing.TB, controls ...*nebula.Control) *R { ctx, cancel := context.WithCancel(context.Background()) if err := os.MkdirAll("mermaid", 0755); err != nil { panic(err) } r := &R{ controls: make(map[string]*nebula.Control), vpnControls: make(map[iputil.VpnIp]*nebula.Control), inNat: make(map[string]*nebula.Control), outNat: make(map[string]net.UDPAddr), flow: []flowEntry{}, fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())), t: t, cancelRender: cancel, } // Try to remove our render file os.Remove(r.fn) for _, c := range controls { addr := c.GetUDPAddr() if _, ok := r.controls[addr]; ok { panic("Duplicate listen address: " + addr) } r.vpnControls[c.GetVpnIp()] = c r.controls[addr] = c } // Spin the renderer in case we go nuts and the test never completes go func() { clockSource := time.NewTicker(time.Millisecond * 100) defer clockSource.Stop() for { select { case <-ctx.Done(): return case <-clockSource.C: r.renderFlow() } } }() return r } // AddRoute will place the nebula controller at the ip and port specified. // It does not look at the addr attached to the instance. // If a route is used, this will behave like a NAT for the return path. // Rewriting the source ip:port to what was last sent to from the origin func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) { r.Lock() defer r.Unlock() inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)) if _, ok := r.inNat[inAddr]; ok { panic("Duplicate listen address inNat: " + inAddr) } r.inNat[inAddr] = c } // RenderFlow renders the packet flow seen up until now and stops further automatic renders from happening. 
func (r *R) RenderFlow() { r.cancelRender() r.renderFlow() } // CancelFlowLogs stops flow logs from being tracked and destroys any logs already collected func (r *R) CancelFlowLogs() { r.cancelRender() r.flow = nil } func (r *R) renderFlow() { if r.flow == nil { return } f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644) if err != nil { panic(err) } var participants = map[string]struct{}{} var participantsVals []string fmt.Fprintln(f, "```mermaid") fmt.Fprintln(f, "sequenceDiagram") // Assemble participants for _, e := range r.flow { if e.packet == nil { continue } addr := e.packet.from.GetUDPAddr() if _, ok := participants[addr]; ok { continue } participants[addr] = struct{}{} sanAddr := strings.Replace(addr, ":", "#58;", 1) participantsVals = append(participantsVals, sanAddr) fmt.Fprintf( f, " participant %s as Nebula: %s
UDP: %s\n", sanAddr, e.packet.from.GetVpnIp(), sanAddr, ) } // Print packets h := &header.H{} for _, e := range r.flow { if e.packet == nil { fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participantsVals, ", "), e.note) continue } p := e.packet if p.tun { fmt.Fprintln(f, r.formatUdpPacket(p)) } else { if err := h.Parse(p.packet.Data); err != nil { panic(err) } line := "--x" if p.rx { line = "->>" } fmt.Fprintf(f, " %s%s%s: %s(%s), counter: %v\n", strings.Replace(p.from.GetUDPAddr(), ":", "#58;", 1), line, strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1), h.TypeName(), h.SubTypeName(), h.MessageCounter, ) } } fmt.Fprintln(f, "```") } // InjectFlow can be used to record packet flow if the test is handling the routing on its own. // The packet is assumed to have been received func (r *R) InjectFlow(from, to *nebula.Control, p *udp.Packet) { r.Lock() defer r.Unlock() r.unlockedInjectFlow(from, to, p, false) } func (r *R) Log(arg ...any) { if r.flow == nil { return } r.Lock() r.flow = append(r.flow, flowEntry{note: fmt.Sprint(arg...)}) r.t.Log(arg...) r.Unlock() } func (r *R) Logf(format string, arg ...any) { if r.flow == nil { return } r.Lock() r.flow = append(r.flow, flowEntry{note: fmt.Sprintf(format, arg...)}) r.t.Logf(format, arg...) r.Unlock() } // unlockedInjectFlow is used by the router to record a packet has been transmitted, the packet is returned and // should be marked as received AFTER it has been placed on the receivers channel. 
// If flow logs have been disabled this function will return nil func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool) *packet { if r.flow == nil { return nil } fp := &packet{ from: from, to: to, packet: p.Copy(), tun: tun, } r.flow = append(r.flow, flowEntry{packet: fp}) return fp } // OnceFrom will route a single packet from sender then return // If the router doesn't have the nebula controller for that address, we panic func (r *R) OnceFrom(sender *nebula.Control) { r.RouteExitFunc(sender, func(*udp.Packet, *nebula.Control) ExitType { return RouteAndExit }) } // RouteUntilTxTun will route for sender and return when a packet is seen on receivers tun // If the router doesn't have the nebula controller for that address, we panic func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []byte { tunTx := receiver.GetTunTxChan() udpTx := sender.GetUDPTxChan() for { select { // Maybe we already have something on the tun for us case b := <-tunTx: r.Lock() np := udp.Packet{Data: make([]byte, len(b))} copy(np.Data, b) r.unlockedInjectFlow(receiver, receiver, &np, true) r.Unlock() return b // Nope, lets push the sender along case p := <-udpTx: outAddr := sender.GetUDPAddr() r.Lock() inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort)) c := r.getControl(outAddr, inAddr, p) if c == nil { r.Unlock() panic("No control for udp tx") } fp := r.unlockedInjectFlow(sender, c, p, false) c.InjectUDPPacket(p) fp.WasReceived() r.Unlock() } } } // RouteForAllUntilTxTun will route for everyone and return when a packet is seen on receivers tun // If the router doesn't have the nebula controller for that address, we panic func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte { sc := make([]reflect.SelectCase, len(r.controls)+1) cm := make([]*nebula.Control, len(r.controls)+1) i := 0 sc[i] = reflect.SelectCase{ Dir: reflect.SelectRecv, Chan: reflect.ValueOf(receiver.GetTunTxChan()), Send: reflect.Value{}, } 
cm[i] = receiver i++ for _, c := range r.controls { sc[i] = reflect.SelectCase{ Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c.GetUDPTxChan()), Send: reflect.Value{}, } cm[i] = c i++ } for { x, rx, _ := reflect.Select(sc) r.Lock() if x == 0 { // we are the tun tx, we can exit p := rx.Interface().([]byte) np := udp.Packet{Data: make([]byte, len(p))} copy(np.Data, p) r.unlockedInjectFlow(cm[x], cm[x], &np, true) r.Unlock() return p } else { // we are a udp tx, route and continue p := rx.Interface().(*udp.Packet) outAddr := cm[x].GetUDPAddr() inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort)) c := r.getControl(outAddr, inAddr, p) if c == nil { r.Unlock() panic("No control for udp tx") } fp := r.unlockedInjectFlow(cm[x], c, p, false) c.InjectUDPPacket(p) fp.WasReceived() } r.Unlock() } } // RouteExitFunc will call the whatDo func with each udp packet from sender. // whatDo can return: // - exitNow: the packet will not be routed and this call will return immediately // - routeAndExit: this call will return immediately after routing the last packet from sender // - keepRouting: the packet will be routed and whatDo will be called again on the next packet from sender func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) { h := &header.H{} for { p := sender.GetFromUDP(true) r.Lock() if err := h.Parse(p.Data); err != nil { panic(err) } outAddr := sender.GetUDPAddr() inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort)) receiver := r.getControl(outAddr, inAddr, p) if receiver == nil { r.Unlock() panic("Can't route for host: " + inAddr) } e := whatDo(p, receiver) switch e { case ExitNow: r.Unlock() return case RouteAndExit: fp := r.unlockedInjectFlow(sender, receiver, p, false) receiver.InjectUDPPacket(p) fp.WasReceived() r.Unlock() return case KeepRouting: fp := r.unlockedInjectFlow(sender, receiver, p, false) receiver.InjectUDPPacket(p) fp.WasReceived() default: panic(fmt.Sprintf("Unknown exitFunc return: %v", e)) } 
r.Unlock() } } // RouteUntilAfterMsgType will route for sender until a message type is seen and sent from sender // If the router doesn't have the nebula controller for that address, we panic func (r *R) RouteUntilAfterMsgType(sender *nebula.Control, msgType header.MessageType, subType header.MessageSubType) { h := &header.H{} r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType { if err := h.Parse(p.Data); err != nil { panic(err) } if h.Type == msgType && h.Subtype == subType { return RouteAndExit } return KeepRouting }) } func (r *R) RouteForAllUntilAfterMsgTypeTo(receiver *nebula.Control, msgType header.MessageType, subType header.MessageSubType) { h := &header.H{} r.RouteForAllExitFunc(func(p *udp.Packet, r *nebula.Control) ExitType { if r != receiver { return KeepRouting } if err := h.Parse(p.Data); err != nil { panic(err) } if h.Type == msgType && h.Subtype == subType { return RouteAndExit } return KeepRouting }) } func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet) { r.Lock() defer r.Unlock() fp := r.unlockedInjectFlow(sender, receiver, packet, false) receiver.InjectUDPPacket(packet) fp.WasReceived() } // RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr // finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit` // If the router doesn't have the nebula controller for that address, we panic func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) { if finish == KeepRouting { finish = RouteAndExit } r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType { if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) { return finish } return KeepRouting }) } // RouteForAllExitFunc will route for every registered controller and calls the whatDo func with each udp packet from // whatDo can return: // - exitNow: the packet will not be routed and this 
call will return immediately // - routeAndExit: this call will return immediately after routing the last packet from sender // - keepRouting: the packet will be routed and whatDo will be called again on the next packet from sender func (r *R) RouteForAllExitFunc(whatDo ExitFunc) { sc := make([]reflect.SelectCase, len(r.controls)) cm := make([]*nebula.Control, len(r.controls)) i := 0 for _, c := range r.controls { sc[i] = reflect.SelectCase{ Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c.GetUDPTxChan()), Send: reflect.Value{}, } cm[i] = c i++ } for { x, rx, _ := reflect.Select(sc) r.Lock() p := rx.Interface().(*udp.Packet) outAddr := cm[x].GetUDPAddr() inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort)) receiver := r.getControl(outAddr, inAddr, p) if receiver == nil { r.Unlock() panic("Can't route for host: " + inAddr) } e := whatDo(p, receiver) switch e { case ExitNow: r.Unlock() return case RouteAndExit: fp := r.unlockedInjectFlow(cm[x], receiver, p, false) receiver.InjectUDPPacket(p) fp.WasReceived() r.Unlock() return case KeepRouting: fp := r.unlockedInjectFlow(cm[x], receiver, p, false) receiver.InjectUDPPacket(p) fp.WasReceived() default: panic(fmt.Sprintf("Unknown exitFunc return: %v", e)) } r.Unlock() } } // FlushAll will route for every registered controller, exiting once there are no packets left to route func (r *R) FlushAll() { sc := make([]reflect.SelectCase, len(r.controls)) cm := make([]*nebula.Control, len(r.controls)) i := 0 for _, c := range r.controls { sc[i] = reflect.SelectCase{ Dir: reflect.SelectRecv, Chan: reflect.ValueOf(c.GetUDPTxChan()), Send: reflect.Value{}, } cm[i] = c i++ } // Add a default case to exit when nothing is left to send sc = append(sc, reflect.SelectCase{ Dir: reflect.SelectDefault, Chan: reflect.Value{}, Send: reflect.Value{}, }) for { x, rx, ok := reflect.Select(sc) if !ok { return } r.Lock() p := rx.Interface().(*udp.Packet) outAddr := cm[x].GetUDPAddr() inAddr := net.JoinHostPort(p.ToIp.String(), 
fmt.Sprintf("%v", p.ToPort)) receiver := r.getControl(outAddr, inAddr, p) if receiver == nil { r.Unlock() panic("Can't route for host: " + inAddr) } r.Unlock() } } // getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change // This is an internal router function, the caller must hold the lock func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control { if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok { p.FromIp = newAddr.IP p.FromPort = uint16(newAddr.Port) } c, ok := r.inNat[toAddr] if ok { sHost, sPort, err := net.SplitHostPort(toAddr) if err != nil { panic(err) } port, err := strconv.Atoi(sPort) if err != nil { panic(err) } r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{ IP: net.ParseIP(sHost), Port: port, } return c } return r.controls[toAddr] } func (r *R) formatUdpPacket(p *packet) string { packet := gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy) v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) if v4 == nil { panic("not an ipv4 packet") } from := "unknown" if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok { from = c.GetUDPAddr() } udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP) if udp == nil { panic("not a udp packet") } data := packet.ApplicationLayer() return fmt.Sprintf( " %s-->>%s: src port: %v
dest port: %v
data: \"%v\"\n", strings.Replace(from, ":", "#58;", 1), strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1), udp.SrcPort, udp.DstPort, string(data.Payload()), ) } nebula-1.6.1+dfsg/examples/000077500000000000000000000000001434072716400155325ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/config.yml000066400000000000000000000332711434072716400175300ustar00rootroot00000000000000# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections # Some options in this file are HUPable, including the pki section. (A HUP will reload credentials from disk without affecting existing tunnels) # PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax. pki: # The CAs that are accepted by this node. Must contain one or more certificates created by 'nebula-cert ca' ca: /etc/nebula/ca.crt cert: /etc/nebula/host.crt key: /etc/nebula/host.key # blocklist is a list of certificate fingerprints that we will refuse to talk to #blocklist: # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72 # disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid. #disconnect_invalid: false # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network). # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel. # The syntax is: # "{nebula ip}": ["{routable ip/dns name}:{routable port}"] # Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4242: static_host_map: "192.168.100.1": ["100.64.22.11:4242"] lighthouse: # am_lighthouse is used to enable lighthouse functionality for a node. 
This should ONLY be true on nodes # you have configured to be lighthouses in your network am_lighthouse: false # serve_dns optionally starts a dns listener that responds to various queries and can even be # delegated to for resolution #serve_dns: false #dns: # The DNS host defines the IP to bind the dns listener to. This also allows binding to the nebula node IP. #host: 0.0.0.0 #port: 53 # interval is the number of seconds between updates from this node to a lighthouse. # during updates, a node sends information about its current IP addresses to each node. interval: 60 # hosts is a list of lighthouse hosts this node should report to and query from # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES # IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs hosts: - "192.168.100.1" # remote_allow_list allows you to control ip ranges that this node will # consider when handshaking to another node. By default, any remote IPs are # allowed. You can provide CIDRs here with `true` to allow and `false` to # deny. The most specific CIDR rule applies to each remote. If all rules are # "allow", the default will be "deny", and vice-versa. If both "allow" and # "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the # default. #remote_allow_list: # Example to block IPs from this subnet from being used for remote IPs. #"172.16.0.0/12": false # A more complicated example, allow public IPs but only private IPs from a specific subnet #"0.0.0.0/0": true #"10.0.0.0/8": false #"10.42.42.0/24": true # EXPERIMENTAL: This option my change or disappear in the future. # Optionally allows the definition of remote_allow_list blocks # specific to an inside VPN IP CIDR. #remote_allow_ranges: # This rule would only allow only private IPs for this VPN range #"10.42.42.0/24": #"192.168.0.0/16": true # local_allow_list allows you to filter which local IP addresses we advertise # to the lighthouses. 
This uses the same logic as `remote_allow_list`, but # additionally, you can specify an `interfaces` map of regular expressions # to match against interface names. The regexp must match the entire name. # All interface rules must be either true or false (and the default will be # the inverse). CIDR rules are matched after interface name rules. # Default is all local IP addresses. #local_allow_list: # Example to block tun0 and all docker interfaces. #interfaces: #tun0: false #'docker.*': false # Example to only advertise this subnet to the lighthouse. #"10.0.0.0/8": true # advertise_addrs are routable addresses that will be included along with discovered addresses to report to the # lighthouse, the format is "ip:port". `port` can be `0`, in which case the actual listening port will be used in its # place, useful if `listen.port` is set to 0. # This option is mainly useful when there are static ip addresses the host can be reached at that nebula can not # typically discover on its own. Examples being port forwarding or multiple paths to the internet. #advertise_addrs: #- "1.1.1.1:4242" #- "1.2.3.4:0" # port will be replaced with the real listening port # Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined, # however using port 0 will dynamically assign a port and is recommended for roaming nodes. listen: # To listen on both any ipv4 and ipv6 use "[::]" host: 0.0.0.0 port: 4242 # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg) # default is 64, does not support reload #batch: 64 # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. 
Values will be doubled by the kernel # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/rmem_default) # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE is used to avoid having to raise the system wide # max, net.core.rmem_max and net.core.wmem_max #read_buffer: 10485760 #write_buffer: 10485760 # By default, Nebula replies to packets it has no tunnel for with a "recv_error" packet. This packet helps speed up reconnection # in the case that Nebula on either side did not shut down cleanly. This response can be abused as a way to discover if Nebula is running # on a host though. This option lets you configure if you want to send "recv_error" packets always, never, or only to private network remotes. # valid values: always, never, private # This setting is reloadable. #send_recv_error: always # Routines is the number of thread pairs to run that consume from the tun and UDP queues. # Currently, this defaults to 1 which means we have 1 tun queue reader and 1 # UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun # device and SO_REUSEPORT on the UDP socket to allow multiple queues. # This option is only supported on Linux. #routines: 1 punchy: # Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings punch: true # respond means that a node you are trying to reach will connect back out to you if your hole punching fails # this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT # Default is false #respond: true # delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect #delay: 1s # Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously! 
#cipher: chachapoly # Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest # path to a network adjacent nebula node. # NOTE: the previous option "local_range" only allowed definition of a single range # and has been deprecated for "preferred_ranges" #preferred_ranges: ["172.16.0.0/24"] # sshd can expose informational and administrative functions via ssh this is a #sshd: # Toggles the feature #enabled: true # Host and port to listen on, port 22 is not allowed for your safety #listen: 127.0.0.1:2222 # A file containing the ssh host private key to use # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null #host_key: ./ssh_host_ed25519_key # A file containing a list of authorized public keys #authorized_users: #- user: steeeeve # keys can be an array of strings or single string #keys: #- "ssh public key string" # EXPERIMENTAL: relay support for networks that can't establish direct connections. relay: # Relays are a list of Nebula IP's that peers can use to relay packets to me. # IPs in this list must have am_relay set to true in their configs, otherwise # they will reject relay requests. #relays: #- 192.168.100.1 #- # Set am_relay to true to permit other hosts to list my IP in their relays config. Default false. am_relay: false # Set use_relays to false to prevent this instance from attempting to establish connections through relays. # default true use_relays: true # Configure the private interface. Note: addr is baked into the nebula certificate tun: # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root) disabled: false # Name of the device. If not set, a default will be chosen by the OS. # For macOS: if set, must be in the form `utun[0-9]+`. # For FreeBSD: Required to be set, must be in the form `tun[0-9]+`. 
dev: nebula1 # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert drop_local_broadcast: false # Toggles forwarding of multicast packets drop_multicast: false # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500 tx_queue: 500 # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic mtu: 1300 # Route based MTU overrides, you have known vpn ip paths that can support larger MTUs you can increase/decrease them here routes: #- mtu: 8800 # route: 10.0.0.0/16 # Unsafe routes allows you to route traffic over nebula to non-nebula nodes # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate # `mtu` will default to tun mtu if this option is not specified # `metric` will default to 0 if this option is not specified unsafe_routes: #- route: 172.16.1.0/24 # via: 192.168.100.99 # mtu: 1300 # metric: 100 # TODO # Configure logging level logging: # panic, fatal, error, warning, info, or debug. Default is info level: info # json or text formats currently available. Default is text format: text # Disable timestamp logging. useful when output is redirected to logging system that already adds timestamps. 
Default is false #disable_timestamp: true # timestamp format is specified in Go time format, see: # https://golang.org/pkg/time/#pkg-constants # default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339) # default when `format: text`: # when TTY attached: seconds since beginning of execution # otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339) # As an example, to log as RFC3339 with millisecond precision, set to: #timestamp_format: "2006-01-02T15:04:05.000Z07:00" #stats: #type: graphite #prefix: nebula #protocol: tcp #host: 127.0.0.1:9999 #interval: 10s #type: prometheus #listen: 127.0.0.1:8080 #path: /metrics #namespace: prometheusns #subsystem: nebula #interval: 10s # enables counter metrics for meta packets # e.g.: `messages.tx.handshake` # NOTE: `message.{tx,rx}.recv_error` is always emitted #message_metrics: false # enables detailed counter metrics for lighthouse packets # e.g.: `lighthouse.rx.HostQuery` #lighthouse_metrics: false # Handshake Manager Settings #handshakes: # Handshakes are sent to all known addresses at each interval with a linear backoff, # Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out #try_interval: 100ms #retries: 20 # trigger_buffer is the size of the buffer channel for quickly sending handshakes # after receiving the response for lighthouse queries #trigger_buffer: 64 # Nebula security group configuration firewall: conntrack: tcp_timeout: 12m udp_timeout: 3m default_timeout: 10m # The firewall is default deny. There is no way to write a deny rule. 
# Rules are comprised of a protocol, port, and one or more of host, group, or CIDR # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr) # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available). # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any` # proto: `any`, `tcp`, `udp`, or `icmp` # host: `any` or a literal hostname, ie `test-host` # group: `any` or a literal group name, ie `default-group` # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass # cidr: a CIDR, `0.0.0.0/0` is any. # ca_name: An issuing CA name # ca_sha: An issuing CA shasum outbound: # Allow all outbound traffic from this node - port: any proto: any host: any inbound: # Allow icmp between any nebula hosts - port: any proto: icmp host: any # Allow tcp/443 from any host with BOTH laptop and home group - port: 443 proto: tcp groups: - laptop - home nebula-1.6.1+dfsg/examples/quickstart-vagrant/000077500000000000000000000000001434072716400213645ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/README.md000066400000000000000000000070111434072716400226420ustar00rootroot00000000000000# Quickstart Guide This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula. 
## Creating the virtualenv for ansible Within the `quickstart/` directory, do the following ``` # make a virtual environment virtualenv venv # get into the virtualenv source venv/bin/activate # install ansible pip install -r requirements.yml ``` ## Bringing up the vagrant environment A plugin that is used for the Vagrant environment is `vagrant-hostmanager` To install, run ``` vagrant plugin install vagrant-hostmanager ``` All hosts within the Vagrantfile are brought up with `vagrant up` Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running `ansible-playbook playbook.yml -i inventory -u vagrant` ## Testing within the vagrant env Once the ansible run is done, hop onto a vagrant box `vagrant ssh generic1.vagrant` or specifically `ssh vagrant@` (password for the vagrant user on the boxes is `vagrant`) See `/etc/nebula/config.yml` on a box for firewall rules. To see full handshakes and hostmaps, change the logging config of `/etc/nebula/config.yml` on the vagrant boxes from info to debug. You can watch nebula logs by running ``` sudo journalctl -fu nebula ``` Refer to the nebula src code directory's README for further instructions on configuring nebula. ## Troubleshooting ### Is nebula up and running? Run and verify that ``` ifconfig ``` shows you an interface with the name `nebula1` being up. ``` vagrant@generic1:~$ ifconfig nebula1 nebula1: flags=4305 mtu 1300 inet 10.168.91.210 netmask 255.128.0.0 destination 10.168.91.210 inet6 fe80::aeaf:b105:e6dc:936c prefixlen 64 scopeid 0x20 unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC) RX packets 2 bytes 168 (168.0 B) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 11 bytes 600 (600.0 B) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 ``` ### Connectivity Are you able to ping other boxes on the private nebula network? 
The following are the private nebula ip addresses of the vagrant env ``` generic1.vagrant [nebula_ip] 10.168.91.210 generic2.vagrant [nebula_ip] 10.168.91.220 lighthouse1.vagrant [nebula_ip] 10.168.91.230 ``` Try pinging generic1.vagrant to and from any other box using its nebula ip above. Double check the nebula firewall rules under /etc/nebula/config.yml to make sure that connectivity is allowed for your use-case if on a specific port. ``` vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml firewall: conntrack: tcp_timeout: 12m udp_timeout: 3m default_timeout: 10m inbound: - proto: icmp port: any host: any - proto: any port: 22 host: any - proto: any port: 53 host: any outbound: - proto: any port: any host: any ``` nebula-1.6.1+dfsg/examples/quickstart-vagrant/Vagrantfile000066400000000000000000000026741434072716400235620ustar00rootroot00000000000000Vagrant.require_version ">= 2.2.6" nodes = [ { :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1}, { :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1}, { :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1}, ] Vagrant.configure("2") do |config| config.ssh.insert_key = false if Vagrant.has_plugin?('vagrant-cachier') config.cache.enable :apt else printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n") end if Vagrant.has_plugin?('vagrant-hostmanager') config.hostmanager.enabled = true config.hostmanager.manage_host = true config.hostmanager.include_offline = true else config.vagrant.plugins = "vagrant-hostmanager" end nodes.each do |node| config.vm.define node[:hostname] do |node_config| node_config.vm.box = node[:box] node_config.vm.hostname = node[:hostname] node_config.vm.network :private_network, ip: node[:ip] node_config.vm.provider :virtualbox do |vb| vb.memory = 
node[:ram] vb.cpus = node[:cpus] vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000] end end end end nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/000077500000000000000000000000001434072716400230015ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/ansible.cfg000066400000000000000000000001471434072716400251010ustar00rootroot00000000000000[defaults] host_key_checking = False private_key_file = ~/.vagrant.d/insecure_private_key become = yes nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/filter_plugins/000077500000000000000000000000001434072716400260275ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/filter_plugins/to_nebula_ip.py000066400000000000000000000010641434072716400310420ustar00rootroot00000000000000#!/usr/bin/python class FilterModule(object): def filters(self): return { 'to_nebula_ip': self.to_nebula_ip, 'map_to_nebula_ips': self.map_to_nebula_ips, } def to_nebula_ip(self, ip_str): ip_list = list(map(int, ip_str.split("."))) ip_list[0] = 10 ip_list[1] = 168 ip = '.'.join(map(str, ip_list)) return ip def map_to_nebula_ips(self, ip_strs): ip_list = [ self.to_nebula_ip(ip_str) for ip_str in ip_strs ] ips = ', '.join(ip_list) return ips nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/inventory000066400000000000000000000002141434072716400247560ustar00rootroot00000000000000[all] generic1.vagrant generic2.vagrant lighthouse1.vagrant [generic] generic1.vagrant generic2.vagrant [lighthouse] lighthouse1.vagrant nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/playbook.yml000066400000000000000000000006731434072716400253520ustar00rootroot00000000000000--- - name: test connection to vagrant boxes hosts: all tasks: - debug: msg=ok - name: build nebula binaries locally connection: local hosts: localhost tasks: - command: chdir=../../../ make build/linux-amd64/"{{ 
item }}" with_items: - nebula - nebula-cert tags: - build-nebula - name: install nebula on all vagrant hosts hosts: all become: yes gather_facts: yes roles: - nebula nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/000077500000000000000000000000001434072716400241255ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/000077500000000000000000000000001434072716400253735ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/defaults/000077500000000000000000000000001434072716400272025ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/defaults/main.yml000066400000000000000000000001071434072716400306470ustar00rootroot00000000000000--- # defaults file for nebula nebula_config_directory: "/etc/nebula/" nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/files/000077500000000000000000000000001434072716400264755ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/files/systemd.nebula.service000066400000000000000000000004111434072716400330100ustar00rootroot00000000000000[Unit] Description=nebula Wants=basic.target After=basic.target network.target [Service] SyslogIdentifier=nebula ExecReload=/bin/kill -HUP $MAINPID ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml Restart=always [Install] WantedBy=multi-user.target nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt000066400000000000000000000003671434072716400322150ustar00rootroot00000000000000-----BEGIN NEBULA CERTIFICATE----- CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+ hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN -----END NEBULA CERTIFICATE----- 
nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key000066400000000000000000000002561434072716400322120ustar00rootroot00000000000000-----BEGIN NEBULA ED25519 PRIVATE KEY----- FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+ hsAg9otUFhpAowZeJ33KVA== -----END NEBULA ED25519 PRIVATE KEY----- nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/handlers/000077500000000000000000000000001434072716400271735ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/handlers/main.yml000066400000000000000000000001361434072716400306420ustar00rootroot00000000000000--- # handlers file for nebula - name: restart nebula service: name=nebula state=restarted nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/tasks/000077500000000000000000000000001434072716400265205ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/tasks/main.yml000066400000000000000000000040531434072716400301710ustar00rootroot00000000000000--- # tasks file for nebula - name: get the vagrant network interface and set fact set_fact: vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}" tags: - nebula-conf - name: install built nebula binary copy: src="../../../../../build/linux-amd64/{{ item }}" dest="/usr/local/bin" mode=0755 with_items: - nebula - nebula-cert - name: create nebula config directory file: path="{{ nebula_config_directory }}" state=directory mode=0755 - name: temporarily copy over root.crt and root.key to sign copy: src={{ item }} dest=/opt/{{ item }} with_items: - vagrant-test-ca.key - vagrant-test-ca.crt - name: remove previously signed host certificate file: dest=/etc/nebula/{{ item }} state=absent with_items: - host.crt - host.key - name: sign using the root key command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups 
vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key - name: remove root.key used to sign file: dest=/opt/{{ item }} state=absent with_items: - vagrant-test-ca.key - name: write the content of the trusted ca certificate copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt" notify: restart nebula - name: Create config directory file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory - name: nebula config template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root notify: restart nebula tags: - nebula-conf - name: nebula systemd copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root register: addconf notify: restart nebula - name: maybe reload systemd shell: systemctl daemon-reload when: addconf.changed - name: nebula running service: name="nebula" state=started enabled=yes nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/templates/000077500000000000000000000000001434072716400273715ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/templates/config.yml.j2000066400000000000000000000034361434072716400317010ustar00rootroot00000000000000pki: ca: /etc/nebula/vagrant-test-ca.crt cert: /etc/nebula/host.crt key: /etc/nebula/host.key # Port Nebula will be listening on listen: host: 0.0.0.0 port: 4242 # sshd can expose informational and administrative functions via ssh sshd: # Toggles the feature enabled: true # Host and port to listen on listen: 127.0.0.1:2222 # A file containing the ssh host private key to use host_key: /etc/ssh/ssh_host_ed25519_key # A file containing a list of authorized public keys authorized_users: {% for user in nebula_users %} - user: {{ user.name }} keys: {% for key in user.ssh_auth_keys %} - "{{ key }}" {% endfor %} {% 
endfor %} local_range: 10.168.0.0/16 static_host_map: # lighthouse {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address']}}:4242"] default_route: "0.0.0.0" lighthouse: {% if 'lighthouse' in group_names %} am_lighthouse: true serve_dns: true {% else %} am_lighthouse: false {% endif %} interval: 60 {% if 'generic' in group_names %} hosts: - {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }} {% endif %} # Configure the private interface tun: dev: nebula1 # Sets MTU of the tun dev. # MTU of the tun must be smaller than the MTU of the eth0 interface mtu: 1300 # TODO # Configure logging level logging: level: info format: json firewall: conntrack: tcp_timeout: 12m udp_timeout: 3m default_timeout: 10m inbound: - proto: icmp port: any host: any - proto: any port: 22 host: any {% if "lighthouse" in groups %} - proto: any port: 53 host: any {% endif %} outbound: - proto: any port: any host: any nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/vars/000077500000000000000000000000001434072716400263465ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/quickstart-vagrant/ansible/roles/nebula/vars/main.yml000066400000000000000000000001771434072716400300220ustar00rootroot00000000000000--- # vars file for nebula nebula_users: - name: user1 ssh_auth_keys: - "ed25519 place-your-ssh-public-key-here" nebula-1.6.1+dfsg/examples/quickstart-vagrant/requirements.yml000066400000000000000000000000101434072716400246210ustar00rootroot00000000000000ansible nebula-1.6.1+dfsg/examples/service_scripts/000077500000000000000000000000001434072716400207415ustar00rootroot00000000000000nebula-1.6.1+dfsg/examples/service_scripts/nebula.init.d.sh000066400000000000000000000020471434072716400237320ustar00rootroot00000000000000#!/bin/sh ### BEGIN INIT INFO # Provides: nebula # Required-Start: $local_fs $network # Required-Stop: $local_fs $network 
# Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Description: nebula mesh vpn client ### END INIT INFO SCRIPT="/usr/local/bin/nebula -config /etc/nebula/config.yml" RUNAS=root PIDFILE=/var/run/nebula.pid LOGFILE=/var/log/nebula.log start() { if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE); then echo 'Service already running' >&2 return 1 fi echo 'Starting nebula service…' >&2 local CMD="$SCRIPT &> \"$LOGFILE\" & echo \$!" su -c "$CMD" $RUNAS > "$PIDFILE" echo 'Service started' >&2 } stop() { if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then echo 'Service not running' >&2 return 1 fi echo 'Stopping nebula service…' >&2 kill -15 $(cat "$PIDFILE") && rm -f "$PIDFILE" echo 'Service stopped' >&2 } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; *) echo "Usage: $0 {start|stop|restart}" esac nebula-1.6.1+dfsg/examples/service_scripts/nebula.service000066400000000000000000000004351434072716400235730ustar00rootroot00000000000000[Unit] Description=nebula Wants=basic.target After=basic.target network.target Before=sshd.service [Service] SyslogIdentifier=nebula ExecReload=/bin/kill -HUP $MAINPID ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml Restart=always [Install] WantedBy=multi-user.target nebula-1.6.1+dfsg/firewall.go000066400000000000000000000537141434072716400160620ustar00rootroot00000000000000package nebula import ( "crypto/sha256" "encoding/binary" "encoding/hex" "errors" "fmt" "net" "reflect" "strconv" "strings" "sync" "time" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/firewall" ) const tcpACK = 0x10 const tcpFIN = 0x01 type FirewallInterface interface { AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error } type conn struct { Expires time.Time // Time when this conntrack entry will expire 
Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack // record why the original connection passed the firewall, so we can re-validate // after ruleset changes. Note, rulesVersion is a uint16 so that these two // fields pack for free after the uint32 above incoming bool rulesVersion uint16 } // TODO: need conntrack max tracked connections handling type Firewall struct { Conntrack *FirewallConntrack InRules *FirewallTable OutRules *FirewallTable //TODO: we should have many more options for TCP, an option for ICMP, and mimic the kernel a bit better // https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt TCPTimeout time.Duration //linux: 5 days max UDPTimeout time.Duration //linux: 180s max DefaultTimeout time.Duration //linux: 600s // Used to ensure we don't emit local packets for ips we don't own localIps *cidr.Tree4 rules string rulesVersion uint16 trackTCPRTT bool metricTCPRTT metrics.Histogram incomingMetrics firewallMetrics outgoingMetrics firewallMetrics l *logrus.Logger } type firewallMetrics struct { droppedLocalIP metrics.Counter droppedRemoteIP metrics.Counter droppedNoRule metrics.Counter } type FirewallConntrack struct { sync.Mutex Conns map[firewall.Packet]*conn TimerWheel *TimerWheel } type FirewallTable struct { TCP firewallPort UDP firewallPort ICMP firewallPort AnyProto firewallPort } func newFirewallTable() *FirewallTable { return &FirewallTable{ TCP: firewallPort{}, UDP: firewallPort{}, ICMP: firewallPort{}, AnyProto: firewallPort{}, } } type FirewallCA struct { Any *FirewallRule CANames map[string]*FirewallRule CAShas map[string]*FirewallRule } type FirewallRule struct { // Any makes Hosts, Groups, and CIDR irrelevant Any bool Hosts map[string]struct{} Groups [][]string CIDR *cidr.Tree4 } // Even though ports are uint16, int32 maps are faster for lookup // Plus we can use `-1` for fragment rules type 
firewallPort map[int32]*FirewallCA // NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts. func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall { //TODO: error on 0 duration var min, max time.Duration if tcpTimeout < UDPTimeout { min = tcpTimeout max = UDPTimeout } else { min = UDPTimeout max = tcpTimeout } if defaultTimeout < min { min = defaultTimeout } else if defaultTimeout > max { max = defaultTimeout } localIps := cidr.NewTree4() for _, ip := range c.Details.Ips { localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{}) } for _, n := range c.Details.Subnets { localIps.AddCIDR(n, struct{}{}) } return &Firewall{ Conntrack: &FirewallConntrack{ Conns: make(map[firewall.Packet]*conn), TimerWheel: NewTimerWheel(min, max), }, InRules: newFirewallTable(), OutRules: newFirewallTable(), TCPTimeout: tcpTimeout, UDPTimeout: UDPTimeout, DefaultTimeout: defaultTimeout, localIps: localIps, l: l, metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)), incomingMetrics: firewallMetrics{ droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil), droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil), droppedNoRule: metrics.GetOrRegisterCounter("firewall.incoming.dropped.no_rule", nil), }, outgoingMetrics: firewallMetrics{ droppedLocalIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.local_ip", nil), droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.remote_ip", nil), droppedNoRule: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.no_rule", nil), }, } } func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *config.C) (*Firewall, error) { fw := NewFirewall( l, c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12), 
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3), c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10), nc, //TODO: max_connections ) err := AddFirewallRulesFromConfig(l, false, c, fw) if err != nil { return nil, err } err = AddFirewallRulesFromConfig(l, true, c, fw) if err != nil { return nil, err } return fw, nil } // AddRule properly creates the in memory rule structure for a firewall table. func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error { // Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS // https://github.com/golang/go/issues/14131 sIp := "" if ip != nil { sIp = ip.String() } // We need this rule string because we generate a hash. Removing this will break firewall reload. ruleString := fmt.Sprintf( "incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s", incoming, proto, startPort, endPort, groups, host, sIp, caName, caSha, ) f.rules += ruleString + "\n" direction := "incoming" if !incoming { direction = "outgoing" } f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "caName": caName, "caSha": caSha}). 
Info("Firewall rule added") var ( ft *FirewallTable fp firewallPort ) if incoming { ft = f.InRules } else { ft = f.OutRules } switch proto { case firewall.ProtoTCP: fp = ft.TCP case firewall.ProtoUDP: fp = ft.UDP case firewall.ProtoICMP: fp = ft.ICMP case firewall.ProtoAny: fp = ft.AnyProto default: return fmt.Errorf("unknown protocol %v", proto) } return fp.addRule(startPort, endPort, groups, host, ip, caName, caSha) } // GetRuleHash returns a hash representation of all inbound and outbound rules func (f *Firewall) GetRuleHash() string { sum := sha256.Sum256([]byte(f.rules)) return hex.EncodeToString(sum[:]) } func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error { var table string if inbound { table = "firewall.inbound" } else { table = "firewall.outbound" } r := c.Get(table) if r == nil { return nil } rs, ok := r.([]interface{}) if !ok { return fmt.Errorf("%s failed to parse, should be an array of rules", table) } for i, t := range rs { var groups []string r, err := convertRule(l, t, table, i) if err != nil { return fmt.Errorf("%s rule #%v; %s", table, i, err) } if r.Code != "" && r.Port != "" { return fmt.Errorf("%s rule #%v; only one of port or code should be provided", table, i) } if r.Host == "" && len(r.Groups) == 0 && r.Group == "" && r.Cidr == "" && r.CAName == "" && r.CASha == "" { return fmt.Errorf("%s rule #%v; at least one of host, group, cidr, ca_name, or ca_sha must be provided", table, i) } if len(r.Groups) > 0 { groups = r.Groups } if r.Group != "" { // Check if we have both groups and group provided in the rule config if len(groups) > 0 { return fmt.Errorf("%s rule #%v; only one of group or groups should be defined, both provided", table, i) } groups = []string{r.Group} } var sPort, errPort string if r.Code != "" { errPort = "code" sPort = r.Code } else { errPort = "port" sPort = r.Port } startPort, endPort, err := parsePort(sPort) if err != nil { return fmt.Errorf("%s rule #%v; %s %s", table, i, 
errPort, err) } var proto uint8 switch r.Proto { case "any": proto = firewall.ProtoAny case "tcp": proto = firewall.ProtoTCP case "udp": proto = firewall.ProtoUDP case "icmp": proto = firewall.ProtoICMP default: return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto) } var cidr *net.IPNet if r.Cidr != "" { _, cidr, err = net.ParseCIDR(r.Cidr) if err != nil { return fmt.Errorf("%s rule #%v; cidr did not parse; %s", table, i, err) } } err = fw.AddRule(inbound, proto, startPort, endPort, groups, r.Host, cidr, r.CAName, r.CASha) if err != nil { return fmt.Errorf("%s rule #%v; `%s`", table, i, err) } } return nil } var ErrInvalidRemoteIP = errors.New("remote IP is not in remote certificate subnets") var ErrInvalidLocalIP = errors.New("local IP is not in list of handled local IPs") var ErrNoMatchingRule = errors.New("no matching rule in firewall table") // Drop returns an error if the packet should be dropped, explaining why. It // returns nil if the packet should not be dropped. 
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error { // Check if we spoke to this tuple, if we did then allow this packet if f.inConns(packet, fp, incoming, h, caPool, localCache) { return nil } // Make sure remote address matches nebula certificate if remoteCidr := h.remoteCidr; remoteCidr != nil { if remoteCidr.Contains(fp.RemoteIP) == nil { f.metrics(incoming).droppedRemoteIP.Inc(1) return ErrInvalidRemoteIP } } else { // Simple case: Certificate has one IP and no subnets if fp.RemoteIP != h.vpnIp { f.metrics(incoming).droppedRemoteIP.Inc(1) return ErrInvalidRemoteIP } } // Make sure we are supposed to be handling this local ip address if f.localIps.Contains(fp.LocalIP) == nil { f.metrics(incoming).droppedLocalIP.Inc(1) return ErrInvalidLocalIP } table := f.OutRules if incoming { table = f.InRules } // We now know which firewall table to check against if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) { f.metrics(incoming).droppedNoRule.Inc(1) return ErrNoMatchingRule } // We always want to conntrack since it is a faster operation f.addConn(packet, fp, incoming) return nil } func (f *Firewall) metrics(incoming bool) firewallMetrics { if incoming { return f.incomingMetrics } else { return f.outgoingMetrics } } // Destroy cleans up any known cyclical references so the object can be free'd my GC. 
This should be called if a new // firewall object is created func (f *Firewall) Destroy() { //TODO: clean references if/when needed } func (f *Firewall) EmitStats() { conntrack := f.Conntrack conntrack.Lock() conntrackCount := len(conntrack.Conns) conntrack.Unlock() metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount)) metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion)) } func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool { if localCache != nil { if _, ok := localCache[fp]; ok { return true } } conntrack := f.Conntrack conntrack.Lock() // Purge every time we test ep, has := conntrack.TimerWheel.Purge() if has { f.evict(ep) } c, ok := conntrack.Conns[fp] if !ok { conntrack.Unlock() return false } if c.rulesVersion != f.rulesVersion { // This conntrack entry was for an older rule set, validate // it still passes with the current rule set table := f.OutRules if c.incoming { table = f.InRules } // We now know which firewall table to check against if !table.match(fp, c.incoming, h.ConnectionState.peerCert, caPool) { if f.l.Level >= logrus.DebugLevel { h.logger(f.l). WithField("fwPacket", fp). WithField("incoming", c.incoming). WithField("rulesVersion", f.rulesVersion). WithField("oldRulesVersion", c.rulesVersion). Debugln("dropping old conntrack entry, does not match new ruleset") } delete(conntrack.Conns, fp) conntrack.Unlock() return false } if f.l.Level >= logrus.DebugLevel { h.logger(f.l). WithField("fwPacket", fp). WithField("incoming", c.incoming). WithField("rulesVersion", f.rulesVersion). WithField("oldRulesVersion", c.rulesVersion). 
Debugln("keeping old conntrack entry, does match new ruleset") } c.rulesVersion = f.rulesVersion } switch fp.Protocol { case firewall.ProtoTCP: c.Expires = time.Now().Add(f.TCPTimeout) if incoming { f.checkTCPRTT(c, packet) } else { setTCPRTTTracking(c, packet) } case firewall.ProtoUDP: c.Expires = time.Now().Add(f.UDPTimeout) default: c.Expires = time.Now().Add(f.DefaultTimeout) } conntrack.Unlock() if localCache != nil { localCache[fp] = struct{}{} } return true } func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) { var timeout time.Duration c := &conn{} switch fp.Protocol { case firewall.ProtoTCP: timeout = f.TCPTimeout if !incoming { setTCPRTTTracking(c, packet) } case firewall.ProtoUDP: timeout = f.UDPTimeout default: timeout = f.DefaultTimeout } conntrack := f.Conntrack conntrack.Lock() if _, ok := conntrack.Conns[fp]; !ok { conntrack.TimerWheel.Add(fp, timeout) } // Record which rulesVersion allowed this connection, so we can retest after // firewall reload c.incoming = incoming c.rulesVersion = f.rulesVersion c.Expires = time.Now().Add(timeout) conntrack.Conns[fp] = c conntrack.Unlock() } // Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel // Caller must own the connMutex lock! func (f *Firewall) evict(p firewall.Packet) { //TODO: report a stat if the tcp rtt tracking was never resolved? // Are we still tracking this conn? 
conntrack := f.Conntrack t, ok := conntrack.Conns[p] if !ok { return } newT := t.Expires.Sub(time.Now()) // Timeout is in the future, re-add the timer if newT > 0 { conntrack.TimerWheel.Add(p, newT) return } // This conn is done delete(conntrack.Conns, p) } func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { if ft.AnyProto.match(p, incoming, c, caPool) { return true } switch p.Protocol { case firewall.ProtoTCP: if ft.TCP.match(p, incoming, c, caPool) { return true } case firewall.ProtoUDP: if ft.UDP.match(p, incoming, c, caPool) { return true } case firewall.ProtoICMP: if ft.ICMP.match(p, incoming, c, caPool) { return true } } return false } func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error { if startPort > endPort { return fmt.Errorf("start port was lower than end port") } for i := startPort; i <= endPort; i++ { if _, ok := fp[i]; !ok { fp[i] = &FirewallCA{ CANames: make(map[string]*FirewallRule), CAShas: make(map[string]*FirewallRule), } } if err := fp[i].addRule(groups, host, ip, caName, caSha); err != nil { return err } } return nil } func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { // We don't have any allowed ports, bail if fp == nil { return false } var port int32 if p.Fragment { port = firewall.PortFragment } else if incoming { port = int32(p.LocalPort) } else { port = int32(p.RemotePort) } if fp[port].match(p, c, caPool) { return true } return fp[firewall.PortAny].match(p, c, caPool) } func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error { fr := func() *FirewallRule { return &FirewallRule{ Hosts: make(map[string]struct{}), Groups: make([][]string, 0), CIDR: cidr.NewTree4(), } } if caSha == "" && caName == "" { if fc.Any == nil { fc.Any = fr() } return 
fc.Any.addRule(groups, host, ip) } if caSha != "" { if _, ok := fc.CAShas[caSha]; !ok { fc.CAShas[caSha] = fr() } err := fc.CAShas[caSha].addRule(groups, host, ip) if err != nil { return err } } if caName != "" { if _, ok := fc.CANames[caName]; !ok { fc.CANames[caName] = fr() } err := fc.CANames[caName].addRule(groups, host, ip) if err != nil { return err } } return nil } func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { if fc == nil { return false } if fc.Any.match(p, c) { return true } if t, ok := fc.CAShas[c.Details.Issuer]; ok { if t.match(p, c) { return true } } s, err := caPool.GetCAForCert(c) if err != nil { return false } return fc.CANames[s.Details.Name].match(p, c) } func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) error { if fr.Any { return nil } if fr.isAny(groups, host, ip) { fr.Any = true // If it's any we need to wipe out any pre-existing rules to save on memory fr.Groups = make([][]string, 0) fr.Hosts = make(map[string]struct{}) fr.CIDR = cidr.NewTree4() } else { if len(groups) > 0 { fr.Groups = append(fr.Groups, groups) } if host != "" { fr.Hosts[host] = struct{}{} } if ip != nil { fr.CIDR.AddCIDR(ip, struct{}{}) } } return nil } func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool { if len(groups) == 0 && host == "" && ip == nil { return true } for _, group := range groups { if group == "any" { return true } } if host == "any" { return true } if ip != nil && ip.Contains(net.IPv4(0, 0, 0, 0)) { return true } return false } func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool { if fr == nil { return false } // Shortcut path for if groups, hosts, or cidr contained an `any` if fr.Any { return true } // Need any of group, host, or cidr to match for _, sg := range fr.Groups { found := false for _, g := range sg { if _, ok := c.Details.InvertedGroups[g]; !ok { found = false break } found = true } if found { return 
true } } if fr.Hosts != nil { if _, ok := fr.Hosts[c.Details.Name]; ok { return true } } if fr.CIDR != nil && fr.CIDR.Contains(p.RemoteIP) != nil { return true } // No host, group, or cidr matched, bye bye return false } type rule struct { Port string Code string Proto string Host string Group string Groups []string Cidr string CAName string CASha string } func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) { r := rule{} m, ok := p.(map[interface{}]interface{}) if !ok { return r, errors.New("could not parse rule") } toString := func(k string, m map[interface{}]interface{}) string { v, ok := m[k] if !ok { return "" } return fmt.Sprintf("%v", v) } r.Port = toString("port", m) r.Code = toString("code", m) r.Proto = toString("proto", m) r.Host = toString("host", m) r.Cidr = toString("cidr", m) r.CAName = toString("ca_name", m) r.CASha = toString("ca_sha", m) // Make sure group isn't an array if v, ok := m["group"].([]interface{}); ok { if len(v) > 1 { return r, errors.New("group should contain a single value, an array with more than one entry was provided") } l.Warnf("%s rule #%v; group was an array with a single value, converting to simple value", table, i) m["group"] = v[0] } r.Group = toString("group", m) if rg, ok := m["groups"]; ok { switch reflect.TypeOf(rg).Kind() { case reflect.Slice: v := reflect.ValueOf(rg) r.Groups = make([]string, v.Len()) for i := 0; i < v.Len(); i++ { r.Groups[i] = v.Index(i).Interface().(string) } case reflect.String: r.Groups = []string{rg.(string)} default: r.Groups = []string{fmt.Sprintf("%v", rg)} } } return r, nil } func parsePort(s string) (startPort, endPort int32, err error) { if s == "any" { startPort = firewall.PortAny endPort = firewall.PortAny } else if s == "fragment" { startPort = firewall.PortFragment endPort = firewall.PortFragment } else if strings.Contains(s, `-`) { sPorts := strings.SplitN(s, `-`, 2) sPorts[0] = strings.Trim(sPorts[0], " ") sPorts[1] = strings.Trim(sPorts[1], " ") if 
len(sPorts) != 2 || sPorts[0] == "" || sPorts[1] == "" { return 0, 0, fmt.Errorf("appears to be a range but could not be parsed; `%s`", s) } rStartPort, err := strconv.Atoi(sPorts[0]) if err != nil { return 0, 0, fmt.Errorf("beginning range was not a number; `%s`", sPorts[0]) } rEndPort, err := strconv.Atoi(sPorts[1]) if err != nil { return 0, 0, fmt.Errorf("ending range was not a number; `%s`", sPorts[1]) } startPort = int32(rStartPort) endPort = int32(rEndPort) if startPort == firewall.PortAny { endPort = firewall.PortAny } } else { rPort, err := strconv.Atoi(s) if err != nil { return 0, 0, fmt.Errorf("was not a number; `%s`", s) } startPort = int32(rPort) endPort = startPort } return } //TODO: write tests for these func setTCPRTTTracking(c *conn, p []byte) { if c.Seq != 0 { return } ihl := int(p[0]&0x0f) << 2 // Don't track FIN packets if p[ihl+13]&tcpFIN != 0 { return } c.Seq = binary.BigEndian.Uint32(p[ihl+4 : ihl+8]) c.Sent = time.Now() } func (f *Firewall) checkTCPRTT(c *conn, p []byte) bool { if c.Seq == 0 { return false } ihl := int(p[0]&0x0f) << 2 if p[ihl+13]&tcpACK == 0 { return false } // Deal with wrap around, signed int cuts the ack window in half // 0 is a bad ack, no data acknowledged // positive number is a bad ack, ack is over half the window away if int32(c.Seq-binary.BigEndian.Uint32(p[ihl+8:ihl+12])) >= 0 { return false } f.metricTCPRTT.Update(time.Since(c.Sent).Nanoseconds()) c.Seq = 0 return true } nebula-1.6.1+dfsg/firewall/000077500000000000000000000000001434072716400155215ustar00rootroot00000000000000nebula-1.6.1+dfsg/firewall/cache.go000066400000000000000000000022071434072716400171140ustar00rootroot00000000000000package firewall import ( "sync/atomic" "time" "github.com/sirupsen/logrus" ) // ConntrackCache is used as a local routine cache to know if a given flow // has been seen in the conntrack table. 
type ConntrackCache map[Packet]struct{} type ConntrackCacheTicker struct { cacheV uint64 cacheTick uint64 cache ConntrackCache } func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker { if d == 0 { return nil } c := &ConntrackCacheTicker{ cache: ConntrackCache{}, } go c.tick(d) return c } func (c *ConntrackCacheTicker) tick(d time.Duration) { for { time.Sleep(d) atomic.AddUint64(&c.cacheTick, 1) } } // Get checks if the cache ticker has moved to the next version before returning // the map. If it has moved, we reset the map. func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache { if c == nil { return nil } if tick := atomic.LoadUint64(&c.cacheTick); tick != c.cacheV { c.cacheV = tick if ll := len(c.cache); ll > 0 { if l.Level == logrus.DebugLevel { l.WithField("len", ll).Debug("resetting conntrack cache") } c.cache = make(ConntrackCache, ll) } } return c.cache } nebula-1.6.1+dfsg/firewall/packet.go000066400000000000000000000023071434072716400173210ustar00rootroot00000000000000package firewall import ( "encoding/json" "fmt" "github.com/slackhq/nebula/iputil" ) type m map[string]interface{} const ( ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever ProtoTCP = 6 ProtoUDP = 17 ProtoICMP = 1 PortAny = 0 // Special value for matching `port: any` PortFragment = -1 // Special value for matching `port: fragment` ) type Packet struct { LocalIP iputil.VpnIp RemoteIP iputil.VpnIp LocalPort uint16 RemotePort uint16 Protocol uint8 Fragment bool } func (fp *Packet) Copy() *Packet { return &Packet{ LocalIP: fp.LocalIP, RemoteIP: fp.RemoteIP, LocalPort: fp.LocalPort, RemotePort: fp.RemotePort, Protocol: fp.Protocol, Fragment: fp.Fragment, } } func (fp Packet) MarshalJSON() ([]byte, error) { var proto string switch fp.Protocol { case ProtoTCP: proto = "tcp" case ProtoICMP: proto = "icmp" case ProtoUDP: proto = "udp" default: proto = fmt.Sprintf("unknown %v", fp.Protocol) } return json.Marshal(m{ "LocalIP": fp.LocalIP.String(), 
"RemoteIP": fp.RemoteIP.String(), "LocalPort": fp.LocalPort, "RemotePort": fp.RemotePort, "Protocol": proto, "Fragment": fp.Fragment, }) } nebula-1.6.1+dfsg/firewall_test.go000066400000000000000000000751231434072716400171170ustar00rootroot00000000000000package nebula import ( "bytes" "encoding/binary" "errors" "math" "net" "testing" "time" "github.com/rcrowley/go-metrics" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" ) func TestNewFirewall(t *testing.T) { l := test.NewLogger() c := &cert.NebulaCertificate{} fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c) conntrack := fw.Conntrack assert.NotNil(t, conntrack) assert.NotNil(t, conntrack.Conns) assert.NotNil(t, conntrack.TimerWheel) assert.NotNil(t, fw.InRules) assert.NotNil(t, fw.OutRules) assert.Equal(t, time.Second, fw.TCPTimeout) assert.Equal(t, time.Minute, fw.UDPTimeout) assert.Equal(t, time.Hour, fw.DefaultTimeout) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) fw = NewFirewall(l, time.Minute, time.Second, time.Hour, 
c) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen) } func TestFirewall_AddRule(t *testing.T) { l := test.NewLogger() ob := &bytes.Buffer{} l.SetOutput(ob) c := &cert.NebulaCertificate{} fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.NotNil(t, fw.InRules) assert.NotNil(t, fw.OutRules) _, ti, _ := net.ParseCIDR("1.2.3.4/32") assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, "", "")) // An empty rule is any assert.True(t, fw.InRules.TCP[1].Any.Any) assert.Empty(t, fw.InRules.TCP[1].Any.Groups) assert.Empty(t, fw.InRules.TCP[1].Any.Hosts) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "")) assert.False(t, fw.InRules.UDP[1].Any.Any) assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1") assert.Empty(t, fw.InRules.UDP[1].Any.Hosts) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, "", "")) assert.False(t, fw.InRules.ICMP[1].Any.Any) assert.Empty(t, fw.InRules.ICMP[1].Any.Groups) assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1") fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, "", "")) assert.False(t, fw.OutRules.AnyProto[1].Any.Any) assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups) assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts) assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", "")) assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name") fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha")) 
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha") // Set any and clear fields fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", "")) assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0]) assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1") assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))) // run twice just to make sure //TODO: these ANY rules should clear the CA firewall portion assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", "")) assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups) assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", "")) assert.True(t, fw.OutRules.AnyProto[0].Any.Any) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) _, anyIp, _ := net.ParseCIDR("0.0.0.0/0") assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, "", "")) assert.True(t, fw.OutRules.AnyProto[0].Any.Any) // Test error conditions fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", "")) assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, "", "")) } func TestFirewall_Drop(t *testing.T) { l := test.NewLogger() ob := &bytes.Buffer{} l.SetOutput(ob) p := firewall.Packet{ iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), 10, 90, firewall.ProtoUDP, false, } ipNet := net.IPNet{ IP: net.IPv4(1, 2, 3, 4), Mask: net.IPMask{255, 255, 255, 0}, } c := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host1", Ips: 
[]*net.IPNet{&ipNet}, Groups: []string{"default-group"}, InvertedGroups: map[string]struct{}{"default-group": {}}, Issuer: "signer-shasum", }, } h := HostInfo{ ConnectionState: &ConnectionState{ peerCert: &c, }, vpnIp: iputil.Ip2VpnIp(ipNet.IP), } h.CreateRemoteCIDR(&c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", "")) cp := cert.NewCAPool() // Drop outbound assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) // Allow inbound resetConntrack(fw) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) // Allow outbound because conntrack assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) // test remote mismatch oldRemote := p.RemoteIP p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10)) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP) p.RemoteIP = oldRemote // ensure signer doesn't get in the way of group checks fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad")) assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule) // test caSha doesn't drop on match fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum")) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) // ensure ca name doesn't get in the way of group checks cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, 
firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", "")) assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule) // test caName doesn't drop on match cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", "")) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) } func BenchmarkFirewallTable_match(b *testing.B) { ft := FirewallTable{ TCP: firewallPort{}, } _, n, _ := net.ParseCIDR("172.1.1.1/32") _ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, "", "") _ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, "", "") cp := cert.NewCAPool() b.Run("fail on proto", func(b *testing.B) { c := &cert.NebulaCertificate{} for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp) } }) b.Run("fail on port", func(b *testing.B) { c := &cert.NebulaCertificate{} for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp) } }) b.Run("fail all group, name, and cidr", func(b *testing.B) { _, ip, _ := net.ParseCIDR("9.254.254.254/32") c := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ InvertedGroups: map[string]struct{}{"nope": {}}, Name: "nope", Ips: []*net.IPNet{ip}, }, } for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, 
LocalPort: 10}, true, c, cp) } }) b.Run("pass on group", func(b *testing.B) { c := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ InvertedGroups: map[string]struct{}{"good-group": {}}, Name: "nope", }, } for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) } }) b.Run("pass on name", func(b *testing.B) { c := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ InvertedGroups: map[string]struct{}{"nope": {}}, Name: "good-host", }, } for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) } }) b.Run("pass on ip", func(b *testing.B) { ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) c := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ InvertedGroups: map[string]struct{}{"nope": {}}, Name: "good-host", }, } for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp) } }) _ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "") b.Run("pass on ip with any port", func(b *testing.B) { ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) c := &cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ InvertedGroups: map[string]struct{}{"nope": {}}, Name: "good-host", }, } for n := 0; n < b.N; n++ { ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp) } }) } func TestFirewall_Drop2(t *testing.T) { l := test.NewLogger() ob := &bytes.Buffer{} l.SetOutput(ob) p := firewall.Packet{ iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), 10, 90, firewall.ProtoUDP, false, } ipNet := net.IPNet{ IP: net.IPv4(1, 2, 3, 4), Mask: net.IPMask{255, 255, 255, 0}, } c := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host1", Ips: []*net.IPNet{&ipNet}, InvertedGroups: map[string]struct{}{"default-group": {}, "test-group": {}}, }, } h := HostInfo{ ConnectionState: 
&ConnectionState{ peerCert: &c, }, vpnIp: iputil.Ip2VpnIp(ipNet.IP), } h.CreateRemoteCIDR(&c) c1 := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host1", Ips: []*net.IPNet{&ipNet}, InvertedGroups: map[string]struct{}{"default-group": {}, "test-group-not": {}}, }, } h1 := HostInfo{ ConnectionState: &ConnectionState{ peerCert: &c1, }, } h1.CreateRemoteCIDR(&c1) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", "")) cp := cert.NewCAPool() // h1/c1 lacks the proper groups assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule) // c has the proper groups resetConntrack(fw) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) } func TestFirewall_Drop3(t *testing.T) { l := test.NewLogger() ob := &bytes.Buffer{} l.SetOutput(ob) p := firewall.Packet{ iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), 1, 1, firewall.ProtoUDP, false, } ipNet := net.IPNet{ IP: net.IPv4(1, 2, 3, 4), Mask: net.IPMask{255, 255, 255, 0}, } c := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host-owner", Ips: []*net.IPNet{&ipNet}, }, } c1 := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host1", Ips: []*net.IPNet{&ipNet}, Issuer: "signer-sha-bad", }, } h1 := HostInfo{ ConnectionState: &ConnectionState{ peerCert: &c1, }, vpnIp: iputil.Ip2VpnIp(ipNet.IP), } h1.CreateRemoteCIDR(&c1) c2 := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host2", Ips: []*net.IPNet{&ipNet}, Issuer: "signer-sha", }, } h2 := HostInfo{ ConnectionState: &ConnectionState{ peerCert: &c2, }, vpnIp: iputil.Ip2VpnIp(ipNet.IP), } h2.CreateRemoteCIDR(&c2) c3 := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host3", Ips: []*net.IPNet{&ipNet}, Issuer: "signer-sha-bad", }, } h3 := HostInfo{ ConnectionState: &ConnectionState{ peerCert: &c3, }, 
vpnIp: iputil.Ip2VpnIp(ipNet.IP), } h3.CreateRemoteCIDR(&c3) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha")) cp := cert.NewCAPool() // c1 should pass because host match assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil)) // c2 should pass because ca sha match resetConntrack(fw) assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil)) // c3 should fail because no match resetConntrack(fw) assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule) } func TestFirewall_DropConntrackReload(t *testing.T) { l := test.NewLogger() ob := &bytes.Buffer{} l.SetOutput(ob) p := firewall.Packet{ iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), 10, 90, firewall.ProtoUDP, false, } ipNet := net.IPNet{ IP: net.IPv4(1, 2, 3, 4), Mask: net.IPMask{255, 255, 255, 0}, } c := cert.NebulaCertificate{ Details: cert.NebulaCertificateDetails{ Name: "host1", Ips: []*net.IPNet{&ipNet}, Groups: []string{"default-group"}, InvertedGroups: map[string]struct{}{"default-group": {}}, Issuer: "signer-shasum", }, } h := HostInfo{ ConnectionState: &ConnectionState{ peerCert: &c, }, vpnIp: iputil.Ip2VpnIp(ipNet.IP), } h.CreateRemoteCIDR(&c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", "")) cp := cert.NewCAPool() // Drop outbound assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) // Allow inbound resetConntrack(fw) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) // Allow outbound because conntrack assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) oldFw := fw fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, "", 
"")) fw.Conntrack = oldFw.Conntrack fw.rulesVersion = oldFw.rulesVersion + 1 // Allow outbound because conntrack and new rules allow port 10 assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) oldFw = fw fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, "", "")) fw.Conntrack = oldFw.Conntrack fw.rulesVersion = oldFw.rulesVersion + 1 // Drop outbound because conntrack doesn't match new ruleset assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) } func BenchmarkLookup(b *testing.B) { ml := func(m map[string]struct{}, a [][]string) { for n := 0; n < b.N; n++ { for _, sg := range a { found := false for _, g := range sg { if _, ok := m[g]; !ok { found = false break } found = true } if found { return } } } } b.Run("array to map best", func(b *testing.B) { m := map[string]struct{}{ "1ne": {}, "2wo": {}, "3hr": {}, "4ou": {}, "5iv": {}, "6ix": {}, } a := [][]string{ {"1ne", "2wo", "3hr", "4ou", "5iv", "6ix"}, {"one", "2wo", "3hr", "4ou", "5iv", "6ix"}, {"one", "two", "3hr", "4ou", "5iv", "6ix"}, {"one", "two", "thr", "4ou", "5iv", "6ix"}, {"one", "two", "thr", "fou", "5iv", "6ix"}, {"one", "two", "thr", "fou", "fiv", "6ix"}, {"one", "two", "thr", "fou", "fiv", "six"}, } for n := 0; n < b.N; n++ { ml(m, a) } }) b.Run("array to map worst", func(b *testing.B) { m := map[string]struct{}{ "one": {}, "two": {}, "thr": {}, "fou": {}, "fiv": {}, "six": {}, } a := [][]string{ {"1ne", "2wo", "3hr", "4ou", "5iv", "6ix"}, {"one", "2wo", "3hr", "4ou", "5iv", "6ix"}, {"one", "two", "3hr", "4ou", "5iv", "6ix"}, {"one", "two", "thr", "4ou", "5iv", "6ix"}, {"one", "two", "thr", "fou", "5iv", "6ix"}, {"one", "two", "thr", "fou", "fiv", "6ix"}, {"one", "two", "thr", "fou", "fiv", "six"}, } for n := 0; n < b.N; n++ { ml(m, a) } }) //TODO: only way array lookup in array will help is if both are sorted, then maybe it's faster } func Test_parsePort(t *testing.T) { _, _, 
err := parsePort("") assert.EqualError(t, err, "was not a number; ``") _, _, err = parsePort(" ") assert.EqualError(t, err, "was not a number; ` `") _, _, err = parsePort("-") assert.EqualError(t, err, "appears to be a range but could not be parsed; `-`") _, _, err = parsePort(" - ") assert.EqualError(t, err, "appears to be a range but could not be parsed; ` - `") _, _, err = parsePort("a-b") assert.EqualError(t, err, "beginning range was not a number; `a`") _, _, err = parsePort("1-b") assert.EqualError(t, err, "ending range was not a number; `b`") s, e, err := parsePort(" 1 - 2 ") assert.Equal(t, int32(1), s) assert.Equal(t, int32(2), e) assert.Nil(t, err) s, e, err = parsePort("0-1") assert.Equal(t, int32(0), s) assert.Equal(t, int32(0), e) assert.Nil(t, err) s, e, err = parsePort("9919") assert.Equal(t, int32(9919), s) assert.Equal(t, int32(9919), e) assert.Nil(t, err) s, e, err = parsePort("any") assert.Equal(t, int32(0), s) assert.Equal(t, int32(0), e) assert.Nil(t, err) } func TestNewFirewallFromConfig(t *testing.T) { l := test.NewLogger() // Test a bad rule definition c := &cert.NebulaCertificate{} conf := config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"} _, err := NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules") // Test both port and code conf = config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided") // Test missing host, group, cidr, ca_name and ca_sha conf = config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, 
group, cidr, ca_name, or ca_sha must be provided") // Test code/port error conf = config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`") conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`") // Test proto error conf = config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``") // Test cidr parse error conf = config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh") // Test both group and groups conf = config.NewC(l) conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}} _, err = NewFirewallFromConfig(l, c, conf) assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided") } func TestAddFirewallRulesFromConfig(t *testing.T) { l := test.NewLogger() // Test adding tcp rule conf := config.NewC(l) mf := &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}} 
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) // Test adding udp rule conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) // Test adding icmp rule conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) // Test adding any rule conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) // Test adding rule with ca_sha conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall) // Test adding rule with ca_name conf = config.NewC(l) mf 
= &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall) // Test single group conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall) // Test single groups conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}} assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall) // Test multiple AND groups conf = config.NewC(l) mf = &mockFirewall{} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}} assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall) // Test Add error conf = config.NewC(l) mf = &mockFirewall{} mf.nextCallReturn = errors.New("test error") conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} assert.EqualError(t, AddFirewallRulesFromConfig(l, true, 
conf, mf), "firewall.inbound rule #0; `test error`") } func TestTCPRTTTracking(t *testing.T) { b := make([]byte, 200) // Max ip IHL (60 bytes) and tcp IHL (60 bytes) b[0] = 15 b[60+12] = 15 << 4 f := Firewall{ metricTCPRTT: metrics.GetOrRegisterHistogram("nope", nil, metrics.NewExpDecaySample(1028, 0.015)), } // Set SEQ to 1 binary.BigEndian.PutUint32(b[60+4:60+8], 1) c := &conn{} setTCPRTTTracking(c, b) assert.Equal(t, uint32(1), c.Seq) // Bad ack - no ack flag binary.BigEndian.PutUint32(b[60+8:60+12], 80) assert.False(t, f.checkTCPRTT(c, b)) // Bad ack, number is too low binary.BigEndian.PutUint32(b[60+8:60+12], 0) b[60+13] = uint8(0x10) assert.False(t, f.checkTCPRTT(c, b)) // Good ack binary.BigEndian.PutUint32(b[60+8:60+12], 80) assert.True(t, f.checkTCPRTT(c, b)) assert.Equal(t, uint32(0), c.Seq) // Set SEQ to 1 binary.BigEndian.PutUint32(b[60+4:60+8], 1) c = &conn{} setTCPRTTTracking(c, b) assert.Equal(t, uint32(1), c.Seq) // Good acks binary.BigEndian.PutUint32(b[60+8:60+12], 81) assert.True(t, f.checkTCPRTT(c, b)) assert.Equal(t, uint32(0), c.Seq) // Set SEQ to max uint32 - 20 binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)-20) c = &conn{} setTCPRTTTracking(c, b) assert.Equal(t, ^uint32(0)-20, c.Seq) // Good acks binary.BigEndian.PutUint32(b[60+8:60+12], 81) assert.True(t, f.checkTCPRTT(c, b)) assert.Equal(t, uint32(0), c.Seq) // Set SEQ to max uint32 / 2 binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)/2) c = &conn{} setTCPRTTTracking(c, b) assert.Equal(t, ^uint32(0)/2, c.Seq) // Below binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2-1) assert.False(t, f.checkTCPRTT(c, b)) assert.Equal(t, ^uint32(0)/2, c.Seq) // Halfway below binary.BigEndian.PutUint32(b[60+8:60+12], uint32(0)) assert.False(t, f.checkTCPRTT(c, b)) assert.Equal(t, ^uint32(0)/2, c.Seq) // Halfway above is ok binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)) assert.True(t, f.checkTCPRTT(c, b)) assert.Equal(t, uint32(0), c.Seq) // Set SEQ to max uint32 
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)) c = &conn{} setTCPRTTTracking(c, b) assert.Equal(t, ^uint32(0), c.Seq) // Halfway + 1 above binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2+1) assert.False(t, f.checkTCPRTT(c, b)) assert.Equal(t, ^uint32(0), c.Seq) // Halfway above binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2) assert.True(t, f.checkTCPRTT(c, b)) assert.Equal(t, uint32(0), c.Seq) } func TestFirewall_convertRule(t *testing.T) { l := test.NewLogger() ob := &bytes.Buffer{} l.SetOutput(ob) // Ensure group array of 1 is converted and a warning is printed c := map[interface{}]interface{}{ "group": []interface{}{"group1"}, } r, err := convertRule(l, c, "test", 1) assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value") assert.Nil(t, err) assert.Equal(t, "group1", r.Group) // Ensure group array of > 1 is errord ob.Reset() c = map[interface{}]interface{}{ "group": []interface{}{"group1", "group2"}, } r, err = convertRule(l, c, "test", 1) assert.Equal(t, "", ob.String()) assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided") // Make sure a well formed group is alright ob.Reset() c = map[interface{}]interface{}{ "group": "group1", } r, err = convertRule(l, c, "test", 1) assert.Nil(t, err) assert.Equal(t, "group1", r.Group) } type addRuleCall struct { incoming bool proto uint8 startPort int32 endPort int32 groups []string host string ip *net.IPNet caName string caSha string } type mockFirewall struct { lastCall addRuleCall nextCallReturn error } func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error { mf.lastCall = addRuleCall{ incoming: incoming, proto: proto, startPort: startPort, endPort: endPort, groups: groups, host: host, ip: ip, caName: caName, caSha: caSha, } err := mf.nextCallReturn mf.nextCallReturn = nil 
return err } func resetConntrack(fw *Firewall) { fw.Conntrack.Lock() fw.Conntrack.Conns = map[firewall.Packet]*conn{} fw.Conntrack.Unlock() } nebula-1.6.1+dfsg/go.mod000066400000000000000000000037721434072716400150330ustar00rootroot00000000000000module github.com/slackhq/nebula go 1.18 require ( github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/armon/go-radix v1.0.0 github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/flynn/noise v1.0.0 github.com/gogo/protobuf v1.3.2 github.com/google/gopacket v1.1.19 github.com/imdario/mergo v0.3.8 github.com/kardianos/service v1.2.1 github.com/miekg/dns v1.1.48 github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f github.com/prometheus/client_golang v1.12.1 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/sirupsen/logrus v1.8.1 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 github.com/stretchr/testify v1.7.1 github.com/vishvananda/netlink v1.1.0 golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 golang.org/x/net v0.0.0-20220403103023-749bd193bc2b golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 golang.zx2c4.com/wireguard/windows v0.5.3 google.golang.org/protobuf v1.28.0 gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.33.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // 
indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/tools v0.1.10 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) nebula-1.6.1+dfsg/go.sum000066400000000000000000001603301434072716400150520ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery 
v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units 
v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod 
h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo 
v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk= github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ= github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.48 h1:Ucfr7IIVyMBz4lRE8qmGUuZ4Wt3/ZGu9hmcMT3Uu4tQ= github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg= github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f/go.mod h1:nwPd6pDNId/Xi16qtKrFHrauSwMNuvk+zcjk89wrnlA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0= golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 h1:PRD0hj6tTuUnCFD08vkvjkYFbQg/9lV8KIxe1y4/cvU= golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY= golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= 
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 
v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= nebula-1.6.1+dfsg/handshake.go000066400000000000000000000015471434072716400162000ustar00rootroot00000000000000package nebula import ( "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/udp" ) func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H, hostinfo *HostInfo) { // First remote allow list check before we know the vpnIp if addr != nil { if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) { f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake") return } } switch h.Subtype { case header.HandshakeIXPSK0: switch h.MessageCounter { case 1: ixHandshakeStage1(f, addr, via, packet, h) case 2: newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex) tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h) if tearDown && newHostinfo != nil { f.handshakeManager.DeleteHostInfo(newHostinfo) } } } } nebula-1.6.1+dfsg/handshake_ix.go000066400000000000000000000476101434072716400167010ustar00rootroot00000000000000package nebula import ( "sync/atomic" "time" "github.com/flynn/noise" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) // NOISE IX Handshakes // This function constructs a handshake packet, but does not actually send it // Sending is done by the handshake manager func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) { // This queries the lighthouse if we don't know a remote for the 
host // We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send // more quickly, effect is a quicker handshake. if hostinfo.remote == nil { f.lightHouse.QueryServer(vpnIp, f) } err := f.handshakeManager.AddIndexHostInfo(hostinfo) if err != nil { f.l.WithError(err).WithField("vpnIp", vpnIp). WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index") return } ci := hostinfo.ConnectionState hsProto := &NebulaHandshakeDetails{ InitiatorIndex: hostinfo.localIndexId, Time: uint64(time.Now().UnixNano()), Cert: ci.certState.rawCertificateNoKey, } hsBytes := []byte{} hs := &NebulaHandshake{ Details: hsProto, } hsBytes, err = hs.Marshal() if err != nil { f.l.WithError(err).WithField("vpnIp", vpnIp). WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message") return } h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1) atomic.AddUint64(&ci.atomicMessageCounter, 1) msg, _, _, err := ci.H.WriteMessage(h, hsBytes) if err != nil { f.l.WithError(err).WithField("vpnIp", vpnIp). WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage") return } // We are sending handshake packet 1, so we don't expect to receive // handshake packet 1 from the responder ci.window.Update(f.l, 1) hostinfo.HandshakePacket[0] = msg hostinfo.HandshakeReady = true hostinfo.handshakeStart = time.Now() } func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H) { ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0) // Mark packet 1 as seen so it doesn't show up as missed ci.window.Update(f.l, 1) msg, _, _, err := ci.H.ReadMessage(nil, packet[header.Len:]) if err != nil { f.l.WithError(err).WithField("udpAddr", addr). 
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage") return } hs := &NebulaHandshake{} err = hs.Unmarshal(msg) /* l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex) */ if err != nil || hs.Details == nil { f.l.WithError(err).WithField("udpAddr", addr). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message") return } remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool) if err != nil { f.l.WithError(err).WithField("udpAddr", addr). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert). Info("Invalid certificate from host") return } vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP) certName := remoteCert.Details.Name fingerprint, _ := remoteCert.Sha256Sum() issuer := remoteCert.Details.Issuer if vpnIp == f.myVpnIp { f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself") return } if addr != nil { if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) { f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake") return } } myIndex, err := generateIndex(f.l) if err != nil { f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). 
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index") return } hostinfo := &HostInfo{ ConnectionState: ci, localIndexId: myIndex, remoteIndexId: hs.Details.InitiatorIndex, vpnIp: vpnIp, HandshakePacket: make(map[uint8][]byte, 0), lastHandshakeTime: hs.Details.Time, relayState: RelayState{ relays: map[iputil.VpnIp]struct{}{}, relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIdx: map[uint32]*Relay{}, }, } hostinfo.Lock() defer hostinfo.Unlock() f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). Info("Handshake message received") hs.Details.ResponderIndex = myIndex hs.Details.Cert = ci.certState.rawCertificateNoKey // Update the time in case their clock is way off from ours hs.Details.Time = uint64(time.Now().UnixNano()) hsBytes, err := hs.Marshal() if err != nil { f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message") return } nh := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, hs.Details.InitiatorIndex, 2) msg, dKey, eKey, err := ci.H.WriteMessage(nh, hsBytes) if err != nil { f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). 
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage") return } else if dKey == nil || eKey == nil { f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Noise did not arrive at a key") return } hostinfo.HandshakePacket[0] = make([]byte, len(packet[header.Len:])) copy(hostinfo.HandshakePacket[0], packet[header.Len:]) // Regardless of whether you are the sender or receiver, you should arrive here // and complete standing up the connection. hostinfo.HandshakePacket[2] = make([]byte, len(msg)) copy(hostinfo.HandshakePacket[2], msg) // We are sending handshake packet 2, so we don't expect to receive // handshake packet 2 from the initiator. ci.window.Update(f.l, 2) ci.peerCert = remoteCert ci.dKey = NewNebulaCipherState(dKey) ci.eKey = NewNebulaCipherState(eKey) hostinfo.remotes = f.lightHouse.QueryCache(vpnIp) hostinfo.SetRemote(addr) hostinfo.CreateRemoteCIDR(remoteCert) // Only overwrite existing record if we should win the handshake race overwrite := vpnIp > f.myVpnIp existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f) if err != nil { switch err { case ErrAlreadySeen: // Update remote if preferred (Note we have to switch to locking // the existing hostinfo, and then switch back so the defer Unlock // higher in this function still works) hostinfo.Unlock() existing.Lock() // Update remote if preferred if existing.SetRemoteIfPreferred(f.hostMap, addr) { // Send a test packet to ensure the other side has also switched to // the preferred remote f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) } existing.Unlock() hostinfo.Lock() msg = existing.HandshakePacket[2] f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1) if addr != nil { err := 
f.outside.WriteTo(msg, addr) if err != nil { f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true). WithError(err).Error("Failed to send handshake message") } else { f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true). Info("Handshake message sent") } return } else { via2 := via.(*ViaSender) if via2 == nil { f.l.Error("Handshake send failed: both addr and via are nil.") return } hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp) f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false) f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via2.relayHI.vpnIp). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true). Info("Handshake message sent") return } case ErrExistingHostInfo: // This means there was an existing tunnel and this handshake was older than the one we are currently based on f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("oldHandshakeTime", existing.lastHandshakeTime). WithField("newHandshakeTime", hostinfo.lastHandshakeTime). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). Info("Handshake too old") // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) return case ErrLocalIndexCollision: // This means we failed to insert because of collision on localIndexId. 
Just let the next handshake packet retry f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp). Error("Failed to add HostInfo due to localIndex collision") return case ErrExistingHandshake: // We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). Error("Prevented a pending handshake race") return default: // Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete // And we forget to update it here f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). Error("Failed to add HostInfo to HostMap") return } } // Do the send f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1) if addr != nil { err = f.outside.WriteTo(msg, addr) if err != nil { f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). 
WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithError(err).Error("Failed to send handshake") } else { f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("sentCachedPackets", len(hostinfo.packetStore)). Info("Handshake message sent") } } else { via2 := via.(*ViaSender) if via2 == nil { f.l.Error("Handshake send failed: both addr and via are nil.") return } hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp) f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false) f.l.WithField("vpnIp", vpnIp).WithField("relay", via2.relayHI.vpnIp). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("sentCachedPackets", len(hostinfo.packetStore)). 
Info("Handshake message sent") } hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics) return } func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *HostInfo, packet []byte, h *header.H) bool { if hostinfo == nil { // Nothing here to tear down, got a bogus stage 2 packet return true } hostinfo.Lock() defer hostinfo.Unlock() if addr != nil { if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) { f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake") return false } } ci := hostinfo.ConnectionState if ci.ready { f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h). Info("Handshake is already complete") // Update remote if preferred if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) { // Send a test packet to ensure the other side has also switched to // the preferred remote f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) } // We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets return false } msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:]) if err != nil { f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h). Error("Failed to call noise.ReadMessage") // We don't want to tear down the connection on a bad ReadMessage because it could be an attacker trying // to DOS us. Every other error condition after should to allow a possible good handshake to complete in the // near future return false } else if dKey == nil || eKey == nil { f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). 
Error("Noise did not arrive at a key") // This should be impossible in IX but just in case, if we get here then there is no chance to recover // the handshake state machine. Tear it down return true } hs := &NebulaHandshake{} err = hs.Unmarshal(msg) if err != nil || hs.Details == nil { f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message") // The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again return true } remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool) if err != nil { f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). Error("Invalid certificate from host") // The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again return true } vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP) certName := remoteCert.Details.Name fingerprint, _ := remoteCert.Sha256Sum() issuer := remoteCert.Details.Issuer // Ensure the right host responded if vpnIp != hostinfo.vpnIp { f.l.WithField("intendedVpnIp", hostinfo.vpnIp).WithField("haveVpnIp", vpnIp). WithField("udpAddr", addr).WithField("certName", certName). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). 
Info("Incorrect host responded to handshake") // Release our old handshake from pending, it should not continue f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo) // Create a new hostinfo/handshake for the intended vpn ip //TODO: this adds it to the timer wheel in a way that aggressively retries newHostInfo := f.getOrHandshake(hostinfo.vpnIp) newHostInfo.Lock() // Block the current used address newHostInfo.remotes = hostinfo.remotes newHostInfo.remotes.BlockRemote(addr) // Get the correct remote list for the host we did handshake with hostinfo.remotes = f.lightHouse.QueryCache(vpnIp) f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp). WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)). Info("Blocked addresses for handshakes") // Swap the packet store to benefit the original intended recipient hostinfo.ConnectionState.queueLock.Lock() newHostInfo.packetStore = hostinfo.packetStore hostinfo.packetStore = []*cachedPacket{} hostinfo.ConnectionState.queueLock.Unlock() // Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down hostinfo.vpnIp = vpnIp f.sendCloseTunnel(hostinfo) newHostInfo.Unlock() return true } // Mark packet 2 as seen so it doesn't show up as missed ci.window.Update(f.l, 2) duration := time.Since(hostinfo.handshakeStart).Nanoseconds() f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). WithField("certName", certName). WithField("fingerprint", fingerprint). WithField("issuer", issuer). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("durationNs", duration). WithField("sentCachedPackets", len(hostinfo.packetStore)). 
Info("Handshake message received") hostinfo.remoteIndexId = hs.Details.ResponderIndex hostinfo.lastHandshakeTime = hs.Details.Time // Store their cert and our symmetric keys ci.peerCert = remoteCert ci.dKey = NewNebulaCipherState(dKey) ci.eKey = NewNebulaCipherState(eKey) // Make sure the current udpAddr being used is set for responding if addr != nil { hostinfo.SetRemote(addr) } else { via2 := via.(*ViaSender) hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp) } // Build up the radix for the firewall if we have subnets in the cert hostinfo.CreateRemoteCIDR(remoteCert) // Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp //TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok? f.handshakeManager.Complete(hostinfo, f) hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics) f.metricHandshakes.Update(duration) return false } nebula-1.6.1+dfsg/handshake_manager.go000066400000000000000000000417201434072716400176670ustar00rootroot00000000000000package nebula import ( "bytes" "context" "crypto/rand" "encoding/binary" "errors" "net" "time" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) const ( DefaultHandshakeTryInterval = time.Millisecond * 100 DefaultHandshakeRetries = 10 DefaultHandshakeTriggerBuffer = 64 DefaultUseRelays = true ) var ( defaultHandshakeConfig = HandshakeConfig{ tryInterval: DefaultHandshakeTryInterval, retries: DefaultHandshakeRetries, triggerBuffer: DefaultHandshakeTriggerBuffer, useRelays: DefaultUseRelays, } ) type HandshakeConfig struct { tryInterval time.Duration retries int triggerBuffer int useRelays bool messageMetrics *MessageMetrics } type HandshakeManager struct { pendingHostMap *HostMap mainHostMap *HostMap lightHouse *LightHouse outside *udp.Conn config HandshakeConfig OutboundHandshakeTimer *SystemTimerWheel messageMetrics 
*MessageMetrics metricInitiated metrics.Counter metricTimedOut metrics.Counter l *logrus.Logger // can be used to trigger outbound handshake for the given vpnIp trigger chan iputil.VpnIp } func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager { return &HandshakeManager{ pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges), mainHostMap: mainHostMap, lightHouse: lightHouse, outside: outside, config: config, trigger: make(chan iputil.VpnIp, config.triggerBuffer), OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)), messageMetrics: config.messageMetrics, metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil), metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil), l: l, } } func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) { clockSource := time.NewTicker(c.config.tryInterval) defer clockSource.Stop() for { select { case <-ctx.Done(): return case vpnIP := <-c.trigger: c.handleOutbound(vpnIP, f, true) case now := <-clockSource.C: c.NextOutboundHandshakeTimerTick(now, f) } } } func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) { c.OutboundHandshakeTimer.advance(now) for { ep := c.OutboundHandshakeTimer.Purge() if ep == nil { break } vpnIp := ep.(iputil.VpnIp) c.handleOutbound(vpnIp, f, false) } } func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) { hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp) if err != nil { return } hostinfo.Lock() defer hostinfo.Unlock() // We may have raced to completion but now that we have a lock we should ensure we have not yet completed. 
if hostinfo.HandshakeComplete { // Ensure we don't exist in the pending hostmap anymore since we have completed c.pendingHostMap.DeleteHostInfo(hostinfo) return } // Check if we have a handshake packet to transmit yet if !hostinfo.HandshakeReady { // There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly // Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter)) return } // If we are out of time, clean up if hostinfo.HandshakeCounter >= c.config.retries { hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)). WithField("initiatorIndex", hostinfo.localIndexId). WithField("remoteIndex", hostinfo.remoteIndexId). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()). Info("Handshake timed out") c.metricTimedOut.Inc(1) c.pendingHostMap.DeleteHostInfo(hostinfo) return } // We only care about a lighthouse trigger before the first handshake transmit attempt. This is a very specific // optimization for a fast lighthouse reply //TODO: it would feel better to do this once, anytime, as our delay increases over time if lighthouseTriggered && hostinfo.HandshakeCounter > 0 { // If we didn't return here a lighthouse could cause us to aggressively send handshakes return } // Get a remotes object if we don't already have one. // This is mainly to protect us as this should never be the case // NB ^ This comment doesn't jive. It's how the thing gets intiailized. // It's the common path. Should it update every time, in case a future LH query/queries give us more info? 
if hostinfo.remotes == nil { hostinfo.remotes = c.lightHouse.QueryCache(vpnIp) } //TODO: this will generate a load of queries for hosts with only 1 ip (i'm not using a lighthouse, static mapped) if hostinfo.remotes.Len(c.pendingHostMap.preferredRanges) <= 1 { // If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse // Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about // the learned public ip for them. Query again to short circuit the promotion counter c.lightHouse.QueryServer(vpnIp, f) } // Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply var sentTo []*udp.Addr hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) { c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1) err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr) if err != nil { hostinfo.logger(c.l).WithField("udpAddr", addr). WithField("initiatorIndex", hostinfo.localIndexId). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithError(err).Error("Failed to send handshake message") } else { sentTo = append(sentTo, addr) } }) // Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout if len(sentTo) > 0 { hostinfo.logger(c.l).WithField("udpAddrs", sentTo). WithField("initiatorIndex", hostinfo.localIndexId). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). 
Info("Handshake message sent") } if c.config.useRelays && len(hostinfo.remotes.relays) > 0 { hostinfo.logger(c.l).WithField("relayIps", hostinfo.remotes.relays).Info("Attempt to relay through hosts") // Send a RelayRequest to all known Relay IP's for _, relay := range hostinfo.remotes.relays { // Don't relay to myself, and don't relay through the host I'm trying to connect to if *relay == vpnIp || *relay == c.lightHouse.myVpnIp { continue } relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay) if err != nil || relayHostInfo.remote == nil { hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target.") f.Handshake(*relay) continue } // Check the relay HostInfo to see if we already established a relay through it if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok { switch existingRelay.State { case Established: hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay") f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false) case Requested: hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request") // Re-send the CreateRelay request, in case the previous one was lost. m := NebulaControl{ Type: NebulaControl_CreateRelayRequest, InitiatorRelayIndex: existingRelay.LocalIndex, RelayFromIp: uint32(c.lightHouse.myVpnIp), RelayToIp: uint32(vpnIp), } msg, err := m.Marshal() if err != nil { hostinfo.logger(c.l). WithError(err). Error("Failed to marshal Control message to create relay") } else { f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu)) } default: hostinfo.logger(c.l). WithField("vpnIp", vpnIp). WithField("state", existingRelay.State). WithField("relayVpnIp", relayHostInfo.vpnIp). Errorf("Relay unexpected state") } } else { // No relays exist or requested yet. 
if relayHostInfo.remote != nil { idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested) if err != nil { hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap") } m := NebulaControl{ Type: NebulaControl_CreateRelayRequest, InitiatorRelayIndex: idx, RelayFromIp: uint32(c.lightHouse.myVpnIp), RelayToIp: uint32(vpnIp), } msg, err := m.Marshal() if err != nil { hostinfo.logger(c.l). WithError(err). Error("Failed to marshal Control message to create relay") } else { f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu)) } } } } } // Increment the counter to increase our delay, linear backoff hostinfo.HandshakeCounter++ // If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add if !lighthouseTriggered { //TODO: feel like we dupe handshake real fast in a tight loop, why? c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter)) } } func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo { hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init) if created { c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval) c.metricInitiated.Inc(1) } return hostinfo } var ( ErrExistingHostInfo = errors.New("existing hostinfo") ErrAlreadySeen = errors.New("already seen") ErrLocalIndexCollision = errors.New("local index collision") ErrExistingHandshake = errors.New("existing handshake") ) // CheckAndComplete checks for any conflicts in the main and pending hostmap // before adding hostinfo to main. If err is nil, it was added. 
// CheckAndComplete checks for any conflicts in the main and pending hostmap
// before adding hostinfo to main. If err is nil, it was added. Otherwise err will be:
//
// ErrAlreadySeen if we already have an entry in the hostmap that has seen the
// exact same handshake packet
//
// ErrExistingHostInfo if we already have an entry in the hostmap for this
// VpnIp and the new handshake was older than the one we currently have
//
// ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) {
	// Both maps are locked for the whole operation so the conflict checks and
	// the final insert are atomic with respect to other handshake activity.
	c.pendingHostMap.Lock()
	defer c.pendingHostMap.Unlock()
	c.mainHostMap.Lock()
	defer c.mainHostMap.Unlock()

	// Check if we already have a tunnel with this vpn ip
	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
	if found && existingHostInfo != nil {
		// Is it just a delayed handshake packet?
		if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
			return existingHostInfo, ErrAlreadySeen
		}

		// Is this a newer handshake?
		if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime {
			return existingHostInfo, ErrExistingHostInfo
		}

		existingHostInfo.logger(c.l).Info("Taking new handshake")
	}

	existingIndex, found := c.mainHostMap.Indexes[hostinfo.localIndexId]
	if found {
		// We have a collision, but for a different hostinfo
		return existingIndex, ErrLocalIndexCollision
	}

	existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
	if found && existingIndex != hostinfo {
		// We have a collision, but for a different hostinfo
		return existingIndex, ErrLocalIndexCollision
	}

	existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
	if found && existingRemoteIndex != nil && existingRemoteIndex.vpnIp != hostinfo.vpnIp {
		// We have a collision, but this can happen since we can't control
		// the remote ID. Just log about the situation as a note.
		hostinfo.logger(c.l).
			WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
			Info("New host shadows existing host remoteIndex")
	}

	// Check if we are also handshaking with this vpn ip
	pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
	if found && pendingHostInfo != nil {
		if !overwrite {
			// We won, let our pending handshake win
			return pendingHostInfo, ErrExistingHandshake
		}

		// We lost, take this handshake and move any cached packets over so they get sent
		// queueLock guards packetStore while we migrate the cached packets.
		pendingHostInfo.ConnectionState.queueLock.Lock()
		hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
		c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
		pendingHostInfo.ConnectionState.queueLock.Unlock()
		pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
	}

	if existingHostInfo != nil {
		// We are going to overwrite this entry, so remove the old references
		delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
		delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
		delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
		for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
			delete(c.mainHostMap.Relays, relayIdx)
		}
	}

	// Safe to call while holding the lock: addHostInfo assumes the caller
	// already holds mainHostMap's lock (see its comment).
	c.mainHostMap.addHostInfo(hostinfo, f)
	return existingHostInfo, nil
}
existingHostInfo.remoteIndexId) for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() { delete(c.mainHostMap.Relays, relayIdx) } } existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId] if found && existingRemoteIndex != nil { // We have a collision, but this can happen since we can't control // the remote ID. Just log about the situation as a note. hostinfo.logger(c.l). WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp). Info("New host shadows existing host remoteIndex") } c.mainHostMap.addHostInfo(hostinfo, f) c.pendingHostMap.unlockedDeleteHostInfo(hostinfo) } // AddIndexHostInfo generates a unique localIndexId for this HostInfo // and adds it to the pendingHostMap. Will error if we are unable to generate // a unique localIndexId func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error { c.pendingHostMap.Lock() defer c.pendingHostMap.Unlock() c.mainHostMap.RLock() defer c.mainHostMap.RUnlock() for i := 0; i < 32; i++ { index, err := generateIndex(c.l) if err != nil { return err } _, inPending := c.pendingHostMap.Indexes[index] _, inMain := c.mainHostMap.Indexes[index] if !inMain && !inPending { h.localIndexId = index c.pendingHostMap.Indexes[index] = h return nil } } return errors.New("failed to generate unique localIndexId") } func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) { c.pendingHostMap.addRemoteIndexHostInfo(index, h) } func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) { //l.Debugln("Deleting pending hostinfo :", hostinfo) c.pendingHostMap.DeleteHostInfo(hostinfo) } func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) { return c.pendingHostMap.QueryIndex(index) } func (c *HandshakeManager) EmitStats() { c.pendingHostMap.EmitStats("pending") c.mainHostMap.EmitStats("main") } // Utility functions below func generateIndex(l *logrus.Logger) (uint32, error) { b := make([]byte, 4) // Let zero mean we 
// hsTimeout returns the total wall-clock time spent across `tries` handshake
// attempts when the delay before attempt k is k*interval (linear backoff, see
// handleOutbound's use of HandshakeCounter). That is the arithmetic series
// interval + 2*interval + ... + tries*interval = tries*(tries+1)/2 * interval.
//
// The product tries*(tries+1) is always even, so dividing it by 2 is exact.
// (The previous form divided `tries` by 2 first, which truncated for odd
// values of tries and under-reported the timeout.)
func hsTimeout(tries int, interval time.Duration) time.Duration {
	return time.Duration(tries*(tries+1)/2) * interval
}
ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right for i := 1; i <= DefaultHandshakeRetries+1; i++ { now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval) blah.NextOutboundHandshakeTimerTick(now, mw) } // Confirm they are still in the pending index list assert.Contains(t, blah.pendingHostMap.Hosts, ip) // Tick 1 more time, a minute will certainly flush it out blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw) // Confirm they have been removed assert.NotContains(t, blah.pendingHostMap.Hosts, ip) } func Test_NewHandshakeManagerTrigger(t *testing.T) { l := test.NewLogger() _, tuncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24") ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2")) preferredRanges := []*net.IPNet{localrange} mw := &mockEncWriter{} mainHM := NewHostMap(l, "test", vpncidr, preferredRanges) lh := &LightHouse{ addrMap: make(map[iputil.VpnIp]*RemoteList), l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{}), } blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig) now := time.Now() blah.NextOutboundHandshakeTimerTick(now, mw) assert.Equal(t, 0, testCountTimerWheelEntries(blah.OutboundHandshakeTimer)) hi := blah.AddVpnIp(ip, nil) hi.HandshakeReady = true assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer)) assert.Equal(t, 0, hi.HandshakeCounter, "Should not have attempted a handshake yet") // Trigger the same method the channel will but, this should set our remotes pointer blah.handleOutbound(ip, mw, true) assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have done a handshake attempt") assert.NotNil(t, hi.remotes, "Manager should have set my remotes pointer") // Make sure the trigger doesn't double schedule the timer entry assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer)) uaddr 
:= udp.NewAddrFromString("10.1.1.1:4242") hi.remotes.unlockedPrependV4(ip, NewIp4AndPort(uaddr.IP, uint32(uaddr.Port))) // We now have remotes but only the first trigger should have pushed things forward blah.handleOutbound(ip, mw, true) assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have not done a handshake attempt") assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer)) } func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) { for _, i := range tw.wheel { n := i.Head for n != nil { c++ n = n.Next } } return c } type mockEncWriter struct { } func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) { return } func (mw *mockEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) { return } func (mw *mockEncWriter) Handshake(vpnIP iputil.VpnIp) {} nebula-1.6.1+dfsg/header/000077500000000000000000000000001434072716400151445ustar00rootroot00000000000000nebula-1.6.1+dfsg/header/header.go000066400000000000000000000117241434072716400167300ustar00rootroot00000000000000package header import ( "encoding/binary" "encoding/json" "errors" "fmt" ) //Version 1 header: // 0 31 // |-----------------------------------------------------------------------| // | Version (uint4) | Type (uint4) | Subtype (uint8) | Reserved (uint16) | 32 // |-----------------------------------------------------------------------| // | Remote index (uint32) | 64 // |-----------------------------------------------------------------------| // | Message counter | 96 // | (uint64) | 128 // |-----------------------------------------------------------------------| // | payload... 
// H is the decoded form of a version 1 nebula packet header (see the wire
// diagram above). Encode/Parse pack and unpack it to/from the first Len (16)
// bytes of a packet.
type H struct {
	Version        uint8          // only the low 4 bits are encoded (high nibble of byte 0)
	Type           MessageType    // only the low 4 bits are encoded (low nibble of byte 0)
	Subtype        MessageSubType // byte 1
	Reserved       uint16         // bytes 2-3; always written as 0 by Encode
	RemoteIndex    uint32         // bytes 4-7, big endian
	MessageCounter uint64         // bytes 8-15, big endian
}
// Encode uses the provided byte array to encode the provided header values into.
// b must have a capacity of at least Len (16) bytes or the reslice below will
// panic. The Reserved field is always written as zero. Returns b truncated to Len.
func Encode(b []byte, v uint8, t MessageType, st MessageSubType, ri uint32, c uint64) []byte {
	b = b[:Len]
	// Pack the 4-bit version into the high nibble and the 4-bit type into the low nibble.
	b[0] = v<<4 | byte(t&0x0f)
	b[1] = byte(st)
	binary.BigEndian.PutUint16(b[2:4], 0)
	binary.BigEndian.PutUint32(b[4:8], ri)
	binary.BigEndian.PutUint64(b[8:16], c)
	return b
}
string func (h *H) SubTypeName() string { return SubTypeName(h.Type, h.Subtype) } // SubTypeName will transform a nebula message sub type into a human string func SubTypeName(t MessageType, s MessageSubType) string { if n, ok := subTypeMap[t]; ok { if x, ok := (*n)[s]; ok { return x } } return "unknown" } // NewHeader turns bytes into a header func NewHeader(b []byte) (*H, error) { h := new(H) if err := h.Parse(b); err != nil { return nil, err } return h, nil } nebula-1.6.1+dfsg/header/header_test.go000066400000000000000000000055641434072716400177740ustar00rootroot00000000000000package header import ( "reflect" "testing" "github.com/stretchr/testify/assert" ) type headerTest struct { expectedBytes []byte *H } // 0001 0010 00010010 var headerBigEndianTests = []headerTest{{ expectedBytes: []byte{0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9}, // 1010 0000 H: &H{ // 1111 1+2+4+8 = 15 Version: 5, Type: 4, Subtype: 0, Reserved: 0, RemoteIndex: 10, MessageCounter: 9, }, }, } func TestEncode(t *testing.T) { for _, tt := range headerBigEndianTests { b, err := tt.Encode(make([]byte, Len)) if err != nil { t.Fatal(err) } assert.Equal(t, tt.expectedBytes, b) } } func TestParse(t *testing.T) { for _, tt := range headerBigEndianTests { b := tt.expectedBytes parsedHeader := &H{} parsedHeader.Parse(b) if !reflect.DeepEqual(tt.H, parsedHeader) { t.Fatalf("got %#v; want %#v", parsedHeader, tt.H) } } } func TestTypeName(t *testing.T) { assert.Equal(t, "test", TypeName(Test)) assert.Equal(t, "test", (&H{Type: Test}).TypeName()) assert.Equal(t, "unknown", TypeName(99)) assert.Equal(t, "unknown", (&H{Type: 99}).TypeName()) } func TestSubTypeName(t *testing.T) { assert.Equal(t, "testRequest", SubTypeName(Test, TestRequest)) assert.Equal(t, "testRequest", (&H{Type: Test, Subtype: TestRequest}).SubTypeName()) assert.Equal(t, "unknown", SubTypeName(99, TestRequest)) assert.Equal(t, "unknown", (&H{Type: 99, Subtype: TestRequest}).SubTypeName()) 
assert.Equal(t, "unknown", SubTypeName(Test, 99)) assert.Equal(t, "unknown", (&H{Type: Test, Subtype: 99}).SubTypeName()) assert.Equal(t, "none", SubTypeName(Message, 0)) assert.Equal(t, "none", (&H{Type: Message, Subtype: 0}).SubTypeName()) } func TestTypeMap(t *testing.T) { // Force people to document this stuff assert.Equal(t, map[MessageType]string{ Handshake: "handshake", Message: "message", RecvError: "recvError", LightHouse: "lightHouse", Test: "test", CloseTunnel: "closeTunnel", Control: "control", }, typeMap) assert.Equal(t, map[MessageType]*map[MessageSubType]string{ Message: { MessageNone: "none", MessageRelay: "relay", }, RecvError: &subTypeNoneMap, LightHouse: &subTypeNoneMap, Test: &subTypeTestMap, CloseTunnel: &subTypeNoneMap, Handshake: { HandshakeIXPSK0: "ix_psk0", }, Control: &subTypeNoneMap, }, subTypeMap) } func TestHeader_String(t *testing.T) { assert.Equal( t, "ver=100 type=test subtype=testRequest reserved=0x63 remoteindex=98 messagecounter=97", (&H{100, Test, TestRequest, 99, 98, 97}).String(), ) } func TestHeader_MarshalJSON(t *testing.T) { b, err := (&H{100, Test, TestRequest, 99, 98, 97}).MarshalJSON() assert.Nil(t, err) assert.Equal( t, "{\"messageCounter\":97,\"remoteIndex\":98,\"reserved\":99,\"subType\":\"testRequest\",\"type\":\"test\",\"version\":100}", string(b), ) } nebula-1.6.1+dfsg/hostmap.go000066400000000000000000000560701434072716400157260ustar00rootroot00000000000000package nebula import ( "context" "errors" "fmt" "net" "sync" "sync/atomic" "time" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) //const ProbeLen = 100 const PromoteEvery = 1000 const ReQueryEvery = 5000 const MaxRemotes = 10 // How long we should prevent roaming back to the previous IP. 
// HostMap tracks every known peer, indexed three ways so lookups by vpn IP,
// local index, or remote index are all O(1). The same HostInfo pointer is
// shared by all three maps.
type HostMap struct {
	sync.RWMutex    //Because we concurrently read and write to our maps
	name            string                     // label used in log/metric output (e.g. "main", "pending")
	Indexes         map[uint32]*HostInfo       // keyed by our localIndexId for each tunnel
	Relays          map[uint32]*HostInfo       // Maps a Relay IDX to a Relay HostInfo object
	RemoteIndexes   map[uint32]*HostInfo       // keyed by the peer's remoteIndexId
	Hosts           map[iputil.VpnIp]*HostInfo // keyed by the peer's vpn IP
	preferredRanges []*net.IPNet               // ranges we prefer when choosing/punching remotes
	vpnCIDR         *net.IPNet
	metricsEnabled  bool // gates the punchy tx counter in Punchy
	l               *logrus.Logger
}
(rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) { rs.Lock() defer rs.Unlock() relay, ok := rs.relayForByIdx[localIdx] if !ok { return iputil.VpnIp(0), false } delete(rs.relayForByIdx, localIdx) delete(rs.relayForByIp, relay.PeerIp) return relay.PeerIp, true } func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) { rs.RLock() defer rs.RUnlock() r, ok := rs.relayForByIp[vpnIp] return r, ok } func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) { rs.RLock() defer rs.RUnlock() r, ok := rs.relayForByIdx[idx] return r, ok } func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) { rs.Lock() defer rs.Unlock() rs.relayForByIp[ip] = r rs.relayForByIdx[idx] = r } type HostInfo struct { sync.RWMutex remote *udp.Addr remotes *RemoteList promoteCounter uint32 ConnectionState *ConnectionState handshakeStart time.Time //todo: this an entry in the handshake manager HandshakeReady bool //todo: being in the manager means you are ready HandshakeCounter int //todo: another handshake manager entry HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready HandshakePacket map[uint8][]byte //todo: this is other handshake manager entry packetStore []*cachedPacket //todo: this is other handshake manager entry remoteIndexId uint32 localIndexId uint32 vpnIp iputil.VpnIp recvError int remoteCidr *cidr.Tree4 relayState RelayState // lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH // for a punch from the remote end of this tunnel. 
The goal being to prime their conntrack for our traffic just like // with a handshake lastRebindCount int8 // lastHandshakeTime records the time the remote side told us about at the stage when the handshake was completed locally // Stage 1 packet will contain it if I am a responder, stage 2 packet if I am an initiator // This is used to avoid an attack where a handshake packet is replayed after some time lastHandshakeTime uint64 lastRoam time.Time lastRoamRemote *udp.Addr } type ViaSender struct { relayHI *HostInfo // relayHI is the host info object of the relay remoteIdx uint32 // remoteIdx is the index included in the header of the received packet relay *Relay // relay contains the rest of the relay information, including the PeerIP of the host trying to communicate with us. } type cachedPacket struct { messageType header.MessageType messageSubType header.MessageSubType callback packetCallback packet []byte } type packetCallback func(t header.MessageType, st header.MessageSubType, h *HostInfo, p, nb, out []byte) type cachedPacketMetrics struct { sent metrics.Counter dropped metrics.Counter } func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap { h := map[iputil.VpnIp]*HostInfo{} i := map[uint32]*HostInfo{} r := map[uint32]*HostInfo{} relays := map[uint32]*HostInfo{} m := HostMap{ name: name, Indexes: i, Relays: relays, RemoteIndexes: r, Hosts: h, preferredRanges: preferredRanges, vpnCIDR: vpnCIDR, l: l, } return &m } // UpdateStats takes a name and reports host and index counts to the stats collection system func (hm *HostMap) EmitStats(name string) { hm.RLock() hostLen := len(hm.Hosts) indexLen := len(hm.Indexes) remoteIndexLen := len(hm.RemoteIndexes) relaysLen := len(hm.Relays) hm.RUnlock() metrics.GetOrRegisterGauge("hostmap."+name+".hosts", nil).Update(int64(hostLen)) metrics.GetOrRegisterGauge("hostmap."+name+".indexes", nil).Update(int64(indexLen)) 
metrics.GetOrRegisterGauge("hostmap."+name+".remoteIndexes", nil).Update(int64(remoteIndexLen)) metrics.GetOrRegisterGauge("hostmap."+name+".relayIndexes", nil).Update(int64(relaysLen)) } func (hm *HostMap) RemoveRelay(localIdx uint32) { hm.Lock() hiRelay, ok := hm.Relays[localIdx] if !ok { hm.Unlock() return } delete(hm.Relays, localIdx) hm.Unlock() ip, ok := hiRelay.relayState.RemoveRelay(localIdx) if !ok { return } hiPeer, err := hm.QueryVpnIp(ip) if err != nil { return } var otherPeerIdx uint32 hiPeer.relayState.DeleteRelay(hiRelay.vpnIp) relay, ok := hiPeer.relayState.GetRelayForByIp(hiRelay.vpnIp) if ok { otherPeerIdx = relay.LocalIndex } // I am a relaying host. I need to remove the other relay, too. hm.RemoveRelay(otherPeerIdx) } func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) { hm.RLock() if i, ok := hm.Hosts[vpnIp]; ok { index := i.localIndexId hm.RUnlock() return index, nil } hm.RUnlock() return 0, errors.New("vpn IP not found") } func (hm *HostMap) Add(ip iputil.VpnIp, hostinfo *HostInfo) { hm.Lock() hm.Hosts[ip] = hostinfo hm.Unlock() } func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (hostinfo *HostInfo, created bool) { hm.RLock() if h, ok := hm.Hosts[vpnIp]; !ok { hm.RUnlock() h = &HostInfo{ promoteCounter: 0, vpnIp: vpnIp, HandshakePacket: make(map[uint8][]byte, 0), relayState: RelayState{ relays: map[iputil.VpnIp]struct{}{}, relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIdx: map[uint32]*Relay{}, }, } if init != nil { init(h) } hm.Lock() hm.Hosts[vpnIp] = h hm.Unlock() return h, true } else { hm.RUnlock() return h, false } } func (hm *HostMap) DeleteVpnIp(vpnIp iputil.VpnIp) { hm.Lock() delete(hm.Hosts, vpnIp) if len(hm.Hosts) == 0 { hm.Hosts = map[iputil.VpnIp]*HostInfo{} } hm.Unlock() if hm.l.Level >= logrus.DebugLevel { hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": vpnIp, "mapTotalSize": len(hm.Hosts)}). 
Debug("Hostmap vpnIp deleted") } } // Only used by pendingHostMap when the remote index is not initially known func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) { hm.Lock() h.remoteIndexId = index hm.RemoteIndexes[index] = h hm.Unlock() if hm.l.Level > logrus.DebugLevel { hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes), "hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "hostId": h.vpnIp}}). Debug("Hostmap remoteIndex added") } } func (hm *HostMap) AddVpnIpHostInfo(vpnIp iputil.VpnIp, h *HostInfo) { hm.Lock() h.vpnIp = vpnIp hm.Hosts[vpnIp] = h hm.Indexes[h.localIndexId] = h hm.RemoteIndexes[h.remoteIndexId] = h hm.Unlock() if hm.l.Level > logrus.DebugLevel { hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": vpnIp, "mapTotalSize": len(hm.Hosts), "hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "vpnIp": h.vpnIp}}). Debug("Hostmap vpnIp added") } } // This is only called in pendingHostmap, to cleanup an inbound handshake func (hm *HostMap) DeleteIndex(index uint32) { hm.Lock() hostinfo, ok := hm.Indexes[index] if ok { delete(hm.Indexes, index) delete(hm.RemoteIndexes, hostinfo.remoteIndexId) // Check if we have an entry under hostId that matches the same hostinfo // instance. Clean it up as well if we do. hostinfo2, ok := hm.Hosts[hostinfo.vpnIp] if ok && hostinfo2 == hostinfo { delete(hm.Hosts, hostinfo.vpnIp) } } hm.Unlock() if hm.l.Level >= logrus.DebugLevel { hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}). Debug("Hostmap index deleted") } } // This is used to cleanup on recv_error func (hm *HostMap) DeleteReverseIndex(index uint32) { hm.Lock() hostinfo, ok := hm.RemoteIndexes[index] if ok { delete(hm.Indexes, hostinfo.localIndexId) delete(hm.RemoteIndexes, index) // Check if we have an entry under hostId that matches the same hostinfo // instance. 
// DeleteHostInfo removes hostinfo from all hostmap indexes and then tears down
// every relay path that referenced it: both the relays this host was providing
// for others and the relays it was using to be reached. Relay indexes are
// collected first and removed afterwards because RemoveRelay re-acquires the
// hostmap lock internally.
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) {
	// Delete the host itself, ensuring it's not modified anymore
	hm.Lock()
	hm.unlockedDeleteHostInfo(hostinfo)
	hm.Unlock()

	// And tear down all the relays going through this host
	for _, localIdx := range hostinfo.relayState.CopyRelayForIdxs() {
		hm.RemoveRelay(localIdx)
	}

	// And tear down the relays this deleted hostInfo was using to be reached
	teardownRelayIdx := []uint32{}
	for _, relayIp := range hostinfo.relayState.CopyRelayIps() {
		relayHostInfo, err := hm.QueryVpnIp(relayIp)
		if err != nil {
			// Best effort: the relay host may already be gone; just note it.
			hm.l.WithError(err).WithField("relay", relayIp).Info("Missing relay host in hostmap")
		} else {
			if r, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp); ok {
				teardownRelayIdx = append(teardownRelayIdx, r.LocalIndex)
			}
		}
	}
	for _, localIdx := range teardownRelayIdx {
		hm.RemoveRelay(localIdx)
	}
}
hostinfo2, ok := hm.Hosts[hostinfo.vpnIp] if ok && hostinfo2 != hostinfo { delete(hm.Hosts, hostinfo2.vpnIp) delete(hm.Indexes, hostinfo2.localIndexId) delete(hm.RemoteIndexes, hostinfo2.remoteIndexId) } delete(hm.Hosts, hostinfo.vpnIp) if len(hm.Hosts) == 0 { hm.Hosts = map[iputil.VpnIp]*HostInfo{} } delete(hm.Indexes, hostinfo.localIndexId) if len(hm.Indexes) == 0 { hm.Indexes = map[uint32]*HostInfo{} } delete(hm.RemoteIndexes, hostinfo.remoteIndexId) if len(hm.RemoteIndexes) == 0 { hm.RemoteIndexes = map[uint32]*HostInfo{} } if hm.l.Level >= logrus.DebugLevel { hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts), "vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}). Debug("Hostmap hostInfo deleted") } } func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) { //TODO: we probably just want to return bool instead of error, or at least a static error hm.RLock() if h, ok := hm.Indexes[index]; ok { hm.RUnlock() return h, nil } else { hm.RUnlock() return nil, errors.New("unable to find index") } } func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) { //TODO: we probably just want to return bool instead of error, or at least a static error hm.RLock() if h, ok := hm.Relays[index]; ok { hm.RUnlock() return h, nil } else { hm.RUnlock() return nil, errors.New("unable to find index") } } func (hm *HostMap) QueryReverseIndex(index uint32) (*HostInfo, error) { hm.RLock() if h, ok := hm.RemoteIndexes[index]; ok { hm.RUnlock() return h, nil } else { hm.RUnlock() return nil, fmt.Errorf("unable to find reverse index or connectionstate nil in %s hostmap", hm.name) } } func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) { return hm.queryVpnIp(vpnIp, nil) } // PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every // `PromoteEvery` calls to this function for a given host. 
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) { return hm.queryVpnIp(vpnIp, ifce) } func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*HostInfo, error) { hm.RLock() if h, ok := hm.Hosts[vpnIp]; ok { hm.RUnlock() // Do not attempt promotion if you are a lighthouse if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse { h.TryPromoteBest(hm.preferredRanges, promoteIfce) } return h, nil } hm.RUnlock() return nil, errors.New("unable to find host") } // We already have the hm Lock when this is called, so make sure to not call // any other methods that might try to grab it again func (hm *HostMap) addHostInfo(hostinfo *HostInfo, f *Interface) { if f.serveDns { remoteCert := hostinfo.ConnectionState.peerCert dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String()) } hm.Hosts[hostinfo.vpnIp] = hostinfo hm.Indexes[hostinfo.localIndexId] = hostinfo hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo if hm.l.Level >= logrus.DebugLevel { hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts), "hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}). 
Debug("Hostmap vpnIp added") } } // punchList assembles a list of all non nil RemoteList pointer entries in this hostmap // The caller can then do the its work outside of the read lock func (hm *HostMap) punchList(rl []*RemoteList) []*RemoteList { hm.RLock() defer hm.RUnlock() for _, v := range hm.Hosts { if v.remotes != nil { rl = append(rl, v.remotes) } } return rl } // Punchy iterates through the result of punchList() to assemble all known addresses and sends a hole punch packet to them func (hm *HostMap) Punchy(ctx context.Context, conn *udp.Conn) { var metricsTxPunchy metrics.Counter if hm.metricsEnabled { metricsTxPunchy = metrics.GetOrRegisterCounter("messages.tx.punchy", nil) } else { metricsTxPunchy = metrics.NilCounter{} } var remotes []*RemoteList b := []byte{1} clockSource := time.NewTicker(time.Second * 10) defer clockSource.Stop() for { remotes = hm.punchList(remotes[:0]) for _, rl := range remotes { //TODO: CopyAddrs generates garbage but ForEach locks for the work here, figure out which way is better for _, addr := range rl.CopyAddrs(hm.preferredRanges) { metricsTxPunchy.Inc(1) conn.WriteTo(b, addr) } } select { case <-ctx.Done(): return case <-clockSource.C: continue } } } // TryPromoteBest handles re-querying lighthouses and probing for better paths // NOTE: It is an error to call this if you are a lighthouse since they should not roam clients! 
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) { c := atomic.AddUint32(&i.promoteCounter, 1) if c%PromoteEvery == 0 { // The lock here is currently protecting i.remote access i.RLock() remote := i.remote i.RUnlock() // return early if we are already on a preferred remote if remote != nil { rIP := remote.IP for _, l := range preferredRanges { if l.Contains(rIP) { return } } } i.remotes.ForEach(preferredRanges, func(addr *udp.Addr, preferred bool) { if remote != nil && (addr == nil || !preferred) { return } // Try to send a test packet to that host, this should // cause it to detect a roaming event and switch remotes ifce.sendTo(header.Test, header.TestRequest, i.ConnectionState, i, addr, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) }) } // Re query our lighthouses for new remotes occasionally if c%ReQueryEvery == 0 && ifce.lightHouse != nil { ifce.lightHouse.QueryServer(i.vpnIp, ifce) } } func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) { //TODO: return the error so we can log with more context if len(i.packetStore) < 100 { tempPacket := make([]byte, len(packet)) copy(tempPacket, packet) //l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket) i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket}) if l.Level >= logrus.DebugLevel { i.logger(l). WithField("length", len(i.packetStore)). WithField("stored", true). Debugf("Packet store") } } else if l.Level >= logrus.DebugLevel { m.dropped.Inc(1) i.logger(l). WithField("length", len(i.packetStore)). WithField("stored", false). 
Debugf("Packet store") } } // handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) { //TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because: //TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send //TODO: if the transition from HandhsakeComplete to ConnectionState.ready happens all within this function they are identical i.ConnectionState.queueLock.Lock() i.HandshakeComplete = true //TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen. // Clamping it to 2 gets us out of the woods for now atomic.StoreUint64(&i.ConnectionState.atomicMessageCounter, 2) if l.Level >= logrus.DebugLevel { i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore)) } if len(i.packetStore) > 0 { nb := make([]byte, 12, 12) out := make([]byte, mtu) for _, cp := range i.packetStore { cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out) } m.sent.Inc(int64(len(i.packetStore))) } i.remotes.ResetBlockedRemotes() i.packetStore = make([]*cachedPacket, 0) i.ConnectionState.ready = true i.ConnectionState.queueLock.Unlock() i.ConnectionState.certState = nil } func (i *HostInfo) GetCert() *cert.NebulaCertificate { if i.ConnectionState != nil { return i.ConnectionState.peerCert } return nil } func (i *HostInfo) SetRemote(remote *udp.Addr) { // We copy here because we likely got this remote from a source that reuses the object if !i.remote.Equals(remote) { i.remote = remote.Copy() i.remotes.LearnRemote(i.vpnIp, remote.Copy()) } } // SetRemoteIfPreferred returns true if the remote was changed. The lastRoam // time on the HostInfo will also be updated. 
func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool { if newRemote == nil { // relays have nil udp Addrs return false } currentRemote := i.remote if currentRemote == nil { i.SetRemote(newRemote) return true } // NOTE: We do this loop here instead of calling `isPreferred` in // remote_list.go so that we only have to loop over preferredRanges once. newIsPreferred := false for _, l := range hm.preferredRanges { // return early if we are already on a preferred remote if l.Contains(currentRemote.IP) { return false } if l.Contains(newRemote.IP) { newIsPreferred = true } } if newIsPreferred { // Consider this a roaming event i.lastRoam = time.Now() i.lastRoamRemote = currentRemote.Copy() i.SetRemote(newRemote) return true } return false } func (i *HostInfo) RecvErrorExceeded() bool { if i.recvError < 3 { i.recvError += 1 return false } return true } func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) { if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 { // Simple case, no CIDRTree needed return } remoteCidr := cidr.NewTree4() for _, ip := range c.Details.Ips { remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{}) } for _, n := range c.Details.Subnets { remoteCidr.AddCIDR(n, struct{}{}) } i.remoteCidr = remoteCidr } func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry { if i == nil { return logrus.NewEntry(l) } li := l.WithField("vpnIp", i.vpnIp) if connState := i.ConnectionState; connState != nil { if peerCert := connState.peerCert; peerCert != nil { li = li.WithField("certName", peerCert.Details.Name) } } return li } // Utility functions func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP { //FIXME: This function is pretty garbage var ips []net.IP ifaces, _ := net.Interfaces() for _, i := range ifaces { allow := allowList.AllowName(i.Name) if l.Level >= logrus.TraceLevel { l.WithField("interfaceName", i.Name).WithField("allow", 
allow).Trace("localAllowList.AllowName") } if !allow { continue } addrs, _ := i.Addrs() for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: //continue ip = v.IP case *net.IPAddr: ip = v.IP } //TODO: Filtering out link local for now, this is probably the most correct thing //TODO: Would be nice to filter out SLAAC MAC based ips as well if ip.IsLoopback() == false && !ip.IsLinkLocalUnicast() { allow := allowList.Allow(ip) if l.Level >= logrus.TraceLevel { l.WithField("localIp", ip).WithField("allow", allow).Trace("localAllowList.Allow") } if !allow { continue } ips = append(ips, ip) } } } return &ips } nebula-1.6.1+dfsg/hostmap_test.go000066400000000000000000000000171434072716400167530ustar00rootroot00000000000000package nebula nebula-1.6.1+dfsg/inside.go000066400000000000000000000274161434072716400155300ustar00rootroot00000000000000package nebula import ( "sync/atomic" "github.com/flynn/noise" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) { err := newPacket(packet, false, fwPacket) if err != nil { if f.l.Level >= logrus.DebugLevel { f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err) } return } // Ignore local broadcast packets if f.dropLocalBroadcast && fwPacket.RemoteIP == f.localBroadcast { return } if fwPacket.RemoteIP == f.myVpnIp { // Immediately forward packets from self to self. // This should only happen on Darwin-based hosts, which routes packets from // the Nebula IP to the Nebula IP through the Nebula TUN device. if immediatelyForwardToSelf { _, err := f.readers[q].Write(packet) if err != nil { f.l.WithError(err).Error("Failed to forward to tun") } } // Otherwise, drop. 
On linux, we should never see these packets - Linux // routes packets from the nebula IP to the nebula IP through the loopback device. return } // Ignore broadcast packets if f.dropMulticast && isMulticast(fwPacket.RemoteIP) { return } hostinfo := f.getOrHandshake(fwPacket.RemoteIP) if hostinfo == nil { if f.l.Level >= logrus.DebugLevel { f.l.WithField("vpnIp", fwPacket.RemoteIP). WithField("fwPacket", fwPacket). Debugln("dropping outbound packet, vpnIp not in our CIDR or in unsafe routes") } return } ci := hostinfo.ConnectionState if ci.ready == false { // Because we might be sending stored packets, lock here to stop new things going to // the packet queue. ci.queueLock.Lock() if !ci.ready { hostinfo.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics) ci.queueLock.Unlock() return } ci.queueLock.Unlock() } dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.caPool, localCache) if dropReason == nil { f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q) } else if f.l.Level >= logrus.DebugLevel { hostinfo.logger(f.l). WithField("fwPacket", fwPacket). WithField("reason", dropReason). 
Debugln("dropping outbound packet") } } func (f *Interface) Handshake(vpnIp iputil.VpnIp) { f.getOrHandshake(vpnIp) } // getOrHandshake returns nil if the vpnIp is not routable func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo { if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) { vpnIp = f.inside.RouteFor(vpnIp) if vpnIp == 0 { return nil } } hostinfo, err := f.hostMap.PromoteBestQueryVpnIp(vpnIp, f) //if err != nil || hostinfo.ConnectionState == nil { if err != nil { hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp) if err != nil { hostinfo = f.handshakeManager.AddVpnIp(vpnIp, f.initHostInfo) } } ci := hostinfo.ConnectionState if ci != nil && ci.eKey != nil && ci.ready { return hostinfo } // Handshake is not ready, we need to grab the lock now before we start the handshake process hostinfo.Lock() defer hostinfo.Unlock() // Double check, now that we have the lock ci = hostinfo.ConnectionState if ci != nil && ci.eKey != nil && ci.ready { return hostinfo } // If we have already created the handshake packet, we don't want to call the function at all. if !hostinfo.HandshakeReady { ixHandshakeStage0(f, vpnIp, hostinfo) // FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us. 
//xx_handshakeStage0(f, ip, hostinfo) // If this is a static host, we don't need to wait for the HostQueryReply // We can trigger the handshake right now if _, ok := f.lightHouse.GetStaticHostList()[vpnIp]; ok { select { case f.handshakeManager.trigger <- vpnIp: default: } } } return hostinfo } // initHostInfo is the init function to pass to (*HandshakeManager).AddVpnIP that // will create the initial Noise ConnectionState func (f *Interface) initHostInfo(hostinfo *HostInfo) { hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0) } func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) { fp := &firewall.Packet{} err := newPacket(p, false, fp) if err != nil { f.l.Warnf("error while parsing outgoing packet for firewall check; %v", err) return } // check if packet is in outbound fw rules dropReason := f.firewall.Drop(p, *fp, false, hostInfo, f.caPool, nil) if dropReason != nil { if f.l.Level >= logrus.DebugLevel { f.l.WithField("fwPacket", fp). WithField("reason", dropReason). Debugln("dropping cached packet") } return } f.sendNoMetrics(header.Message, st, hostInfo.ConnectionState, hostInfo, nil, p, nb, out, 0) } // SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) { hostInfo := f.getOrHandshake(vpnIp) if hostInfo == nil { if f.l.Level >= logrus.DebugLevel { f.l.WithField("vpnIp", vpnIp). Debugln("dropping SendMessageToVpnIp, vpnIp not in our CIDR or in unsafe routes") } return } if !hostInfo.ConnectionState.ready { // Because we might be sending stored packets, lock here to stop new things going to // the packet queue. 
hostInfo.ConnectionState.queueLock.Lock() if !hostInfo.ConnectionState.ready { hostInfo.cachePacket(f.l, t, st, p, f.sendMessageToVpnIp, f.cachedPacketMetrics) hostInfo.ConnectionState.queueLock.Unlock() return } hostInfo.ConnectionState.queueLock.Unlock() } f.sendMessageToVpnIp(t, st, hostInfo, p, nb, out) return } func (f *Interface) sendMessageToVpnIp(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) { f.send(t, st, hostInfo.ConnectionState, hostInfo, p, nb, out) } func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) { f.messageMetrics.Tx(t, st, 1) f.sendNoMetrics(t, st, ci, hostinfo, nil, p, nb, out, 0) } func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte) { f.messageMetrics.Tx(t, st, 1) f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0) } // sendVia sends a payload through a Relay tunnel. No authentication or encryption is done // to the payload for the ultimate target host, making this a useful method for sending // handshake messages to peers through relay tunnels. // via is the HostInfo through which the message is relayed. // ad is the plaintext data to authenticate, but not encrypt // nb is a buffer used to store the nonce value, re-used for performance reasons. // out is a buffer used to store the result of the Encrypt operation // q indicates which writer to use to send the packet. func (f *Interface) SendVia(viaIfc interface{}, relayIfc interface{}, ad, nb, out []byte, nocopy bool, ) { via := viaIfc.(*HostInfo) relay := relayIfc.(*Relay) c := atomic.AddUint64(&via.ConnectionState.atomicMessageCounter, 1) out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c) f.connectionManager.Out(via.vpnIp) // Authenticate the header and payload, but do not encrypt for this message type. 
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload. if len(out)+len(ad)+via.ConnectionState.eKey.Overhead() > cap(out) { via.logger(f.l). WithField("outCap", cap(out)). WithField("payloadLen", len(ad)). WithField("headerLen", len(out)). WithField("cipherOverhead", via.ConnectionState.eKey.Overhead()). Error("SendVia out buffer not large enough for relay") return } // The header bytes are written to the 'out' slice; Grow the slice to hold the header and associated data payload. offset := len(out) out = out[:offset+len(ad)] // In one call path, the associated data _is_ already stored in out. In other call paths, the associated data must // be copied into 'out'. if !nocopy { copy(out[offset:], ad) } var err error out, err = via.ConnectionState.eKey.EncryptDanger(out, out, nil, c, nb) if err != nil { via.logger(f.l).WithError(err).Info("Failed to EncryptDanger in sendVia") return } err = f.writers[0].WriteTo(out, via.remote) if err != nil { via.logger(f.l).WithError(err).Info("Failed to WriteTo in sendVia") } } func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) { if ci.eKey == nil { //TODO: log warning return } useRelay := remote == nil && hostinfo.remote == nil fullOut := out if useRelay { if len(out) < header.Len { // out always has a capacity of mtu, but not always a length greater than the header.Len. // Grow it to make sure the next operation works. out = out[:header.Len] } // Save a header's worth of data at the front of the 'out' buffer. 
out = out[header.Len:] } //TODO: enable if we do more than 1 tun queue //ci.writeLock.Lock() c := atomic.AddUint64(&ci.atomicMessageCounter, 1) //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p) out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c) f.connectionManager.Out(hostinfo.vpnIp) // Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against // all our IPs and enable a faster roaming. if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount { //NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is // finally used again. This tunnel would eventually be torn down and recreated if this action didn't help. f.lightHouse.QueryServer(hostinfo.vpnIp, f) hostinfo.lastRebindCount = f.rebindCount if f.l.Level >= logrus.DebugLevel { f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter") } } var err error out, err = ci.eKey.EncryptDanger(out, out, p, c, nb) //TODO: see above note on lock //ci.writeLock.Unlock() if err != nil { hostinfo.logger(f.l).WithError(err). WithField("udpAddr", remote).WithField("counter", c). WithField("attemptedCounter", c). Error("Failed to encrypt outgoing packet") return } if remote != nil { err = f.writers[q].WriteTo(out, remote) if err != nil { hostinfo.logger(f.l).WithError(err). WithField("udpAddr", remote).Error("Failed to write outgoing packet") } } else if hostinfo.remote != nil { err = f.writers[q].WriteTo(out, hostinfo.remote) if err != nil { hostinfo.logger(f.l).WithError(err). 
WithField("udpAddr", remote).Error("Failed to write outgoing packet") } } else { // Try to send via a relay for _, relayIP := range hostinfo.relayState.CopyRelayIps() { relayHostInfo, err := f.hostMap.QueryVpnIp(relayIP) if err != nil { hostinfo.logger(f.l).WithField("relayIp", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo") continue } relay, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp) if !ok { hostinfo.logger(f.l). WithField("relayIp", relayHostInfo.vpnIp). WithField("relayTarget", hostinfo.vpnIp). Info("sendNoMetrics relay missing object for target") continue } f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true) break } } return } func isMulticast(ip iputil.VpnIp) bool { // Class D multicast if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 { return true } return false } nebula-1.6.1+dfsg/inside_darwin.go000066400000000000000000000000731434072716400170620ustar00rootroot00000000000000package nebula const immediatelyForwardToSelf bool = true nebula-1.6.1+dfsg/inside_generic.go000066400000000000000000000001421434072716400172070ustar00rootroot00000000000000//go:build !darwin // +build !darwin package nebula const immediatelyForwardToSelf bool = false nebula-1.6.1+dfsg/interface.go000066400000000000000000000250731434072716400162120ustar00rootroot00000000000000package nebula import ( "context" "errors" "fmt" "io" "net" "os" "runtime" "sync/atomic" "time" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/overlay" "github.com/slackhq/nebula/udp" ) const mtu = 9001 type InterfaceConfig struct { HostMap *HostMap Outside *udp.Conn Inside overlay.Device certState *CertState Cipher string Firewall *Firewall ServeDns bool HandshakeManager *HandshakeManager lightHouse *LightHouse checkInterval int pendingDeletionInterval int DropLocalBroadcast 
bool DropMulticast bool routines int MessageMetrics *MessageMetrics version string caPool *cert.NebulaCAPool disconnectInvalid bool relayManager *relayManager ConntrackCacheTimeout time.Duration l *logrus.Logger } type Interface struct { hostMap *HostMap outside *udp.Conn inside overlay.Device certState *CertState cipher string firewall *Firewall connectionManager *connectionManager handshakeManager *HandshakeManager serveDns bool createTime time.Time lightHouse *LightHouse localBroadcast iputil.VpnIp myVpnIp iputil.VpnIp dropLocalBroadcast bool dropMulticast bool routines int caPool *cert.NebulaCAPool disconnectInvalid bool closed int32 relayManager *relayManager sendRecvErrorConfig sendRecvErrorConfig // rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse rebindCount int8 version string conntrackCacheTimeout time.Duration writers []*udp.Conn readers []io.ReadWriteCloser metricHandshakes metrics.Histogram messageMetrics *MessageMetrics cachedPacketMetrics *cachedPacketMetrics l *logrus.Logger } type sendRecvErrorConfig uint8 const ( sendRecvErrorAlways sendRecvErrorConfig = iota sendRecvErrorNever sendRecvErrorPrivate ) func (s sendRecvErrorConfig) ShouldSendRecvError(ip net.IP) bool { switch s { case sendRecvErrorPrivate: return ip.IsPrivate() case sendRecvErrorAlways: return true case sendRecvErrorNever: return false default: panic(fmt.Errorf("invalid sendRecvErrorConfig value: %d", s)) } } func (s sendRecvErrorConfig) String() string { switch s { case sendRecvErrorAlways: return "always" case sendRecvErrorNever: return "never" case sendRecvErrorPrivate: return "private" default: return fmt.Sprintf("invalid(%d)", s) } } func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) { if c.Outside == nil { return nil, errors.New("no outside connection") } if c.Inside == nil { return nil, errors.New("no inside interface (tun)") } if c.certState == nil { return nil, errors.New("no certificate 
state") } if c.Firewall == nil { return nil, errors.New("no firewall rules") } myVpnIp := iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].IP) ifce := &Interface{ hostMap: c.HostMap, outside: c.Outside, inside: c.Inside, certState: c.certState, cipher: c.Cipher, firewall: c.Firewall, serveDns: c.ServeDns, handshakeManager: c.HandshakeManager, createTime: time.Now(), lightHouse: c.lightHouse, localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].Mask), dropLocalBroadcast: c.DropLocalBroadcast, dropMulticast: c.DropMulticast, routines: c.routines, version: c.version, writers: make([]*udp.Conn, c.routines), readers: make([]io.ReadWriteCloser, c.routines), caPool: c.caPool, disconnectInvalid: c.disconnectInvalid, myVpnIp: myVpnIp, relayManager: c.relayManager, conntrackCacheTimeout: c.ConntrackCacheTimeout, metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)), messageMetrics: c.MessageMetrics, cachedPacketMetrics: &cachedPacketMetrics{ sent: metrics.GetOrRegisterCounter("hostinfo.cached_packets.sent", nil), dropped: metrics.GetOrRegisterCounter("hostinfo.cached_packets.dropped", nil), }, l: c.l, } ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval) return ifce, nil } // activate creates the interface on the host. After the interface is created, any // other services that want to bind listeners to its IP may do so successfully. However, // the interface isn't going to process anything until run() is called. func (f *Interface) activate() { // actually turn on tun dev addr, err := f.outside.LocalAddr() if err != nil { f.l.WithError(err).Error("Failed to get udp listen address") } f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()). WithField("build", f.version).WithField("udpAddr", addr). 
Info("Nebula interface is active") metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines)) // Prepare n tun queues var reader io.ReadWriteCloser = f.inside for i := 0; i < f.routines; i++ { if i > 0 { reader, err = f.inside.NewMultiQueueReader() if err != nil { f.l.Fatal(err) } } f.readers[i] = reader } if err := f.inside.Activate(); err != nil { f.inside.Close() f.l.Fatal(err) } } func (f *Interface) run() { // Launch n queues to read packets from udp for i := 0; i < f.routines; i++ { go f.listenOut(i) } // Launch n queues to read packets from tun dev for i := 0; i < f.routines; i++ { go f.listenIn(f.readers[i], i) } } func (f *Interface) listenOut(i int) { runtime.LockOSThread() var li *udp.Conn // TODO clean this up with a coherent interface for each outside connection if i > 0 { li = f.writers[i] } else { li = f.outside } lhh := f.lightHouse.NewRequestHandler() conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout) li.ListenOut(f.readOutsidePackets, lhh.HandleRequest, conntrackCache, i) } func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) { runtime.LockOSThread() packet := make([]byte, mtu) out := make([]byte, mtu) fwPacket := &firewall.Packet{} nb := make([]byte, 12, 12) conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout) for { n, err := reader.Read(packet) if err != nil { if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&f.closed) != 0 { return } f.l.WithError(err).Error("Error while reading outbound packet") // This only seems to happen when something fatal happens to the fd, so exit. 
os.Exit(2) } f.consumeInsidePacket(packet[:n], fwPacket, nb, out, i, conntrackCache.Get(f.l)) } } func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) { c.RegisterReloadCallback(f.reloadCA) c.RegisterReloadCallback(f.reloadCertKey) c.RegisterReloadCallback(f.reloadFirewall) c.RegisterReloadCallback(f.reloadSendRecvError) for _, udpConn := range f.writers { c.RegisterReloadCallback(udpConn.ReloadConfig) } } func (f *Interface) reloadCA(c *config.C) { // reload and check regardless // todo: need mutex? newCAs, err := loadCAFromConfig(f.l, c) if err != nil { f.l.WithError(err).Error("Could not refresh trusted CA certificates") return } f.caPool = newCAs f.l.WithField("fingerprints", f.caPool.GetFingerprints()).Info("Trusted CA certificates refreshed") } func (f *Interface) reloadCertKey(c *config.C) { // reload and check in all cases cs, err := NewCertStateFromConfig(c) if err != nil { f.l.WithError(err).Error("Could not refresh client cert") return } // did IP in cert change? if so, don't set oldIPs := f.certState.certificate.Details.Ips newIPs := cs.certificate.Details.Ips if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() { f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old") return } f.certState = cs f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk") } func (f *Interface) reloadFirewall(c *config.C) { //TODO: need to trigger/detect if the certificate changed too if c.HasChanged("firewall") == false { f.l.Debug("No firewall config change detected") return } fw, err := NewFirewallFromConfig(f.l, f.certState.certificate, c) if err != nil { f.l.WithError(err).Error("Error while creating firewall during reload") return } oldFw := f.firewall conntrack := oldFw.Conntrack conntrack.Lock() defer conntrack.Unlock() fw.rulesVersion = oldFw.rulesVersion + 1 // If rulesVersion is back to zero, we have wrapped all the way around. 
Be // safe and just reset conntrack in this case. if fw.rulesVersion == 0 { f.l.WithField("firewallHash", fw.GetRuleHash()). WithField("oldFirewallHash", oldFw.GetRuleHash()). WithField("rulesVersion", fw.rulesVersion). Warn("firewall rulesVersion has overflowed, resetting conntrack") } else { fw.Conntrack = conntrack } f.firewall = fw oldFw.Destroy() f.l.WithField("firewallHash", fw.GetRuleHash()). WithField("oldFirewallHash", oldFw.GetRuleHash()). WithField("rulesVersion", fw.rulesVersion). Info("New firewall has been installed") } func (f *Interface) reloadSendRecvError(c *config.C) { if c.InitialLoad() || c.HasChanged("listen.send_recv_error") { stringValue := c.GetString("listen.send_recv_error", "always") switch stringValue { case "always": f.sendRecvErrorConfig = sendRecvErrorAlways case "never": f.sendRecvErrorConfig = sendRecvErrorNever case "private": f.sendRecvErrorConfig = sendRecvErrorPrivate default: if c.GetBool("listen.send_recv_error", true) { f.sendRecvErrorConfig = sendRecvErrorAlways } else { f.sendRecvErrorConfig = sendRecvErrorNever } } f.l.WithField("sendRecvError", f.sendRecvErrorConfig.String()). 
Info("Loaded send_recv_error config") } } func (f *Interface) emitStats(ctx context.Context, i time.Duration) { ticker := time.NewTicker(i) defer ticker.Stop() udpStats := udp.NewUDPStatsEmitter(f.writers) for { select { case <-ctx.Done(): return case <-ticker.C: f.firewall.EmitStats() f.handshakeManager.EmitStats() udpStats() } } } func (f *Interface) Close() error { atomic.StoreInt32(&f.closed, 1) // Release the tun device return f.inside.Close() } nebula-1.6.1+dfsg/iputil/000077500000000000000000000000001434072716400152225ustar00rootroot00000000000000nebula-1.6.1+dfsg/iputil/util.go000066400000000000000000000035551434072716400165360ustar00rootroot00000000000000package iputil import ( "encoding/binary" "fmt" "net" "net/netip" ) type VpnIp uint32 const maxIPv4StringLen = len("255.255.255.255") func (ip VpnIp) String() string { b := make([]byte, maxIPv4StringLen) n := ubtoa(b, 0, byte(ip>>24)) b[n] = '.' n++ n += ubtoa(b, n, byte(ip>>16&255)) b[n] = '.' n++ n += ubtoa(b, n, byte(ip>>8&255)) b[n] = '.' 
n++ n += ubtoa(b, n, byte(ip&255)) return string(b[:n]) } func (ip VpnIp) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf("\"%s\"", ip.String())), nil } func (ip VpnIp) ToIP() net.IP { nip := make(net.IP, 4) binary.BigEndian.PutUint32(nip, uint32(ip)) return nip } func (ip VpnIp) ToNetIpAddr() netip.Addr { var nip [4]byte binary.BigEndian.PutUint32(nip[:], uint32(ip)) return netip.AddrFrom4(nip) } func Ip2VpnIp(ip []byte) VpnIp { if len(ip) == 16 { return VpnIp(binary.BigEndian.Uint32(ip[12:16])) } return VpnIp(binary.BigEndian.Uint32(ip)) } func ToNetIpAddr(ip net.IP) (netip.Addr, error) { addr, ok := netip.AddrFromSlice(ip) if !ok { return netip.Addr{}, fmt.Errorf("invalid net.IP: %v", ip) } return addr, nil } func ToNetIpPrefix(ipNet net.IPNet) (netip.Prefix, error) { addr, err := ToNetIpAddr(ipNet.IP) if err != nil { return netip.Prefix{}, err } ones, bits := ipNet.Mask.Size() if ones == 0 && bits == 0 { return netip.Prefix{}, fmt.Errorf("invalid net.IP: %v", ipNet) } return netip.PrefixFrom(addr, ones), nil } // ubtoa encodes the string form of the integer v to dst[start:] and // returns the number of bytes written to dst. The caller must ensure // that dst has sufficient length. 
// ubtoa writes the decimal representation of v into dst starting at index
// start and reports how many bytes it wrote (1, 2 or 3). The caller must
// ensure dst has sufficient length.
func ubtoa(dst []byte, start int, v byte) int {
	switch {
	case v < 10:
		dst[start] = '0' + v
		return 1
	case v < 100:
		dst[start] = '0' + v/10
		dst[start+1] = '0' + v%10
		return 2
	default:
		dst[start] = '0' + v/100
		dst[start+1] = '0' + (v/10)%10
		dst[start+2] = '0' + v%10
		return 3
	}
}
// ErrHostNotKnown is returned when a vpn ip has no entry in the lighthouse cache.
var ErrHostNotKnown = errors.New("host not known")

// netIpAndPort is a parsed lighthouse.advertise_addrs entry.
type netIpAndPort struct {
	ip   net.IP
	port uint16
}

// LightHouse caches underlay addresses for vpn ips, both as a client of
// lighthouses and, when amLighthouse is set, as a server answering queries.
// Fields prefixed atomic are swapped wholesale via atomic pointer/int ops so
// readers never need the mutex; the mutex only guards addrMap.
type LightHouse struct {
	//TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
	sync.RWMutex //Because we concurrently read and write to our maps
	amLighthouse bool
	myVpnIp      iputil.VpnIp
	myVpnZeros   iputil.VpnIp // 32 - mask ones; used with ipMaskContains
	myVpnNet     *net.IPNet
	punchConn    *udp.Conn
	punchy       *Punchy

	// Local cache of answers from light houses
	// map of vpn Ip to answers
	addrMap map[iputil.VpnIp]*RemoteList

	// filters remote addresses allowed for each host
	// - When we are a lighthouse, this filters what addresses we store and
	// respond with.
	// - When we are not a lighthouse, this filters which addresses we accept
	// from lighthouses.
	atomicRemoteAllowList *RemoteAllowList

	// filters local addresses that we advertise to lighthouses
	atomicLocalAllowList *LocalAllowList

	// used to trigger the HandshakeManager when we receive HostQueryReply
	handshakeTrigger chan<- iputil.VpnIp

	// atomicStaticList exists to avoid having a bool in each addrMap entry
	// since static should be rare
	atomicStaticList  map[iputil.VpnIp]struct{}
	atomicLighthouses map[iputil.VpnIp]struct{}

	// atomicInterval is the lighthouse.interval (seconds); updateCancel,
	// updateParentCtx and updateUdp let reload restart LhUpdateWorker
	atomicInterval  int64
	updateCancel    context.CancelFunc
	updateParentCtx context.Context
	updateUdp       udp.EncWriter
	nebulaPort      uint32 // 32 bits because protobuf does not have a uint16

	atomicAdvertiseAddrs []netIpAndPort

	// IP's of relays that can be used by peers to access me
	atomicRelaysForMe []iputil.VpnIp

	metrics           *MessageMetrics // nil unless stats.lighthouse_metrics is true
	metricHolepunchTx metrics.Counter
	l                 *logrus.Logger
}

// NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
// addrMap should be nil unless this is during a config reload
func NewLightHouseFromConfig(l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
	amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
	nebulaPort := uint32(c.GetInt("listen.port", 0))
	if amLighthouse && nebulaPort == 0 {
		return nil, util.NewContextualError("lighthouse.am_lighthouse enabled on node but no port number is set in config", nil, nil)
	}

	// If port is dynamic, discover it
	if nebulaPort == 0 && pc != nil {
		uPort, err := pc.LocalAddr()
		if err != nil {
			return nil, util.NewContextualError("Failed to get listening port", nil, err)
		}
		nebulaPort = uint32(uPort.Port)
	}

	ones, _ := myVpnNet.Mask.Size()
	h := LightHouse{
		amLighthouse:      amLighthouse,
		myVpnIp:           iputil.Ip2VpnIp(myVpnNet.IP),
		myVpnZeros:        iputil.VpnIp(32 - ones),
		myVpnNet:          myVpnNet,
		addrMap:           make(map[iputil.VpnIp]*RemoteList),
		nebulaPort:        nebulaPort,
		atomicLighthouses: make(map[iputil.VpnIp]struct{}),
		atomicStaticList:  make(map[iputil.VpnIp]struct{}),
		punchConn:         pc,
		punchy:            p,
		l:                 l,
	}

	if c.GetBool("stats.lighthouse_metrics", false) {
		h.metrics = newLighthouseMetrics()
		h.metricHolepunchTx = metrics.GetOrRegisterCounter("messages.tx.holepunch", nil)
	} else {
		h.metricHolepunchTx = metrics.NilCounter{}
	}

	// Initial load of all hot-reloadable settings; subsequent SIGHUP reloads
	// come through the callback below with initial=false.
	err := h.reload(c, true)
	if err != nil {
		return nil, err
	}

	c.RegisterReloadCallback(func(c *config.C) {
		err := h.reload(c, false)
		switch v := err.(type) {
		case util.ContextualError:
			v.Log(l)
		case error:
			l.WithError(err).Error("failed to reload lighthouse")
		}
	})

	return &h, nil
}

// GetStaticHostList returns the current static host set; safe without the lh lock.
func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
	return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList))))
}

// GetLighthouses returns the current set of lighthouse vpn ips; safe without the lh lock.
func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
	return *(*map[iputil.VpnIp]struct{})(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses))))
}

// GetRemoteAllowList returns the current remote allow list; safe without the lh lock.
func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
	return (*RemoteAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList))))
}

// GetLocalAllowList returns the current local allow list; safe without the lh lock.
func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
	return (*LocalAllowList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList))))
}
[]netIpAndPort { return *(*[]netIpAndPort)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)))) } func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp { return *(*[]iputil.VpnIp)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)))) } func (lh *LightHouse) GetUpdateInterval() int64 { return atomic.LoadInt64(&lh.atomicInterval) } func (lh *LightHouse) reload(c *config.C, initial bool) error { if initial || c.HasChanged("lighthouse.advertise_addrs") { rawAdvAddrs := c.GetStringSlice("lighthouse.advertise_addrs", []string{}) advAddrs := make([]netIpAndPort, 0) for i, rawAddr := range rawAdvAddrs { fIp, fPort, err := udp.ParseIPAndPort(rawAddr) if err != nil { return util.NewContextualError("Unable to parse lighthouse.advertise_addrs entry", m{"addr": rawAddr, "entry": i + 1}, err) } if fPort == 0 { fPort = uint16(lh.nebulaPort) } if ip4 := fIp.To4(); ip4 != nil && lh.myVpnNet.Contains(fIp) { lh.l.WithField("addr", rawAddr).WithField("entry", i+1). 
Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range") continue } advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort}) } atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicAdvertiseAddrs)), unsafe.Pointer(&advAddrs)) if !initial { lh.l.Info("lighthouse.advertise_addrs has changed") } } if initial || c.HasChanged("lighthouse.interval") { atomic.StoreInt64(&lh.atomicInterval, int64(c.GetInt("lighthouse.interval", 10))) if !initial { lh.l.Infof("lighthouse.interval changed to %v", lh.atomicInterval) if lh.updateCancel != nil { // May not always have a running routine lh.updateCancel() } lh.LhUpdateWorker(lh.updateParentCtx, lh.updateUdp) } } if initial || c.HasChanged("lighthouse.remote_allow_list") || c.HasChanged("lighthouse.remote_allow_ranges") { ral, err := NewRemoteAllowListFromConfig(c, "lighthouse.remote_allow_list", "lighthouse.remote_allow_ranges") if err != nil { return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err) } atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRemoteAllowList)), unsafe.Pointer(ral)) if !initial { //TODO: a diff will be annoyingly difficult lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed") } } if initial || c.HasChanged("lighthouse.local_allow_list") { lal, err := NewLocalAllowListFromConfig(c, "lighthouse.local_allow_list") if err != nil { return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err) } atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLocalAllowList)), unsafe.Pointer(lal)) if !initial { //TODO: a diff will be annoyingly difficult lh.l.Info("lighthouse.local_allow_list has changed") } } //NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config if initial || c.HasChanged("static_host_map") { staticList := make(map[iputil.VpnIp]struct{}) err := lh.loadStaticMap(c, lh.myVpnNet, staticList) if err != 
nil { return err } atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicStaticList)), unsafe.Pointer(&staticList)) if !initial { //TODO: we should remove any remote list entries for static hosts that were removed/modified? lh.l.Info("static_host_map has changed") } } if initial || c.HasChanged("lighthouse.hosts") { lhMap := make(map[iputil.VpnIp]struct{}) err := lh.parseLighthouses(c, lh.myVpnNet, lhMap) if err != nil { return err } atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicLighthouses)), unsafe.Pointer(&lhMap)) if !initial { //NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic lh.l.Info("lighthouse.hosts has changed") } } if initial || c.HasChanged("relay.relays") { switch c.GetBool("relay.am_relay", false) { case true: // Relays aren't allowed to specify other relays if len(c.GetStringSlice("relay.relays", nil)) > 0 { lh.l.Info("Ignoring relays from config because am_relay is true") } relaysForMe := []iputil.VpnIp{} atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe)) case false: relaysForMe := []iputil.VpnIp{} for _, v := range c.GetStringSlice("relay.relays", nil) { lh.l.WithField("RelayIP", v).Info("Read relay from config") configRIP := net.ParseIP(v) if configRIP != nil { relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP)) } } atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&lh.atomicRelaysForMe)), unsafe.Pointer(&relaysForMe)) } } return nil } func (lh *LightHouse) parseLighthouses(c *config.C, tunCidr *net.IPNet, lhMap map[iputil.VpnIp]struct{}) error { lhs := c.GetStringSlice("lighthouse.hosts", []string{}) if lh.amLighthouse && len(lhs) != 0 { lh.l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config") } for i, host := range lhs { ip := net.ParseIP(host) if ip == nil { return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, 
// loadStaticMap parses the static_host_map stanza into the address cache and
// records each key in the caller supplied staticList set. Keys must be vpn
// ips inside tunCidr; values may be a single "ip:port" or a list of them.
func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
	shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
	i := 0

	// NOTE(review): shm is a map, so the "entry" numbers reported in errors do
	// not follow config file order — confirm that is acceptable for operators.
	for k, v := range shm {
		rip := net.ParseIP(fmt.Sprintf("%v", k))
		if rip == nil {
			return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, nil)
		}

		if !tunCidr.Contains(rip) {
			return util.NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": rip, "network": tunCidr.String(), "entry": i + 1}, nil)
		}

		vpnIp := iputil.Ip2VpnIp(rip)
		vals, ok := v.([]interface{})
		if ok {
			// Multiple underlay addresses for this vpn ip
			for _, v := range vals {
				ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
				if err != nil {
					return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
				}
				lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
			}

		} else {
			// Single underlay address
			ip, port, err := udp.ParseIPAndPort(fmt.Sprintf("%v", v))
			if err != nil {
				return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
			}
			lh.addStaticRemote(vpnIp, udp.NewAddr(ip, port), staticList)
		}
		i++
	}

	return nil
}

// Query returns the cached RemoteList for ip, or nil if we have none yet.
// When ip is not itself a lighthouse, an async query is also sent to the
// lighthouses so a later call can be answered from cache.
func (lh *LightHouse) Query(ip iputil.VpnIp, f udp.EncWriter) *RemoteList {
	if !lh.IsLighthouseIP(ip) {
		lh.QueryServer(ip, f)
	}
	lh.RLock()
	if v, ok := lh.addrMap[ip]; ok {
		lh.RUnlock()
		return v
	}
	lh.RUnlock()
	return nil
}

// This is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f udp.EncWriter) {
	// Lighthouses and queries about lighthouses themselves are no-ops
	if lh.amLighthouse {
		return
	}

	if lh.IsLighthouseIP(ip) {
		return
	}

	// Send a query to the lighthouses and hope for the best next time
	query, err := NewLhQueryByInt(ip).Marshal()
	if err != nil {
		lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
		return
	}

	lighthouses := lh.GetLighthouses()
	lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
	nb := make([]byte, 12, 12)
	out := make([]byte, mtu)

	for n := range lighthouses {
		f.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
	}
}

// QueryCache returns the RemoteList for ip, creating an empty entry if none
// exists. Unlike Query it never reaches out to a lighthouse.
func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
	lh.RLock()
	if v, ok := lh.addrMap[ip]; ok {
		lh.RUnlock()
		return v
	}
	lh.RUnlock()

	lh.Lock()
	defer lh.Unlock()
	// Add an entry if we don't already have one
	return lh.unlockedGetRemoteList(ip)
}

// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
// details from the remote list. It looks for a hit in the addrMap and a hit in the RemoteList under the owner vpnIp
// If one is found then f() is called with proper locking, f() must return result of n.MarshalTo()
func (lh *LightHouse) queryAndPrepMessage(vpnIp iputil.VpnIp, f func(*cache) (int, error)) (bool, int, error) {
	lh.RLock()
	// Do we have an entry in the main cache?
	if v, ok := lh.addrMap[vpnIp]; ok {
		// Swap lh lock for remote list lock
		v.RLock()
		defer v.RUnlock()

		lh.RUnlock()

		// vpnIp should also be the owner here since we are a lighthouse.
		c := v.cache[vpnIp]
		// Make sure we have
		if c != nil {
			n, err := f(c)
			return true, n, err
		}
		return false, 0, nil
	}
	lh.RUnlock()
	return false, 0, nil
}

// DeleteVpnIp removes the cached answers for vpnIp unless it is a static host.
func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
	// First we check the static mapping
	// and do nothing if it is there
	if _, ok := lh.GetStaticHostList()[vpnIp]; ok {
		return
	}
	lh.Lock()
	//l.Debugln(lh.addrMap)
	delete(lh.addrMap, vpnIp)

	if lh.l.Level >= logrus.DebugLevel {
		lh.l.Debugf("deleting %s from lighthouse.", vpnIp)
	}

	lh.Unlock()
}

// AddStaticRemote adds a static host entry for vpnIp as ourselves as the owner
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
//NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
func (lh *LightHouse) addStaticRemote(vpnIp iputil.VpnIp, toAddr *udp.Addr, staticList map[iputil.VpnIp]struct{}) {
	lh.Lock()
	am := lh.unlockedGetRemoteList(vpnIp)
	am.Lock()
	defer am.Unlock()
	// Hand-off: the remote list lock is held before the lh lock is released
	lh.Unlock()

	if ipv4 := toAddr.IP.To4(); ipv4 != nil {
		to := NewIp4AndPort(ipv4, uint32(toAddr.Port))
		if !lh.unlockedShouldAddV4(vpnIp, to) {
			return
		}
		am.unlockedPrependV4(lh.myVpnIp, to)

	} else {
		to := NewIp6AndPort(toAddr.IP, uint32(toAddr.Port))
		if !lh.unlockedShouldAddV6(vpnIp, to) {
			return
		}
		am.unlockedPrependV6(lh.myVpnIp, to)
	}

	// Mark it as static in the caller provided map
	staticList[vpnIp] = struct{}{}
}

// unlockedGetRemoteList assumes you have the lh lock
func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
	am, ok := lh.addrMap[vpnIp]
	if !ok {
		am = NewRemoteList()
		lh.addrMap[vpnIp] = am
	}
	return am
}
logrus.TraceLevel { lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow") } if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.VpnIp(to.Ip)) { return false } return true } // unlockedShouldAddV6 checks if to is allowed by our allow list func (lh *LightHouse) unlockedShouldAddV6(vpnIp iputil.VpnIp, to *Ip6AndPort) bool { allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, to.Hi, to.Lo) if lh.l.Level >= logrus.TraceLevel { lh.l.WithField("remoteIp", lhIp6ToIp(to)).WithField("allow", allow).Trace("remoteAllowList.Allow") } // We don't check our vpn network here because nebula does not support ipv6 on the inside if !allow { return false } return true } func lhIp6ToIp(v *Ip6AndPort) net.IP { ip := make(net.IP, 16) binary.BigEndian.PutUint64(ip[:8], v.Hi) binary.BigEndian.PutUint64(ip[8:], v.Lo) return ip } func (lh *LightHouse) IsLighthouseIP(vpnIp iputil.VpnIp) bool { if _, ok := lh.GetLighthouses()[vpnIp]; ok { return true } return false } func NewLhQueryByInt(VpnIp iputil.VpnIp) *NebulaMeta { return &NebulaMeta{ Type: NebulaMeta_HostQuery, Details: &NebulaMetaDetails{ VpnIp: uint32(VpnIp), }, } } func NewIp4AndPort(ip net.IP, port uint32) *Ip4AndPort { ipp := Ip4AndPort{Port: port} ipp.Ip = uint32(iputil.Ip2VpnIp(ip)) return &ipp } func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort { return &Ip6AndPort{ Hi: binary.BigEndian.Uint64(ip[:8]), Lo: binary.BigEndian.Uint64(ip[8:]), Port: port, } } func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr { ip := ipp.Ip return udp.NewAddr( net.IPv4(byte(ip&0xff000000>>24), byte(ip&0x00ff0000>>16), byte(ip&0x0000ff00>>8), byte(ip&0x000000ff)), uint16(ipp.Port), ) } func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr { return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port)) } func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) { lh.updateParentCtx = ctx lh.updateUdp = f interval := lh.GetUpdateInterval() if lh.amLighthouse || interval == 0 { return } clockSource := 
// LhUpdateWorker periodically reports our addresses to the lighthouses until
// ctx is cancelled. It records ctx and f on the LightHouse so reload can
// restart it when lighthouse.interval changes. No-op for lighthouses and for
// an interval of 0.
func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f udp.EncWriter) {
	lh.updateParentCtx = ctx
	lh.updateUdp = f

	interval := lh.GetUpdateInterval()
	if lh.amLighthouse || interval == 0 {
		return
	}

	clockSource := time.NewTicker(time.Second * time.Duration(interval))
	// updateCancel lets reload stop this loop before starting a new one
	updateCtx, cancel := context.WithCancel(ctx)
	lh.updateCancel = cancel
	defer clockSource.Stop()

	for {
		// Send immediately, then once per tick
		lh.SendUpdate(f)

		select {
		case <-updateCtx.Done():
			return
		case <-clockSource.C:
			continue
		}
	}
}

// SendUpdate reports our reachable addresses (configured advertise_addrs plus
// discovered local ips outside the vpn range) and our relays to every
// lighthouse we track.
func (lh *LightHouse) SendUpdate(f udp.EncWriter) {
	var v4 []*Ip4AndPort
	var v6 []*Ip6AndPort

	for _, e := range lh.GetAdvertiseAddrs() {
		if ip := e.ip.To4(); ip != nil {
			v4 = append(v4, NewIp4AndPort(e.ip, uint32(e.port)))
		} else {
			v6 = append(v6, NewIp6AndPort(e.ip, uint32(e.port)))
		}
	}

	lal := lh.GetLocalAllowList()
	for _, e := range *localIps(lh.l, lal) {
		// Skip v4 addresses that fall inside our vpn network
		if ip4 := e.To4(); ip4 != nil && ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.Ip2VpnIp(ip4)) {
			continue
		}

		// Only add IPs that aren't my VPN/tun IP
		if ip := e.To4(); ip != nil {
			v4 = append(v4, NewIp4AndPort(e, lh.nebulaPort))
		} else {
			v6 = append(v6, NewIp6AndPort(e, lh.nebulaPort))
		}
	}

	var relays []uint32
	for _, r := range lh.GetRelaysForMe() {
		relays = append(relays, (uint32)(r))
	}

	m := &NebulaMeta{
		Type: NebulaMeta_HostUpdateNotification,
		Details: &NebulaMetaDetails{
			VpnIp:       uint32(lh.myVpnIp),
			Ip4AndPorts: v4,
			Ip6AndPorts: v6,
			RelayVpnIp:  relays,
		},
	}

	lighthouses := lh.GetLighthouses()
	lh.metricTx(NebulaMeta_HostUpdateNotification, int64(len(lighthouses)))
	nb := make([]byte, 12, 12)
	out := make([]byte, mtu)

	mm, err := m.Marshal()
	if err != nil {
		lh.l.WithError(err).Error("Error while marshaling for lighthouse update")
		return
	}

	for vpnIp := range lighthouses {
		f.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
	}
}

// LightHouseHandler processes inbound lighthouse protocol messages. Its
// buffers are reused across requests, so one handler must not be shared
// between routines.
type LightHouseHandler struct {
	lh   *LightHouse
	nb   []byte // nonce scratch buffer
	out  []byte // outbound packet scratch buffer
	pb   []byte // protobuf marshal scratch buffer
	meta *NebulaMeta
	l    *logrus.Logger
}

// NewRequestHandler allocates a handler with its scratch buffers, one per
// worker routine.
func (lh *LightHouse) NewRequestHandler() *LightHouseHandler {
	lhh := &LightHouseHandler{
		lh:  lh,
		nb:  make([]byte, 12, 12),
		out: make([]byte, mtu),
		l:   lh.l,
		pb:  make([]byte, mtu),

		meta: &NebulaMeta{
			Details: &NebulaMetaDetails{},
		},
	}

	return lhh
}
lh.metrics.Rx(header.MessageType(t), 0, i) } func (lh *LightHouse) metricTx(t NebulaMeta_MessageType, i int64) { lh.metrics.Tx(header.MessageType(t), 0, i) } // This method is similar to Reset(), but it re-uses the pointer structs // so that we don't have to re-allocate them func (lhh *LightHouseHandler) resetMeta() *NebulaMeta { details := lhh.meta.Details lhh.meta.Reset() // Keep the array memory around details.Ip4AndPorts = details.Ip4AndPorts[:0] details.Ip6AndPorts = details.Ip6AndPorts[:0] details.RelayVpnIp = details.RelayVpnIp[:0] lhh.meta.Details = details return lhh.meta } func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w udp.EncWriter) { n := lhh.resetMeta() err := n.Unmarshal(p) if err != nil { lhh.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr). Error("Failed to unmarshal lighthouse packet") //TODO: send recv_error? return } if n.Details == nil { lhh.l.WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr). Error("Invalid lighthouse update") //TODO: send recv_error? 
// HandleRequest unmarshals a lighthouse protocol packet from vpnIp and
// dispatches it by message type. Malformed packets are logged and dropped.
func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w udp.EncWriter) {
	n := lhh.resetMeta()
	err := n.Unmarshal(p)
	if err != nil {
		lhh.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr).
			Error("Failed to unmarshal lighthouse packet")
		//TODO: send recv_error?
		return
	}

	if n.Details == nil {
		lhh.l.WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr).
			Error("Invalid lighthouse update")
		//TODO: send recv_error?
		return
	}

	lhh.lh.metricRx(n.Type, 1)

	switch n.Type {
	case NebulaMeta_HostQuery:
		lhh.handleHostQuery(n, vpnIp, rAddr, w)

	case NebulaMeta_HostQueryReply:
		lhh.handleHostQueryReply(n, vpnIp)

	case NebulaMeta_HostUpdateNotification:
		lhh.handleHostUpdateNotification(n, vpnIp)

	case NebulaMeta_HostMovedNotification:
	case NebulaMeta_HostPunchNotification:
		lhh.handleHostPunchNotification(n, vpnIp, w)
	}
}

// handleHostQuery answers a HostQuery (lighthouse only): it replies to the
// querier with the target's addresses, then sends the target a punch
// notification carrying the querier's addresses.
func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w udp.EncWriter) {
	// Exit if we don't answer queries
	if !lhh.lh.amLighthouse {
		if lhh.l.Level >= logrus.DebugLevel {
			lhh.l.Debugln("I don't answer queries, but received from: ", addr)
		}
		return
	}

	//TODO: we can DRY this further
	reqVpnIp := n.Details.VpnIp
	//TODO: Maybe instead of marshalling into n we marshal into a new `r` to not nuke our current request data
	found, ln, err := lhh.lh.queryAndPrepMessage(iputil.VpnIp(n.Details.VpnIp), func(c *cache) (int, error) {
		// n is reused as the reply message; reqVpnIp was saved above for this reason
		n = lhh.resetMeta()
		n.Type = NebulaMeta_HostQueryReply
		n.Details.VpnIp = reqVpnIp

		lhh.coalesceAnswers(c, n)

		return n.MarshalTo(lhh.pb)
	})

	if !found {
		return
	}

	if err != nil {
		lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host query reply")
		return
	}

	lhh.lh.metricTx(NebulaMeta_HostQueryReply, 1)
	w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])

	// This signals the other side to punch some zero byte udp packets
	found, ln, err = lhh.lh.queryAndPrepMessage(vpnIp, func(c *cache) (int, error) {
		n = lhh.resetMeta()
		n.Type = NebulaMeta_HostPunchNotification
		n.Details.VpnIp = uint32(vpnIp)

		lhh.coalesceAnswers(c, n)

		return n.MarshalTo(lhh.pb)
	})

	if !found {
		return
	}

	if err != nil {
		lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host was queried for")
		return
	}

	lhh.lh.metricTx(NebulaMeta_HostPunchNotification, 1)
	w.SendMessageToVpnIp(header.LightHouse, 0, iputil.VpnIp(reqVpnIp), lhh.pb[:ln], lhh.nb, lhh.out[:0])
}

// coalesceAnswers copies the learned and reported addresses plus relays from
// cache entry c into the outgoing message n.
func (lhh *LightHouseHandler) coalesceAnswers(c *cache, n *NebulaMeta) {
	if c.v4 != nil {
		if c.v4.learned != nil {
			n.Details.Ip4AndPorts = append(n.Details.Ip4AndPorts, c.v4.learned)
		}
		if c.v4.reported != nil && len(c.v4.reported) > 0 {
			n.Details.Ip4AndPorts = append(n.Details.Ip4AndPorts, c.v4.reported...)
		}
	}

	if c.v6 != nil {
		if c.v6.learned != nil {
			n.Details.Ip6AndPorts = append(n.Details.Ip6AndPorts, c.v6.learned)
		}
		if c.v6.reported != nil && len(c.v6.reported) > 0 {
			n.Details.Ip6AndPorts = append(n.Details.Ip6AndPorts, c.v6.reported...)
		}
	}

	if c.relay != nil {
		n.Details.RelayVpnIp = append(n.Details.RelayVpnIp, c.relay.relay...)
	}
}

// handleHostQueryReply stores a lighthouse's answer about n.Details.VpnIp in
// our cache and nudges the handshake manager. Replies from non-lighthouses
// are discarded.
func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp iputil.VpnIp) {
	if !lhh.lh.IsLighthouseIP(vpnIp) {
		return
	}

	lhh.lh.Lock()
	am := lhh.lh.unlockedGetRemoteList(iputil.VpnIp(n.Details.VpnIp))
	am.Lock()
	lhh.lh.Unlock()

	certVpnIp := iputil.VpnIp(n.Details.VpnIp)
	am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
	am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
	am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
	am.Unlock()

	// Non-blocking attempt to trigger, skip if it would block
	select {
	case lhh.lh.handshakeTrigger <- iputil.VpnIp(n.Details.VpnIp):
	default:
	}
}

// handleHostUpdateNotification stores a host's self-reported addresses
// (lighthouse only); updates claiming a vpn ip other than the sender's are
// rejected.
func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp) {
	if !lhh.lh.amLighthouse {
		if lhh.l.Level >= logrus.DebugLevel {
			lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
		}
		return
	}

	//Simple check that the host sent this not someone else
	if n.Details.VpnIp != uint32(vpnIp) {
		if lhh.l.Level >= logrus.DebugLevel {
			lhh.l.WithField("vpnIp", vpnIp).WithField("answer", iputil.VpnIp(n.Details.VpnIp)).Debugln("Host sent invalid update")
		}
		return
	}

	lhh.lh.Lock()
	am := lhh.lh.unlockedGetRemoteList(vpnIp)
	am.Lock()
	lhh.lh.Unlock()

	certVpnIp := iputil.VpnIp(n.Details.VpnIp)
	am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
	am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
	am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
	am.Unlock()
}

// handleHostPunchNotification fires zero byte hole punch packets at every
// address a lighthouse told us is trying to reach us, and optionally sends a
// test packet back to help with double-nat scenarios. Only honored when the
// sender is a lighthouse.
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w udp.EncWriter) {
	if !lhh.lh.IsLighthouseIP(vpnIp) {
		return
	}

	empty := []byte{0}
	punch := func(vpnPeer *udp.Addr) {
		if vpnPeer == nil {
			return
		}

		go func() {
			time.Sleep(lhh.lh.punchy.GetDelay())
			lhh.lh.metricHolepunchTx.Inc(1)
			lhh.lh.punchConn.WriteTo(empty, vpnPeer)
		}()

		if lhh.l.Level >= logrus.DebugLevel {
			//TODO: lacking the ip we are actually punching on, old: l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
			lhh.l.Debugf("Punching on %d for %s", vpnPeer.Port, iputil.VpnIp(n.Details.VpnIp))
		}
	}

	for _, a := range n.Details.Ip4AndPorts {
		punch(NewUDPAddrFromLH4(a))
	}

	for _, a := range n.Details.Ip6AndPorts {
		punch(NewUDPAddrFromLH6(a))
	}

	// This sends a nebula test packet to the host trying to contact us. In the case
	// of a double nat or other difficult scenario, this may help establish
	// a tunnel.
	if lhh.lh.punchy.GetRespond() {
		queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
		go func() {
			time.Sleep(time.Second * 5)
			if lhh.l.Level >= logrus.DebugLevel {
				lhh.l.Debugf("Sending a nebula test packet to vpn ip %s", queryVpnIp)
			}
			//NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine
			// for each punchBack packet. We should move this into a timerwheel or a single goroutine
			// managed by a channel.
			w.SendMessageToVpnIp(header.Test, header.TestRequest, queryVpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
		}()
	}
}

// ipMaskContains checks if testIp is contained by ip after applying a cidr
// zeros is 32 - bits from net.IPMask.Size()
func ipMaskContains(ip iputil.VpnIp, zeros iputil.VpnIp, testIp iputil.VpnIp) bool {
	return (testIp^ip)>>zeros == 0
}
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}} c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}} _, err = NewLightHouseFromConfig(l, c, myVpnNet, nil, nil) assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry") } func BenchmarkLighthouseHandleRequest(b *testing.B) { l := test.NewLogger() _, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0") c := config.NewC(l) lh, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil) if !assert.NoError(b, err) { b.Fatal() } hAddr := udp.NewAddrFromString("4.5.6.7:12345") hAddr2 := udp.NewAddrFromString("4.5.6.7:12346") lh.addrMap[3] = NewRemoteList() lh.addrMap[3].unlockedSetV4( 3, 3, []*Ip4AndPort{ NewIp4AndPort(hAddr.IP, uint32(hAddr.Port)), NewIp4AndPort(hAddr2.IP, uint32(hAddr2.Port)), }, func(iputil.VpnIp, *Ip4AndPort) bool { return true }, ) rAddr := udp.NewAddrFromString("1.2.2.3:12345") rAddr2 := udp.NewAddrFromString("1.2.2.3:12346") lh.addrMap[2] = NewRemoteList() lh.addrMap[2].unlockedSetV4( 3, 3, []*Ip4AndPort{ NewIp4AndPort(rAddr.IP, uint32(rAddr.Port)), NewIp4AndPort(rAddr2.IP, uint32(rAddr2.Port)), }, func(iputil.VpnIp, *Ip4AndPort) bool { return true }, ) mw := &mockEncWriter{} b.Run("notfound", func(b *testing.B) { lhh := lh.NewRequestHandler() req := &NebulaMeta{ Type: NebulaMeta_HostQuery, Details: &NebulaMetaDetails{ VpnIp: 4, Ip4AndPorts: nil, }, } p, err := req.Marshal() assert.NoError(b, err) for n := 0; n < b.N; n++ { lhh.HandleRequest(rAddr, 2, p, mw) } }) b.Run("found", func(b *testing.B) { lhh := lh.NewRequestHandler() req := &NebulaMeta{ Type: NebulaMeta_HostQuery, Details: &NebulaMetaDetails{ VpnIp: 3, Ip4AndPorts: nil, }, } p, err := req.Marshal() assert.NoError(b, err) for n := 0; n < b.N; n++ { lhh.HandleRequest(rAddr, 2, p, mw) } }) } func TestLighthouse_Memory(t *testing.T) { l := test.NewLogger() myUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.2"), Port: 4242} myUdpAddr1 := 
&udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4242} myUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.2"), Port: 4242} myUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.2"), Port: 4242} myUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.2"), Port: 4242} myUdpAddr5 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4243} myUdpAddr6 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4244} myUdpAddr7 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4245} myUdpAddr8 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4246} myUdpAddr9 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4247} myUdpAddr10 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4248} myUdpAddr11 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4249} myVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.2")) theirUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.3"), Port: 4242} theirUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.3"), Port: 4242} theirUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.3"), Port: 4242} theirUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.3"), Port: 4242} theirUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.3"), Port: 4242} theirVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.3")) c := config.NewC(l) c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true} c.Settings["listen"] = map[interface{}]interface{}{"port": 4242} lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil) assert.NoError(t, err) lhh := lh.NewRequestHandler() // Test that my first update responds with just that newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr2}, lhh) r := newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2) // Ensure we don't accumulate addresses newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr3}, lhh) r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh) assertIp4InArray(t, 
r.msg.Details.Ip4AndPorts, myUdpAddr3) // Grow it back to 2 newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr4}, lhh) r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4) // Update a different host and ask about it newLHHostUpdate(theirUdpAddr0, theirVpnIp, []*udp.Addr{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh) r = newLHHostRequest(theirUdpAddr0, theirVpnIp, theirVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4) // Have both hosts ask about the other r = newLHHostRequest(theirUdpAddr0, theirVpnIp, myVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4) r = newLHHostRequest(myUdpAddr0, myVpnIp, theirVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4) // Make sure we didn't get changed r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4) // Ensure proper ordering and limiting // Send 12 addrs, get 10 back, the last 2 removed, allowing the duplicate to remain (clients dedupe) newLHHostUpdate( myUdpAddr0, myVpnIp, []*udp.Addr{ myUdpAddr1, myUdpAddr2, myUdpAddr3, myUdpAddr4, myUdpAddr5, myUdpAddr5, //Duplicated on purpose myUdpAddr6, myUdpAddr7, myUdpAddr8, myUdpAddr9, myUdpAddr10, myUdpAddr11, // This should get cut }, lhh) r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh) assertIp4InArray( t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2, myUdpAddr3, myUdpAddr4, myUdpAddr5, myUdpAddr5, myUdpAddr6, myUdpAddr7, myUdpAddr8, myUdpAddr9, ) // Make sure we won't add ips in our vpn network bad1 := &udp.Addr{IP: net.ParseIP("10.128.0.99"), Port: 4242} bad2 := &udp.Addr{IP: net.ParseIP("10.128.0.100"), Port: 4242} good := &udp.Addr{IP: net.ParseIP("1.128.0.99"), Port: 4242} newLHHostUpdate(myUdpAddr0, myVpnIp, 
[]*udp.Addr{bad1, bad2, good}, lhh) r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh) assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good) } func TestLighthouse_reload(t *testing.T) { l := test.NewLogger() c := config.NewC(l) c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true} c.Settings["listen"] = map[interface{}]interface{}{"port": 4242} lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil) assert.NoError(t, err) c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}} lh.reload(c, false) } func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply { req := &NebulaMeta{ Type: NebulaMeta_HostQuery, Details: &NebulaMetaDetails{ VpnIp: uint32(queryVpnIp), }, } b, err := req.Marshal() if err != nil { panic(err) } filter := NebulaMeta_HostQueryReply w := &testEncWriter{ metaFilter: &filter, } lhh.HandleRequest(fromAddr, myVpnIp, b, w) return w.lastReply } func newLHHostUpdate(fromAddr *udp.Addr, vpnIp iputil.VpnIp, addrs []*udp.Addr, lhh *LightHouseHandler) { req := &NebulaMeta{ Type: NebulaMeta_HostUpdateNotification, Details: &NebulaMetaDetails{ VpnIp: uint32(vpnIp), Ip4AndPorts: make([]*Ip4AndPort, len(addrs)), }, } for k, v := range addrs { req.Details.Ip4AndPorts[k] = &Ip4AndPort{Ip: uint32(iputil.Ip2VpnIp(v.IP)), Port: uint32(v.Port)} } b, err := req.Marshal() if err != nil { panic(err) } w := &testEncWriter{} lhh.HandleRequest(fromAddr, vpnIp, b, w) } //TODO: this is a RemoteList test //func Test_lhRemoteAllowList(t *testing.T) { // l := NewLogger() // c := NewConfig(l) // c.Settings["remoteallowlist"] = map[interface{}]interface{}{ // "10.20.0.0/12": false, // } // allowList, err := c.GetAllowList("remoteallowlist", false) // assert.Nil(t, err) // // lh1 := "10.128.0.2" // lh1IP := net.ParseIP(lh1) // // udpServer, _ := NewListener(l, "0.0.0.0", 0, true) // 
// lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, []uint32{ip2int(lh1IP)}, 10, 10003, udpServer, false, 1, false) // lh.SetRemoteAllowList(allowList) // // // A disallowed ip should not enter the cache but we should end up with an empty entry in the addrMap // remote1IP := net.ParseIP("10.20.0.3") // remotes := lh.unlockedGetRemoteList(ip2int(remote1IP)) // remotes.unlockedPrependV4(ip2int(remote1IP), NewIp4AndPort(remote1IP, 4242)) // assert.NotNil(t, lh.addrMap[ip2int(remote1IP)]) // assert.Empty(t, lh.addrMap[ip2int(remote1IP)].CopyAddrs([]*net.IPNet{})) // // // Make sure a good ip enters the cache and addrMap // remote2IP := net.ParseIP("10.128.0.3") // remote2UDPAddr := NewUDPAddr(remote2IP, uint16(4242)) // lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote2UDPAddr.IP, uint32(remote2UDPAddr.Port)), false, false) // assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr) // // // Another good ip gets into the cache, ordering is inverted // remote3IP := net.ParseIP("10.128.0.4") // remote3UDPAddr := NewUDPAddr(remote3IP, uint16(4243)) // lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote3UDPAddr.IP, uint32(remote3UDPAddr.Port)), false, false) // assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr, remote3UDPAddr) // // // If we exceed the length limit we should only have the most recent addresses // addedAddrs := []*udpAddr{} // for i := 0; i < 11; i++ { // remoteUDPAddr := NewUDPAddr(net.IP{10, 128, 0, 4}, uint16(4243+i)) // lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remoteUDPAddr.IP, uint32(remoteUDPAddr.Port)), false, false) // // The first entry here is a duplicate, don't add it to the assert list // if i != 0 { // addedAddrs = append(addedAddrs, remoteUDPAddr) // } // } // // // We should only have the last 10 of what we tried to add // 
assert.True(t, len(addedAddrs) >= 10, "We should have tried to add at least 10 addresses") // assertUdpAddrInArray( // t, // lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), // addedAddrs[0], // addedAddrs[1], // addedAddrs[2], // addedAddrs[3], // addedAddrs[4], // addedAddrs[5], // addedAddrs[6], // addedAddrs[7], // addedAddrs[8], // addedAddrs[9], // ) //} func Test_ipMaskContains(t *testing.T) { assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.0.255")))) assert.False(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1")))) assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1")))) } type testLhReply struct { nebType header.MessageType nebSubType header.MessageSubType vpnIp iputil.VpnIp msg *NebulaMeta } type testEncWriter struct { lastReply testLhReply metaFilter *NebulaMeta_MessageType } func (tw *testEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) { } func (tw *testEncWriter) Handshake(vpnIp iputil.VpnIp) { } func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) { msg := &NebulaMeta{} err := msg.Unmarshal(p) if tw.metaFilter == nil || msg.Type == *tw.metaFilter { tw.lastReply = testLhReply{ nebType: t, nebSubType: st, vpnIp: vpnIp, msg: msg, } } if err != nil { panic(err) } } // assertIp4InArray asserts every address in want is at the same position in have and that the lengths match func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...*udp.Addr) { if !assert.Len(t, have, len(want)) { return } for k, w := range want { if !(have[k].Ip == uint32(iputil.Ip2VpnIp(w.IP)) && have[k].Port == uint32(w.Port)) { assert.Fail(t, fmt.Sprintf("Response did not contain: %v:%v at %v; %v", w.IP, w.Port, k, translateV4toUdpAddr(have))) } } } // assertUdpAddrInArray asserts 
every address in want is at the same position in have and that the lengths match func assertUdpAddrInArray(t *testing.T, have []*udp.Addr, want ...*udp.Addr) { if !assert.Len(t, have, len(want)) { return } for k, w := range want { if !(have[k].IP.Equal(w.IP) && have[k].Port == w.Port) { assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v; %v", w, k, have)) } } } func translateV4toUdpAddr(ips []*Ip4AndPort) []*udp.Addr { addrs := make([]*udp.Addr, len(ips)) for k, v := range ips { addrs[k] = NewUDPAddrFromLH4(v) } return addrs } nebula-1.6.1+dfsg/logger.go000066400000000000000000000022301434072716400155170ustar00rootroot00000000000000package nebula import ( "fmt" "strings" "time" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" ) func configLogger(l *logrus.Logger, c *config.C) error { // set up our logging level logLevel, err := logrus.ParseLevel(strings.ToLower(c.GetString("logging.level", "info"))) if err != nil { return fmt.Errorf("%s; possible levels: %s", err, logrus.AllLevels) } l.SetLevel(logLevel) disableTimestamp := c.GetBool("logging.disable_timestamp", false) timestampFormat := c.GetString("logging.timestamp_format", "") fullTimestamp := (timestampFormat != "") if timestampFormat == "" { timestampFormat = time.RFC3339 } logFormat := strings.ToLower(c.GetString("logging.format", "text")) switch logFormat { case "text": l.Formatter = &logrus.TextFormatter{ TimestampFormat: timestampFormat, FullTimestamp: fullTimestamp, DisableTimestamp: disableTimestamp, } case "json": l.Formatter = &logrus.JSONFormatter{ TimestampFormat: timestampFormat, DisableTimestamp: disableTimestamp, } default: return fmt.Errorf("unknown log format `%s`. 
possible formats: %s", logFormat, []string{"text", "json"}) } return nil } nebula-1.6.1+dfsg/main.go000066400000000000000000000252411434072716400151730ustar00rootroot00000000000000package nebula import ( "context" "encoding/binary" "errors" "fmt" "net" "time" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/overlay" "github.com/slackhq/nebula/sshd" "github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/util" "gopkg.in/yaml.v2" ) type m map[string]interface{} func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, tunFd *int) (retcon *Control, reterr error) { ctx, cancel := context.WithCancel(context.Background()) // Automatically cancel the context if Main returns an error, to signal all created goroutines to quit. defer func() { if reterr != nil { cancel() } }() l := logger l.Formatter = &logrus.TextFormatter{ FullTimestamp: true, } // Print the config if in test, the exit comes later if configTest { b, err := yaml.Marshal(c.Settings) if err != nil { return nil, err } // Print the final config l.Println(string(b)) } err := configLogger(l, c) if err != nil { return nil, util.NewContextualError("Failed to configure the logger", nil, err) } c.RegisterReloadCallback(func(c *config.C) { err := configLogger(l, c) if err != nil { l.WithError(err).Error("Failed to configure the logger") } }) caPool, err := loadCAFromConfig(l, c) if err != nil { //The errors coming out of loadCA are already nicely formatted return nil, util.NewContextualError("Failed to load ca from config", nil, err) } l.WithField("fingerprints", caPool.GetFingerprints()).Debug("Trusted CA fingerprints") cs, err := NewCertStateFromConfig(c) if err != nil { //The errors coming out of NewCertStateFromConfig are already nicely formatted return nil, util.NewContextualError("Failed to load certificate from config", nil, err) } l.WithField("cert", cs.certificate).Debug("Client nebula certificate") fw, err := NewFirewallFromConfig(l, 
cs.certificate, c) if err != nil { return nil, util.NewContextualError("Error while loading firewall rules", nil, err) } l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started") // TODO: make sure mask is 4 bytes tunCidr := cs.certificate.Details.Ips[0] ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd")) wireSSHReload(l, ssh, c) var sshStart func() if c.GetBool("sshd.enabled", false) { sshStart, err = configSSH(l, ssh, c) if err != nil { return nil, util.NewContextualError("Error while configuring the sshd", nil, err) } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // All non system modifying configuration consumption should live above this line // tun config, listeners, anything modifying the computer should be below //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// var routines int // If `routines` is set, use that and ignore the specific values if routines = c.GetInt("routines", 0); routines != 0 { if routines < 1 { routines = 1 } if routines > 1 { l.WithField("routines", routines).Info("Using multiple routines") } } else { // deprecated and undocumented tunQueues := c.GetInt("tun.routines", 1) udpQueues := c.GetInt("listen.routines", 1) if tunQueues > udpQueues { routines = tunQueues } else { routines = udpQueues } if routines != 1 { l.WithField("routines", routines).Warn("Setting tun.routines and listen.routines is deprecated. Use `routines` instead") } } // EXPERIMENTAL // Intentionally not documented yet while we do more testing and determine // a good default value. 
conntrackCacheTimeout := c.GetDuration("firewall.conntrack.routine_cache_timeout", 0) if routines > 1 && !c.IsSet("firewall.conntrack.routine_cache_timeout") { // Use a different default if we are running with multiple routines conntrackCacheTimeout = 1 * time.Second } if conntrackCacheTimeout > 0 { l.WithField("duration", conntrackCacheTimeout).Info("Using routine-local conntrack cache") } var tun overlay.Device if !configTest { c.CatchHUP(ctx) tun, err = overlay.NewDeviceFromConfig(c, l, tunCidr, tunFd, routines) if err != nil { return nil, util.NewContextualError("Failed to get a tun/tap device", nil, err) } defer func() { if reterr != nil { tun.Close() } }() } // set up our UDP listener udpConns := make([]*udp.Conn, routines) port := c.GetInt("listen.port", 0) if !configTest { for i := 0; i < routines; i++ { udpServer, err := udp.NewListener(l, c.GetString("listen.host", "0.0.0.0"), port, routines > 1, c.GetInt("listen.batch", 64)) if err != nil { return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err) } udpServer.ReloadConfig(c) udpConns[i] = udpServer } } // Set up my internal host map var preferredRanges []*net.IPNet rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{}) // First, check if 'preferred_ranges' is set and fallback to 'local_range' if len(rawPreferredRanges) > 0 { for _, rawPreferredRange := range rawPreferredRanges { _, preferredRange, err := net.ParseCIDR(rawPreferredRange) if err != nil { return nil, util.NewContextualError("Failed to parse preferred ranges", nil, err) } preferredRanges = append(preferredRanges, preferredRange) } } // local_range was superseded by preferred_ranges. If it is still present, // merge the local_range setting into preferred_ranges. We will probably // deprecate local_range and remove in the future. 
rawLocalRange := c.GetString("local_range", "") if rawLocalRange != "" { _, localRange, err := net.ParseCIDR(rawLocalRange) if err != nil { return nil, util.NewContextualError("Failed to parse local_range", nil, err) } // Check if the entry for local_range was already specified in // preferred_ranges. Don't put it into the slice twice if so. var found bool for _, r := range preferredRanges { if r.String() == localRange.String() { found = true break } } if !found { preferredRanges = append(preferredRanges, localRange) } } hostMap := NewHostMap(l, "main", tunCidr, preferredRanges) hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false) l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created") /* config.SetDefault("promoter.interval", 10) go hostMap.Promoter(config.GetInt("promoter.interval")) */ punchy := NewPunchyFromConfig(l, c) if punchy.GetPunch() && !configTest { l.Info("UDP hole punching enabled") go hostMap.Punchy(ctx, udpConns[0]) } lightHouse, err := NewLightHouseFromConfig(l, c, tunCidr, udpConns[0], punchy) switch { case errors.As(err, &util.ContextualError{}): return nil, err case err != nil: return nil, util.NewContextualError("Failed to initialize lighthouse handler", nil, err) } var messageMetrics *MessageMetrics if c.GetBool("stats.message_metrics", false) { messageMetrics = newMessageMetrics() } else { messageMetrics = newMessageMetricsOnlyRecvError() } useRelays := c.GetBool("relay.use_relays", DefaultUseRelays) && !c.GetBool("relay.am_relay", false) handshakeConfig := HandshakeConfig{ tryInterval: c.GetDuration("handshakes.try_interval", DefaultHandshakeTryInterval), retries: c.GetInt("handshakes.retries", DefaultHandshakeRetries), triggerBuffer: c.GetInt("handshakes.trigger_buffer", DefaultHandshakeTriggerBuffer), useRelays: useRelays, messageMetrics: messageMetrics, } handshakeManager := NewHandshakeManager(l, tunCidr, preferredRanges, hostMap, lightHouse, udpConns[0], 
handshakeConfig) lightHouse.handshakeTrigger = handshakeManager.trigger //TODO: These will be reused for psk //handshakeMACKey := config.GetString("handshake_mac.key", "") //handshakeAcceptedMACKeys := config.GetStringSlice("handshake_mac.accepted_keys", []string{}) serveDns := false if c.GetBool("lighthouse.serve_dns", false) { if c.GetBool("lighthouse.am_lighthouse", false) { serveDns = true } else { l.Warn("DNS server refusing to run because this host is not a lighthouse.") } } checkInterval := c.GetInt("timers.connection_alive_interval", 5) pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10) ifConfig := &InterfaceConfig{ HostMap: hostMap, Inside: tun, Outside: udpConns[0], certState: cs, Cipher: c.GetString("cipher", "aes"), Firewall: fw, ServeDns: serveDns, HandshakeManager: handshakeManager, lightHouse: lightHouse, checkInterval: checkInterval, pendingDeletionInterval: pendingDeletionInterval, DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false), DropMulticast: c.GetBool("tun.drop_multicast", false), routines: routines, MessageMetrics: messageMetrics, version: buildVersion, caPool: caPool, disconnectInvalid: c.GetBool("pki.disconnect_invalid", false), relayManager: NewRelayManager(ctx, l, hostMap, c), ConntrackCacheTimeout: conntrackCacheTimeout, l: l, } switch ifConfig.Cipher { case "aes": noiseEndianness = binary.BigEndian case "chachapoly": noiseEndianness = binary.LittleEndian default: return nil, fmt.Errorf("unknown cipher: %v", ifConfig.Cipher) } var ifce *Interface if !configTest { ifce, err = NewInterface(ctx, ifConfig) if err != nil { return nil, fmt.Errorf("failed to initialize interface: %s", err) } // TODO: Better way to attach these, probably want a new interface in InterfaceConfig // I don't want to make this initial commit too far-reaching though ifce.writers = udpConns ifce.RegisterConfigChangeCallbacks(c) ifce.reloadSendRecvError(c) go handshakeManager.Run(ctx, ifce) go lightHouse.LhUpdateWorker(ctx, ifce) 
} // TODO - stats third-party modules start uncancellable goroutines. Update those libs to accept // a context so that they can exit when the context is Done. statsStart, err := startStats(l, c, buildVersion, configTest) if err != nil { return nil, util.NewContextualError("Failed to start stats emitter", nil, err) } if configTest { return nil, nil } //TODO: check if we _should_ be emitting stats go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10)) attachCommands(l, c, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce) // Start DNS server last to allow using the nebula IP as lighthouse.dns.host var dnsStart func() if lightHouse.amLighthouse && serveDns { l.Debugln("Starting dns server") dnsStart = dnsMain(l, hostMap, c) } return &Control{ifce, l, cancel, sshStart, statsStart, dnsStart}, nil } nebula-1.6.1+dfsg/message_metrics.go000066400000000000000000000052621434072716400174220ustar00rootroot00000000000000package nebula import ( "fmt" "github.com/rcrowley/go-metrics" "github.com/slackhq/nebula/header" ) //TODO: this can probably move into the header package type MessageMetrics struct { rx [][]metrics.Counter tx [][]metrics.Counter rxUnknown metrics.Counter txUnknown metrics.Counter } func (m *MessageMetrics) Rx(t header.MessageType, s header.MessageSubType, i int64) { if m != nil { if t >= 0 && int(t) < len(m.rx) && s >= 0 && int(s) < len(m.rx[t]) { m.rx[t][s].Inc(i) } else if m.rxUnknown != nil { m.rxUnknown.Inc(i) } } } func (m *MessageMetrics) Tx(t header.MessageType, s header.MessageSubType, i int64) { if m != nil { if t >= 0 && int(t) < len(m.tx) && s >= 0 && int(s) < len(m.tx[t]) { m.tx[t][s].Inc(i) } else if m.txUnknown != nil { m.txUnknown.Inc(i) } } } func newMessageMetrics() *MessageMetrics { gen := func(t string) [][]metrics.Counter { return [][]metrics.Counter{ { metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.handshake_ixpsk0", t), nil), }, nil, {metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.recv_error", 
t), nil)}, {metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.lighthouse", t), nil)}, { metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.test_request", t), nil), metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.test_response", t), nil), }, {metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.close_tunnel", t), nil)}, } } return &MessageMetrics{ rx: gen("rx"), tx: gen("tx"), rxUnknown: metrics.GetOrRegisterCounter("messages.rx.other", nil), txUnknown: metrics.GetOrRegisterCounter("messages.tx.other", nil), } } // Historically we only recorded recv_error, so this is backwards compat func newMessageMetricsOnlyRecvError() *MessageMetrics { gen := func(t string) [][]metrics.Counter { return [][]metrics.Counter{ nil, nil, {metrics.GetOrRegisterCounter(fmt.Sprintf("messages.%s.recv_error", t), nil)}, } } return &MessageMetrics{ rx: gen("rx"), tx: gen("tx"), } } func newLighthouseMetrics() *MessageMetrics { gen := func(t string) [][]metrics.Counter { h := make([][]metrics.Counter, len(NebulaMeta_MessageType_name)) used := []NebulaMeta_MessageType{ NebulaMeta_HostQuery, NebulaMeta_HostQueryReply, NebulaMeta_HostUpdateNotification, NebulaMeta_HostPunchNotification, } for _, i := range used { h[i] = []metrics.Counter{metrics.GetOrRegisterCounter(fmt.Sprintf("lighthouse.%s.%s", t, i.String()), nil)} } return h } return &MessageMetrics{ rx: gen("rx"), tx: gen("tx"), rxUnknown: metrics.GetOrRegisterCounter("lighthouse.rx.other", nil), txUnknown: metrics.GetOrRegisterCounter("lighthouse.tx.other", nil), } } nebula-1.6.1+dfsg/metadata.go000066400000000000000000000004021434072716400160170ustar00rootroot00000000000000package nebula /* import ( proto "google.golang.org/protobuf/proto" ) func HandleMetaProto(p []byte) { m := &NebulaMeta{} err := proto.Unmarshal(p, m) if err != nil { l.Debugf("problem unmarshaling meta message: %s", err) } //fmt.Println(m) } */ 
nebula-1.6.1+dfsg/nebula.pb.go000066400000000000000000001577371434072716400161350ustar00rootroot00000000000000// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: nebula.proto package nebula import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type NebulaMeta_MessageType int32 const ( NebulaMeta_None NebulaMeta_MessageType = 0 NebulaMeta_HostQuery NebulaMeta_MessageType = 1 NebulaMeta_HostQueryReply NebulaMeta_MessageType = 2 NebulaMeta_HostUpdateNotification NebulaMeta_MessageType = 3 NebulaMeta_HostMovedNotification NebulaMeta_MessageType = 4 NebulaMeta_HostPunchNotification NebulaMeta_MessageType = 5 NebulaMeta_HostWhoami NebulaMeta_MessageType = 6 NebulaMeta_HostWhoamiReply NebulaMeta_MessageType = 7 NebulaMeta_PathCheck NebulaMeta_MessageType = 8 NebulaMeta_PathCheckReply NebulaMeta_MessageType = 9 ) var NebulaMeta_MessageType_name = map[int32]string{ 0: "None", 1: "HostQuery", 2: "HostQueryReply", 3: "HostUpdateNotification", 4: "HostMovedNotification", 5: "HostPunchNotification", 6: "HostWhoami", 7: "HostWhoamiReply", 8: "PathCheck", 9: "PathCheckReply", } var NebulaMeta_MessageType_value = map[string]int32{ "None": 0, "HostQuery": 1, "HostQueryReply": 2, "HostUpdateNotification": 3, "HostMovedNotification": 4, "HostPunchNotification": 5, "HostWhoami": 6, "HostWhoamiReply": 7, "PathCheck": 8, "PathCheckReply": 9, } func (x NebulaMeta_MessageType) String() string { return proto.EnumName(NebulaMeta_MessageType_name, int32(x)) } func (NebulaMeta_MessageType) 
EnumDescriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{0, 0} } type NebulaPing_MessageType int32 const ( NebulaPing_Ping NebulaPing_MessageType = 0 NebulaPing_Reply NebulaPing_MessageType = 1 ) var NebulaPing_MessageType_name = map[int32]string{ 0: "Ping", 1: "Reply", } var NebulaPing_MessageType_value = map[string]int32{ "Ping": 0, "Reply": 1, } func (x NebulaPing_MessageType) String() string { return proto.EnumName(NebulaPing_MessageType_name, int32(x)) } func (NebulaPing_MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{4, 0} } type NebulaControl_MessageType int32 const ( NebulaControl_None NebulaControl_MessageType = 0 NebulaControl_CreateRelayRequest NebulaControl_MessageType = 1 NebulaControl_CreateRelayResponse NebulaControl_MessageType = 2 ) var NebulaControl_MessageType_name = map[int32]string{ 0: "None", 1: "CreateRelayRequest", 2: "CreateRelayResponse", } var NebulaControl_MessageType_value = map[string]int32{ "None": 0, "CreateRelayRequest": 1, "CreateRelayResponse": 2, } func (x NebulaControl_MessageType) String() string { return proto.EnumName(NebulaControl_MessageType_name, int32(x)) } func (NebulaControl_MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{7, 0} } type NebulaMeta struct { Type NebulaMeta_MessageType `protobuf:"varint,1,opt,name=Type,proto3,enum=nebula.NebulaMeta_MessageType" json:"Type,omitempty"` Details *NebulaMetaDetails `protobuf:"bytes,2,opt,name=Details,proto3" json:"Details,omitempty"` } func (m *NebulaMeta) Reset() { *m = NebulaMeta{} } func (m *NebulaMeta) String() string { return proto.CompactTextString(m) } func (*NebulaMeta) ProtoMessage() {} func (*NebulaMeta) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{0} } func (m *NebulaMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NebulaMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if 
deterministic { return xxx_messageInfo_NebulaMeta.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *NebulaMeta) XXX_Merge(src proto.Message) { xxx_messageInfo_NebulaMeta.Merge(m, src) } func (m *NebulaMeta) XXX_Size() int { return m.Size() } func (m *NebulaMeta) XXX_DiscardUnknown() { xxx_messageInfo_NebulaMeta.DiscardUnknown(m) } var xxx_messageInfo_NebulaMeta proto.InternalMessageInfo func (m *NebulaMeta) GetType() NebulaMeta_MessageType { if m != nil { return m.Type } return NebulaMeta_None } func (m *NebulaMeta) GetDetails() *NebulaMetaDetails { if m != nil { return m.Details } return nil } type NebulaMetaDetails struct { VpnIp uint32 `protobuf:"varint,1,opt,name=VpnIp,proto3" json:"VpnIp,omitempty"` Ip4AndPorts []*Ip4AndPort `protobuf:"bytes,2,rep,name=Ip4AndPorts,proto3" json:"Ip4AndPorts,omitempty"` Ip6AndPorts []*Ip6AndPort `protobuf:"bytes,4,rep,name=Ip6AndPorts,proto3" json:"Ip6AndPorts,omitempty"` RelayVpnIp []uint32 `protobuf:"varint,5,rep,packed,name=RelayVpnIp,proto3" json:"RelayVpnIp,omitempty"` Counter uint32 `protobuf:"varint,3,opt,name=counter,proto3" json:"counter,omitempty"` } func (m *NebulaMetaDetails) Reset() { *m = NebulaMetaDetails{} } func (m *NebulaMetaDetails) String() string { return proto.CompactTextString(m) } func (*NebulaMetaDetails) ProtoMessage() {} func (*NebulaMetaDetails) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{1} } func (m *NebulaMetaDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NebulaMetaDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_NebulaMetaDetails.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *NebulaMetaDetails) XXX_Merge(src proto.Message) { xxx_messageInfo_NebulaMetaDetails.Merge(m, 
src) } func (m *NebulaMetaDetails) XXX_Size() int { return m.Size() } func (m *NebulaMetaDetails) XXX_DiscardUnknown() { xxx_messageInfo_NebulaMetaDetails.DiscardUnknown(m) } var xxx_messageInfo_NebulaMetaDetails proto.InternalMessageInfo func (m *NebulaMetaDetails) GetVpnIp() uint32 { if m != nil { return m.VpnIp } return 0 } func (m *NebulaMetaDetails) GetIp4AndPorts() []*Ip4AndPort { if m != nil { return m.Ip4AndPorts } return nil } func (m *NebulaMetaDetails) GetIp6AndPorts() []*Ip6AndPort { if m != nil { return m.Ip6AndPorts } return nil } func (m *NebulaMetaDetails) GetRelayVpnIp() []uint32 { if m != nil { return m.RelayVpnIp } return nil } func (m *NebulaMetaDetails) GetCounter() uint32 { if m != nil { return m.Counter } return 0 } type Ip4AndPort struct { Ip uint32 `protobuf:"varint,1,opt,name=Ip,proto3" json:"Ip,omitempty"` Port uint32 `protobuf:"varint,2,opt,name=Port,proto3" json:"Port,omitempty"` } func (m *Ip4AndPort) Reset() { *m = Ip4AndPort{} } func (m *Ip4AndPort) String() string { return proto.CompactTextString(m) } func (*Ip4AndPort) ProtoMessage() {} func (*Ip4AndPort) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{2} } func (m *Ip4AndPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Ip4AndPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Ip4AndPort.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Ip4AndPort) XXX_Merge(src proto.Message) { xxx_messageInfo_Ip4AndPort.Merge(m, src) } func (m *Ip4AndPort) XXX_Size() int { return m.Size() } func (m *Ip4AndPort) XXX_DiscardUnknown() { xxx_messageInfo_Ip4AndPort.DiscardUnknown(m) } var xxx_messageInfo_Ip4AndPort proto.InternalMessageInfo func (m *Ip4AndPort) GetIp() uint32 { if m != nil { return m.Ip } return 0 } func (m *Ip4AndPort) GetPort() uint32 { if m != nil { return m.Port } return 0 
} type Ip6AndPort struct { Hi uint64 `protobuf:"varint,1,opt,name=Hi,proto3" json:"Hi,omitempty"` Lo uint64 `protobuf:"varint,2,opt,name=Lo,proto3" json:"Lo,omitempty"` Port uint32 `protobuf:"varint,3,opt,name=Port,proto3" json:"Port,omitempty"` } func (m *Ip6AndPort) Reset() { *m = Ip6AndPort{} } func (m *Ip6AndPort) String() string { return proto.CompactTextString(m) } func (*Ip6AndPort) ProtoMessage() {} func (*Ip6AndPort) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{3} } func (m *Ip6AndPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Ip6AndPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Ip6AndPort.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Ip6AndPort) XXX_Merge(src proto.Message) { xxx_messageInfo_Ip6AndPort.Merge(m, src) } func (m *Ip6AndPort) XXX_Size() int { return m.Size() } func (m *Ip6AndPort) XXX_DiscardUnknown() { xxx_messageInfo_Ip6AndPort.DiscardUnknown(m) } var xxx_messageInfo_Ip6AndPort proto.InternalMessageInfo func (m *Ip6AndPort) GetHi() uint64 { if m != nil { return m.Hi } return 0 } func (m *Ip6AndPort) GetLo() uint64 { if m != nil { return m.Lo } return 0 } func (m *Ip6AndPort) GetPort() uint32 { if m != nil { return m.Port } return 0 } type NebulaPing struct { Type NebulaPing_MessageType `protobuf:"varint,1,opt,name=Type,proto3,enum=nebula.NebulaPing_MessageType" json:"Type,omitempty"` Time uint64 `protobuf:"varint,2,opt,name=Time,proto3" json:"Time,omitempty"` } func (m *NebulaPing) Reset() { *m = NebulaPing{} } func (m *NebulaPing) String() string { return proto.CompactTextString(m) } func (*NebulaPing) ProtoMessage() {} func (*NebulaPing) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{4} } func (m *NebulaPing) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NebulaPing) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_NebulaPing.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *NebulaPing) XXX_Merge(src proto.Message) { xxx_messageInfo_NebulaPing.Merge(m, src) } func (m *NebulaPing) XXX_Size() int { return m.Size() } func (m *NebulaPing) XXX_DiscardUnknown() { xxx_messageInfo_NebulaPing.DiscardUnknown(m) } var xxx_messageInfo_NebulaPing proto.InternalMessageInfo func (m *NebulaPing) GetType() NebulaPing_MessageType { if m != nil { return m.Type } return NebulaPing_Ping } func (m *NebulaPing) GetTime() uint64 { if m != nil { return m.Time } return 0 } type NebulaHandshake struct { Details *NebulaHandshakeDetails `protobuf:"bytes,1,opt,name=Details,proto3" json:"Details,omitempty"` Hmac []byte `protobuf:"bytes,2,opt,name=Hmac,proto3" json:"Hmac,omitempty"` } func (m *NebulaHandshake) Reset() { *m = NebulaHandshake{} } func (m *NebulaHandshake) String() string { return proto.CompactTextString(m) } func (*NebulaHandshake) ProtoMessage() {} func (*NebulaHandshake) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{5} } func (m *NebulaHandshake) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NebulaHandshake) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_NebulaHandshake.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *NebulaHandshake) XXX_Merge(src proto.Message) { xxx_messageInfo_NebulaHandshake.Merge(m, src) } func (m *NebulaHandshake) XXX_Size() int { return m.Size() } func (m *NebulaHandshake) XXX_DiscardUnknown() { xxx_messageInfo_NebulaHandshake.DiscardUnknown(m) } var xxx_messageInfo_NebulaHandshake proto.InternalMessageInfo func (m *NebulaHandshake) GetDetails() 
*NebulaHandshakeDetails { if m != nil { return m.Details } return nil } func (m *NebulaHandshake) GetHmac() []byte { if m != nil { return m.Hmac } return nil } type NebulaHandshakeDetails struct { Cert []byte `protobuf:"bytes,1,opt,name=Cert,proto3" json:"Cert,omitempty"` InitiatorIndex uint32 `protobuf:"varint,2,opt,name=InitiatorIndex,proto3" json:"InitiatorIndex,omitempty"` ResponderIndex uint32 `protobuf:"varint,3,opt,name=ResponderIndex,proto3" json:"ResponderIndex,omitempty"` Cookie uint64 `protobuf:"varint,4,opt,name=Cookie,proto3" json:"Cookie,omitempty"` Time uint64 `protobuf:"varint,5,opt,name=Time,proto3" json:"Time,omitempty"` } func (m *NebulaHandshakeDetails) Reset() { *m = NebulaHandshakeDetails{} } func (m *NebulaHandshakeDetails) String() string { return proto.CompactTextString(m) } func (*NebulaHandshakeDetails) ProtoMessage() {} func (*NebulaHandshakeDetails) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{6} } func (m *NebulaHandshakeDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NebulaHandshakeDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_NebulaHandshakeDetails.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *NebulaHandshakeDetails) XXX_Merge(src proto.Message) { xxx_messageInfo_NebulaHandshakeDetails.Merge(m, src) } func (m *NebulaHandshakeDetails) XXX_Size() int { return m.Size() } func (m *NebulaHandshakeDetails) XXX_DiscardUnknown() { xxx_messageInfo_NebulaHandshakeDetails.DiscardUnknown(m) } var xxx_messageInfo_NebulaHandshakeDetails proto.InternalMessageInfo func (m *NebulaHandshakeDetails) GetCert() []byte { if m != nil { return m.Cert } return nil } func (m *NebulaHandshakeDetails) GetInitiatorIndex() uint32 { if m != nil { return m.InitiatorIndex } return 0 } func (m *NebulaHandshakeDetails) GetResponderIndex() 
uint32 { if m != nil { return m.ResponderIndex } return 0 } func (m *NebulaHandshakeDetails) GetCookie() uint64 { if m != nil { return m.Cookie } return 0 } func (m *NebulaHandshakeDetails) GetTime() uint64 { if m != nil { return m.Time } return 0 } type NebulaControl struct { Type NebulaControl_MessageType `protobuf:"varint,1,opt,name=Type,proto3,enum=nebula.NebulaControl_MessageType" json:"Type,omitempty"` InitiatorRelayIndex uint32 `protobuf:"varint,2,opt,name=InitiatorRelayIndex,proto3" json:"InitiatorRelayIndex,omitempty"` ResponderRelayIndex uint32 `protobuf:"varint,3,opt,name=ResponderRelayIndex,proto3" json:"ResponderRelayIndex,omitempty"` RelayToIp uint32 `protobuf:"varint,4,opt,name=RelayToIp,proto3" json:"RelayToIp,omitempty"` RelayFromIp uint32 `protobuf:"varint,5,opt,name=RelayFromIp,proto3" json:"RelayFromIp,omitempty"` } func (m *NebulaControl) Reset() { *m = NebulaControl{} } func (m *NebulaControl) String() string { return proto.CompactTextString(m) } func (*NebulaControl) ProtoMessage() {} func (*NebulaControl) Descriptor() ([]byte, []int) { return fileDescriptor_2d65afa7693df5ef, []int{7} } func (m *NebulaControl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NebulaControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_NebulaControl.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *NebulaControl) XXX_Merge(src proto.Message) { xxx_messageInfo_NebulaControl.Merge(m, src) } func (m *NebulaControl) XXX_Size() int { return m.Size() } func (m *NebulaControl) XXX_DiscardUnknown() { xxx_messageInfo_NebulaControl.DiscardUnknown(m) } var xxx_messageInfo_NebulaControl proto.InternalMessageInfo func (m *NebulaControl) GetType() NebulaControl_MessageType { if m != nil { return m.Type } return NebulaControl_None } func (m *NebulaControl) GetInitiatorRelayIndex() uint32 { if m != nil 
{ return m.InitiatorRelayIndex } return 0 } func (m *NebulaControl) GetResponderRelayIndex() uint32 { if m != nil { return m.ResponderRelayIndex } return 0 } func (m *NebulaControl) GetRelayToIp() uint32 { if m != nil { return m.RelayToIp } return 0 } func (m *NebulaControl) GetRelayFromIp() uint32 { if m != nil { return m.RelayFromIp } return 0 } func init() { proto.RegisterEnum("nebula.NebulaMeta_MessageType", NebulaMeta_MessageType_name, NebulaMeta_MessageType_value) proto.RegisterEnum("nebula.NebulaPing_MessageType", NebulaPing_MessageType_name, NebulaPing_MessageType_value) proto.RegisterEnum("nebula.NebulaControl_MessageType", NebulaControl_MessageType_name, NebulaControl_MessageType_value) proto.RegisterType((*NebulaMeta)(nil), "nebula.NebulaMeta") proto.RegisterType((*NebulaMetaDetails)(nil), "nebula.NebulaMetaDetails") proto.RegisterType((*Ip4AndPort)(nil), "nebula.Ip4AndPort") proto.RegisterType((*Ip6AndPort)(nil), "nebula.Ip6AndPort") proto.RegisterType((*NebulaPing)(nil), "nebula.NebulaPing") proto.RegisterType((*NebulaHandshake)(nil), "nebula.NebulaHandshake") proto.RegisterType((*NebulaHandshakeDetails)(nil), "nebula.NebulaHandshakeDetails") proto.RegisterType((*NebulaControl)(nil), "nebula.NebulaControl") } func init() { proto.RegisterFile("nebula.proto", fileDescriptor_2d65afa7693df5ef) } var fileDescriptor_2d65afa7693df5ef = []byte{ // 696 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xcd, 0x6e, 0xd3, 0x4a, 0x14, 0x8e, 0x1d, 0xe7, 0xef, 0xa4, 0x49, 0x7d, 0x4f, 0xef, 0xcd, 0x4d, 0xaf, 0xae, 0xac, 0xe0, 0x05, 0xca, 0x2a, 0xad, 0xd2, 0x52, 0xb1, 0x04, 0x82, 0x50, 0x52, 0xb5, 0x55, 0x18, 0x15, 0x90, 0xd8, 0xa0, 0x69, 0x32, 0xd4, 0x56, 0x12, 0x8f, 0x6b, 0x4f, 0x50, 0xf3, 0x16, 0x3c, 0x4c, 0x1f, 0x82, 0x05, 0x12, 0x5d, 0xb0, 0x60, 0x89, 0xda, 0x17, 0x41, 0x33, 0x76, 0x6c, 0x27, 0x0d, 0xec, 0xce, 0xcf, 0xf7, 0xcd, 0x7c, 0xe7, 0x9b, 0x63, 0xc3, 0x96, 0xc7, 0x2e, 0xe6, 0x53, 0xda, 0xf1, 0x03, 
0x2e, 0x38, 0x16, 0xa3, 0xcc, 0xfe, 0xaa, 0x03, 0x9c, 0xa9, 0xf0, 0x94, 0x09, 0x8a, 0x5d, 0x30, 0xce, 0x17, 0x3e, 0x6b, 0x6a, 0x2d, 0xad, 0x5d, 0xef, 0x5a, 0x9d, 0x98, 0x93, 0x22, 0x3a, 0xa7, 0x2c, 0x0c, 0xe9, 0x25, 0x93, 0x28, 0xa2, 0xb0, 0x78, 0x00, 0xa5, 0x97, 0x4c, 0x50, 0x77, 0x1a, 0x36, 0xf5, 0x96, 0xd6, 0xae, 0x76, 0x77, 0x1f, 0xd2, 0x62, 0x00, 0x59, 0x22, 0xed, 0xef, 0x1a, 0x54, 0x33, 0x47, 0x61, 0x19, 0x8c, 0x33, 0xee, 0x31, 0x33, 0x87, 0x35, 0xa8, 0xf4, 0x79, 0x28, 0x5e, 0xcf, 0x59, 0xb0, 0x30, 0x35, 0x44, 0xa8, 0x27, 0x29, 0x61, 0xfe, 0x74, 0x61, 0xea, 0xf8, 0x1f, 0x34, 0x64, 0xed, 0x8d, 0x3f, 0xa6, 0x82, 0x9d, 0x71, 0xe1, 0x7e, 0x74, 0x47, 0x54, 0xb8, 0xdc, 0x33, 0xf3, 0xb8, 0x0b, 0xff, 0xc8, 0xde, 0x29, 0xff, 0xc4, 0xc6, 0x2b, 0x2d, 0x63, 0xd9, 0x1a, 0xce, 0xbd, 0x91, 0xb3, 0xd2, 0x2a, 0x60, 0x1d, 0x40, 0xb6, 0xde, 0x39, 0x9c, 0xce, 0x5c, 0xb3, 0x88, 0x3b, 0xb0, 0x9d, 0xe6, 0xd1, 0xb5, 0x25, 0xa9, 0x6c, 0x48, 0x85, 0xd3, 0x73, 0xd8, 0x68, 0x62, 0x96, 0xa5, 0xb2, 0x24, 0x8d, 0x20, 0x15, 0xfb, 0x9b, 0x06, 0x7f, 0x3d, 0x98, 0x1a, 0xff, 0x86, 0xc2, 0x5b, 0xdf, 0x1b, 0xf8, 0xca, 0xd6, 0x1a, 0x89, 0x12, 0x3c, 0x84, 0xea, 0xc0, 0x3f, 0x7c, 0xee, 0x8d, 0x87, 0x3c, 0x10, 0xd2, 0xbb, 0x7c, 0xbb, 0xda, 0xc5, 0xa5, 0x77, 0x69, 0x8b, 0x64, 0x61, 0x11, 0xeb, 0x28, 0x61, 0x19, 0xeb, 0xac, 0xa3, 0x0c, 0x2b, 0x81, 0xa1, 0x05, 0x40, 0xd8, 0x94, 0x2e, 0x22, 0x19, 0x85, 0x56, 0xbe, 0x5d, 0x23, 0x99, 0x0a, 0x36, 0xa1, 0x34, 0xe2, 0x73, 0x4f, 0xb0, 0xa0, 0x99, 0x57, 0x1a, 0x97, 0xa9, 0xbd, 0x0f, 0x90, 0x5e, 0x8f, 0x75, 0xd0, 0x93, 0x31, 0xf4, 0x81, 0x8f, 0x08, 0x86, 0xac, 0xab, 0x87, 0xaf, 0x11, 0x15, 0xdb, 0xcf, 0x24, 0xe3, 0x28, 0xc3, 0xe8, 0xbb, 0x8a, 0x61, 0x10, 0xbd, 0xef, 0xca, 0xfc, 0x84, 0x2b, 0xbc, 0x41, 0xf4, 0x13, 0x9e, 0x9c, 0x90, 0xcf, 0x9c, 0x70, 0xbd, 0xdc, 0xc9, 0xa1, 0xeb, 0x5d, 0xfe, 0x79, 0x27, 0x25, 0x62, 0xc3, 0x4e, 0x22, 0x18, 0xe7, 0xee, 0x8c, 0xc5, 0xf7, 0xa8, 0xd8, 0xb6, 0x1f, 0x6c, 0x9c, 0x24, 0x9b, 0x39, 0xac, 0x40, 0x21, 0x7a, 0x3f, 0xcd, 0xfe, 
0x00, 0xdb, 0xd1, 0xb9, 0x7d, 0xea, 0x8d, 0x43, 0x87, 0x4e, 0x18, 0x3e, 0x4d, 0xd7, 0x5b, 0x53, 0xeb, 0xbd, 0xa6, 0x20, 0x41, 0xae, 0xef, 0xb8, 0x14, 0xd1, 0x9f, 0xd1, 0x91, 0x12, 0xb1, 0x45, 0x54, 0x6c, 0xdf, 0x68, 0xd0, 0xd8, 0xcc, 0x93, 0xf0, 0x1e, 0x0b, 0x84, 0xba, 0x65, 0x8b, 0xa8, 0x18, 0x1f, 0x43, 0x7d, 0xe0, 0xb9, 0xc2, 0xa5, 0x82, 0x07, 0x03, 0x6f, 0xcc, 0xae, 0x63, 0xa7, 0xd7, 0xaa, 0x12, 0x47, 0x58, 0xe8, 0x73, 0x6f, 0xcc, 0x62, 0x5c, 0xe4, 0xe7, 0x5a, 0x15, 0x1b, 0x50, 0xec, 0x71, 0x3e, 0x71, 0x59, 0xd3, 0x50, 0xce, 0xc4, 0x59, 0xe2, 0x57, 0x21, 0xf5, 0xeb, 0xd8, 0x28, 0x17, 0xcd, 0xd2, 0xb1, 0x51, 0x2e, 0x99, 0x65, 0xfb, 0x46, 0x87, 0x5a, 0x24, 0xbb, 0xc7, 0x3d, 0x11, 0xf0, 0x29, 0x3e, 0x59, 0x79, 0x95, 0x47, 0xab, 0x9e, 0xc4, 0xa0, 0x0d, 0x0f, 0xb3, 0x0f, 0x3b, 0x89, 0x74, 0xb5, 0x7f, 0xd9, 0xa9, 0x36, 0xb5, 0x24, 0x23, 0x19, 0x22, 0xc3, 0x88, 0xe6, 0xdb, 0xd4, 0xc2, 0xff, 0xa1, 0xa2, 0xb2, 0x73, 0x3e, 0xf0, 0xd5, 0x9c, 0x35, 0x92, 0x16, 0xb0, 0x05, 0x55, 0x95, 0xbc, 0x0a, 0xf8, 0x4c, 0x7d, 0x0b, 0xb2, 0x9f, 0x2d, 0xd9, 0xfd, 0xdf, 0xfd, 0x9a, 0x1a, 0x80, 0xbd, 0x80, 0x51, 0xc1, 0x14, 0x9a, 0xb0, 0xab, 0x39, 0x0b, 0x85, 0xa9, 0xe1, 0xbf, 0xb0, 0xb3, 0x52, 0x97, 0x92, 0x42, 0x66, 0xea, 0x2f, 0x0e, 0xbe, 0xdc, 0x59, 0xda, 0xed, 0x9d, 0xa5, 0xfd, 0xbc, 0xb3, 0xb4, 0xcf, 0xf7, 0x56, 0xee, 0xf6, 0xde, 0xca, 0xfd, 0xb8, 0xb7, 0x72, 0xef, 0x77, 0x2f, 0x5d, 0xe1, 0xcc, 0x2f, 0x3a, 0x23, 0x3e, 0xdb, 0x0b, 0xa7, 0x74, 0x34, 0x71, 0xae, 0xf6, 0x22, 0x0b, 0x2f, 0x8a, 0xea, 0x0f, 0x7d, 0xf0, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xcd, 0xd7, 0xbe, 0xd5, 0xb1, 0x05, 0x00, 0x00, } func (m *NebulaMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NebulaMeta) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NebulaMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := 
len(dAtA) _ = i var l int _ = l if m.Details != nil { { size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintNebula(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } if m.Type != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Type)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *NebulaMetaDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NebulaMetaDetails) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NebulaMetaDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.RelayVpnIp) > 0 { dAtA3 := make([]byte, len(m.RelayVpnIp)*10) var j2 int for _, num := range m.RelayVpnIp { for num >= 1<<7 { dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j2++ } dAtA3[j2] = uint8(num) j2++ } i -= j2 copy(dAtA[i:], dAtA3[:j2]) i = encodeVarintNebula(dAtA, i, uint64(j2)) i-- dAtA[i] = 0x2a } if len(m.Ip6AndPorts) > 0 { for iNdEx := len(m.Ip6AndPorts) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Ip6AndPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintNebula(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if m.Counter != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Counter)) i-- dAtA[i] = 0x18 } if len(m.Ip4AndPorts) > 0 { for iNdEx := len(m.Ip4AndPorts) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Ip4AndPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintNebula(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } if m.VpnIp != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.VpnIp)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *Ip4AndPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
if err != nil { return nil, err } return dAtA[:n], nil } func (m *Ip4AndPort) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Ip4AndPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Port != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Port)) i-- dAtA[i] = 0x10 } if m.Ip != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Ip)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *Ip6AndPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Ip6AndPort) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Ip6AndPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Port != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Port)) i-- dAtA[i] = 0x18 } if m.Lo != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Lo)) i-- dAtA[i] = 0x10 } if m.Hi != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Hi)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *NebulaPing) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NebulaPing) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NebulaPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Time != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Time)) i-- dAtA[i] = 0x10 } if m.Type != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Type)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *NebulaHandshake) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, 
err } return dAtA[:n], nil } func (m *NebulaHandshake) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NebulaHandshake) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Hmac) > 0 { i -= len(m.Hmac) copy(dAtA[i:], m.Hmac) i = encodeVarintNebula(dAtA, i, uint64(len(m.Hmac))) i-- dAtA[i] = 0x12 } if m.Details != nil { { size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintNebula(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *NebulaHandshakeDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NebulaHandshakeDetails) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NebulaHandshakeDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Time != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Time)) i-- dAtA[i] = 0x28 } if m.Cookie != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Cookie)) i-- dAtA[i] = 0x20 } if m.ResponderIndex != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.ResponderIndex)) i-- dAtA[i] = 0x18 } if m.InitiatorIndex != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.InitiatorIndex)) i-- dAtA[i] = 0x10 } if len(m.Cert) > 0 { i -= len(m.Cert) copy(dAtA[i:], m.Cert) i = encodeVarintNebula(dAtA, i, uint64(len(m.Cert))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *NebulaControl) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NebulaControl) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *NebulaControl) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.RelayFromIp != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.RelayFromIp)) i-- dAtA[i] = 0x28 } if m.RelayToIp != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.RelayToIp)) i-- dAtA[i] = 0x20 } if m.ResponderRelayIndex != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.ResponderRelayIndex)) i-- dAtA[i] = 0x18 } if m.InitiatorRelayIndex != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.InitiatorRelayIndex)) i-- dAtA[i] = 0x10 } if m.Type != 0 { i = encodeVarintNebula(dAtA, i, uint64(m.Type)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func encodeVarintNebula(dAtA []byte, offset int, v uint64) int { offset -= sovNebula(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *NebulaMeta) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Type != 0 { n += 1 + sovNebula(uint64(m.Type)) } if m.Details != nil { l = m.Details.Size() n += 1 + l + sovNebula(uint64(l)) } return n } func (m *NebulaMetaDetails) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.VpnIp != 0 { n += 1 + sovNebula(uint64(m.VpnIp)) } if len(m.Ip4AndPorts) > 0 { for _, e := range m.Ip4AndPorts { l = e.Size() n += 1 + l + sovNebula(uint64(l)) } } if m.Counter != 0 { n += 1 + sovNebula(uint64(m.Counter)) } if len(m.Ip6AndPorts) > 0 { for _, e := range m.Ip6AndPorts { l = e.Size() n += 1 + l + sovNebula(uint64(l)) } } if len(m.RelayVpnIp) > 0 { l = 0 for _, e := range m.RelayVpnIp { l += sovNebula(uint64(e)) } n += 1 + sovNebula(uint64(l)) + l } return n } func (m *Ip4AndPort) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Ip != 0 { n += 1 + sovNebula(uint64(m.Ip)) } if m.Port != 0 { n += 1 + sovNebula(uint64(m.Port)) } return n } func (m *Ip6AndPort) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Hi != 0 { n += 1 + sovNebula(uint64(m.Hi)) } if m.Lo != 0 { n += 1 + 
sovNebula(uint64(m.Lo)) } if m.Port != 0 { n += 1 + sovNebula(uint64(m.Port)) } return n } func (m *NebulaPing) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Type != 0 { n += 1 + sovNebula(uint64(m.Type)) } if m.Time != 0 { n += 1 + sovNebula(uint64(m.Time)) } return n } func (m *NebulaHandshake) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Details != nil { l = m.Details.Size() n += 1 + l + sovNebula(uint64(l)) } l = len(m.Hmac) if l > 0 { n += 1 + l + sovNebula(uint64(l)) } return n } func (m *NebulaHandshakeDetails) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Cert) if l > 0 { n += 1 + l + sovNebula(uint64(l)) } if m.InitiatorIndex != 0 { n += 1 + sovNebula(uint64(m.InitiatorIndex)) } if m.ResponderIndex != 0 { n += 1 + sovNebula(uint64(m.ResponderIndex)) } if m.Cookie != 0 { n += 1 + sovNebula(uint64(m.Cookie)) } if m.Time != 0 { n += 1 + sovNebula(uint64(m.Time)) } return n } func (m *NebulaControl) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Type != 0 { n += 1 + sovNebula(uint64(m.Type)) } if m.InitiatorRelayIndex != 0 { n += 1 + sovNebula(uint64(m.InitiatorRelayIndex)) } if m.ResponderRelayIndex != 0 { n += 1 + sovNebula(uint64(m.ResponderRelayIndex)) } if m.RelayToIp != 0 { n += 1 + sovNebula(uint64(m.RelayToIp)) } if m.RelayFromIp != 0 { n += 1 + sovNebula(uint64(m.RelayFromIp)) } return n } func sovNebula(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozNebula(x uint64) (n int) { return sovNebula(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *NebulaMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return 
fmt.Errorf("proto: NebulaMeta: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NebulaMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Type |= NebulaMeta_MessageType(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } if m.Details == nil { m.Details = &NebulaMetaDetails{} } if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NebulaMetaDetails) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NebulaMetaDetails: wiretype end group for non-group") } if 
fieldNum <= 0 { return fmt.Errorf("proto: NebulaMetaDetails: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field VpnIp", wireType) } m.VpnIp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.VpnIp |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ip4AndPorts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } m.Ip4AndPorts = append(m.Ip4AndPorts, &Ip4AndPort{}) if err := m.Ip4AndPorts[len(m.Ip4AndPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) } m.Counter = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Counter |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ip6AndPorts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } m.Ip6AndPorts = 
append(m.Ip6AndPorts, &Ip6AndPort{}) if err := m.Ip6AndPorts[len(m.Ip6AndPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType == 0 { var v uint32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= uint32(b&0x7F) << shift if b < 0x80 { break } } m.RelayVpnIp = append(m.RelayVpnIp, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= int(b&0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + packedLen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } var elementCount int var count int for _, integer := range dAtA[iNdEx:postIndex] { if integer < 128 { count++ } } elementCount = count if elementCount != 0 && len(m.RelayVpnIp) == 0 { m.RelayVpnIp = make([]uint32, 0, elementCount) } for iNdEx < postIndex { var v uint32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= uint32(b&0x7F) << shift if b < 0x80 { break } } m.RelayVpnIp = append(m.RelayVpnIp, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field RelayVpnIp", wireType) } default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Ip4AndPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Ip4AndPort: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Ip4AndPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) } m.Ip = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Ip |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Port |= uint32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Ip6AndPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Ip6AndPort: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Ip6AndPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Hi", wireType) } m.Hi = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Hi |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Lo", wireType) } m.Lo = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Lo |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Port |= uint32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NebulaPing) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NebulaPing: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NebulaPing: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } m.Type = 0 for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Type |= NebulaPing_MessageType(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } m.Time = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Time |= uint64(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NebulaHandshake) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NebulaHandshake: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NebulaHandshake: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } if m.Details == nil { m.Details = 
&NebulaHandshakeDetails{} } if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Hmac", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } m.Hmac = append(m.Hmac[:0], dAtA[iNdEx:postIndex]...) if m.Hmac == nil { m.Hmac = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NebulaHandshakeDetails) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NebulaHandshakeDetails: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NebulaHandshakeDetails: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b 
< 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthNebula } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthNebula } if postIndex > l { return io.ErrUnexpectedEOF } m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) if m.Cert == nil { m.Cert = []byte{} } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field InitiatorIndex", wireType) } m.InitiatorIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.InitiatorIndex |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ResponderIndex", wireType) } m.ResponderIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ResponderIndex |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Cookie", wireType) } m.Cookie = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Cookie |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } m.Time = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Time |= uint64(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NebulaControl) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NebulaControl: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NebulaControl: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Type |= NebulaControl_MessageType(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field InitiatorRelayIndex", wireType) } m.InitiatorRelayIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.InitiatorRelayIndex |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ResponderRelayIndex", wireType) } m.ResponderRelayIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ResponderRelayIndex |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RelayToIp", wireType) } m.RelayToIp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.RelayToIp |= uint32(b&0x7F) << shift 
if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RelayFromIp", wireType) } m.RelayFromIp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowNebula } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.RelayFromIp |= uint32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipNebula(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthNebula } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipNebula(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowNebula } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowNebula } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowNebula } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthNebula } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupNebula } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthNebula } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthNebula = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowNebula = 
fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupNebula = fmt.Errorf("proto: unexpected end of group") ) nebula-1.6.1+dfsg/nebula.proto000066400000000000000000000026141434072716400162520ustar00rootroot00000000000000syntax = "proto3"; package nebula; option go_package = "github.com/slackhq/nebula"; message NebulaMeta { enum MessageType { None = 0; HostQuery = 1; HostQueryReply = 2; HostUpdateNotification = 3; HostMovedNotification = 4; HostPunchNotification = 5; HostWhoami = 6; HostWhoamiReply = 7; PathCheck = 8; PathCheckReply = 9; } MessageType Type = 1; NebulaMetaDetails Details = 2; } message NebulaMetaDetails { uint32 VpnIp = 1; repeated Ip4AndPort Ip4AndPorts = 2; repeated Ip6AndPort Ip6AndPorts = 4; repeated uint32 RelayVpnIp = 5; uint32 counter = 3; } message Ip4AndPort { uint32 Ip = 1; uint32 Port = 2; } message Ip6AndPort { uint64 Hi = 1; uint64 Lo = 2; uint32 Port = 3; } message NebulaPing { enum MessageType { Ping = 0; Reply = 1; } MessageType Type = 1; uint64 Time = 2; } message NebulaHandshake { NebulaHandshakeDetails Details = 1; bytes Hmac = 2; } message NebulaHandshakeDetails { bytes Cert = 1; uint32 InitiatorIndex = 2; uint32 ResponderIndex = 3; uint64 Cookie = 4; uint64 Time = 5; // reserved for WIP multiport reserved 6, 7; } message NebulaControl { enum MessageType { None = 0; CreateRelayRequest = 1; CreateRelayResponse = 2; } MessageType Type = 1; uint32 InitiatorRelayIndex = 2; uint32 ResponderRelayIndex = 3; uint32 RelayToIp = 4; uint32 RelayFromIp = 5; } nebula-1.6.1+dfsg/noise.go000066400000000000000000000040171434072716400153620ustar00rootroot00000000000000package nebula import ( "crypto/cipher" "encoding/binary" "errors" "github.com/flynn/noise" ) type endianness interface { PutUint64(b []byte, v uint64) } var noiseEndianness endianness = binary.BigEndian type NebulaCipherState struct { c noise.Cipher //k [32]byte //n uint64 } func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState { return &NebulaCipherState{c: 
s.Cipher()} } // EncryptDanger encrypts and authenticates a given payload. // // out is a destination slice to hold the output of the EncryptDanger operation. // - ad is additional data, which will be authenticated and appended to out, but not encrypted. // - plaintext is encrypted, authenticated and appended to out. // - n is a nonce value which must never be re-used with this key. // - nb is a buffer used for temporary storage in the implementation of this call, which should // be re-used by callers to minimize garbage collection. func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) { if s != nil { // TODO: Is this okay now that we have made messageCounter atomic? // Alternative may be to split the counter space into ranges //if n <= s.n { // return nil, errors.New("CRITICAL: a duplicate counter value was used") //} //s.n = n nb[0] = 0 nb[1] = 0 nb[2] = 0 nb[3] = 0 noiseEndianness.PutUint64(nb[4:], n) out = s.c.(cipher.AEAD).Seal(out, nb, plaintext, ad) //l.Debugf("Encryption: outlen: %d, nonce: %d, ad: %s, plainlen %d", len(out), n, ad, len(plaintext)) return out, nil } else { return nil, errors.New("no cipher state available to encrypt") } } func (s *NebulaCipherState) DecryptDanger(out, ad, ciphertext []byte, n uint64, nb []byte) ([]byte, error) { if s != nil { nb[0] = 0 nb[1] = 0 nb[2] = 0 nb[3] = 0 noiseEndianness.PutUint64(nb[4:], n) return s.c.(cipher.AEAD).Open(out, nb, ciphertext, ad) } else { return []byte{}, nil } } func (s *NebulaCipherState) Overhead() int { if s != nil { return s.c.(cipher.AEAD).Overhead() } return 0 } nebula-1.6.1+dfsg/outside.go000066400000000000000000000417111434072716400157230ustar00rootroot00000000000000package nebula import ( "encoding/binary" "errors" "fmt" "time" "github.com/flynn/noise" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" 
"github.com/slackhq/nebula/udp" "golang.org/x/net/ipv4" "google.golang.org/protobuf/proto" ) const ( minFwPacketLen = 4 ) func (f *Interface) readOutsidePackets(addr *udp.Addr, via interface{}, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) { err := h.Parse(packet) if err != nil { // TODO: best if we return this and let caller log // TODO: Might be better to send the literal []byte("holepunch") packet and ignore that? // Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors if len(packet) > 1 { f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", addr, err) } return } //l.Error("in packet ", header, packet[HeaderLen:]) if addr != nil { if ip4 := addr.IP.To4(); ip4 != nil { if ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, iputil.VpnIp(binary.BigEndian.Uint32(ip4))) { if f.l.Level >= logrus.DebugLevel { f.l.WithField("udpAddr", addr).Debug("Refusing to process double encrypted packet") } return } } } var hostinfo *HostInfo // verify if we've seen this index before, otherwise respond to the handshake initiation if h.Type == header.Message && h.Subtype == header.MessageRelay { hostinfo, _ = f.hostMap.QueryRelayIndex(h.RemoteIndex) } else { hostinfo, _ = f.hostMap.QueryIndex(h.RemoteIndex) } var ci *ConnectionState if hostinfo != nil { ci = hostinfo.ConnectionState } switch h.Type { case header.Message: // TODO handleEncrypted sends directly to addr on error. Handle this in the tunneling case. if !f.handleEncrypted(ci, addr, h) { return } switch h.Subtype { case header.MessageNone: f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache) case header.MessageRelay: // The entire body is sent as AD, not encrypted. // The packet consists of a 16-byte parsed Nebula header, Associated Data-protected payload, and a trailing 16-byte AEAD signature value. 
// The packet is guaranteed to be at least 16 bytes at this point, b/c it got past the h.Parse() call above. If it's // otherwise malformed (meaning, there is no trailing 16 byte AEAD value), then this will result in at worst a 0-length slice // which will gracefully fail in the DecryptDanger call. signedPayload := packet[:len(packet)-hostinfo.ConnectionState.dKey.Overhead()] signatureValue := packet[len(packet)-hostinfo.ConnectionState.dKey.Overhead():] out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, signedPayload, signatureValue, h.MessageCounter, nb) if err != nil { return } // Successfully validated the thing. Get rid of the Relay header. signedPayload = signedPayload[header.Len:] // Pull the Roaming parts up here, and return in all call paths. f.handleHostRoaming(hostinfo, addr) f.connectionManager.In(hostinfo.vpnIp) relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex) if !ok { // The only way this happens is if hostmap has an index to the correct HostInfo, but the HostInfo is missing // its internal mapping. This shouldn't happen! hostinfo.logger(f.l).WithField("hostinfo", hostinfo.vpnIp).WithField("remoteIndex", h.RemoteIndex).Errorf("HostInfo missing remote index") // Delete my local index from the hostmap f.hostMap.DeleteRelayIdx(h.RemoteIndex) // When the peer doesn't recieve any return traffic, its connection_manager will eventually clean up // the broken relay when it cleans up the associated HostInfo object. return } switch relay.Type { case TerminalType: // If I am the target of this relay, process the unwrapped packet // From this recursive point, all these variables are 'burned'. We shouldn't rely on them again. 
f.readOutsidePackets(nil, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache) return case ForwardingType: // Find the target HostInfo relay object targetHI, err := f.hostMap.QueryVpnIp(relay.PeerIp) if err != nil { hostinfo.logger(f.l).WithField("peerIp", relay.PeerIp).WithError(err).Info("Failed to find target host info by ip") return } // find the target Relay info object targetRelay, ok := targetHI.relayState.QueryRelayForByIp(hostinfo.vpnIp) if !ok { hostinfo.logger(f.l).WithField("peerIp", relay.PeerIp).Info("Failed to find relay in hostinfo") return } // If that relay is Established, forward the payload through it if targetRelay.State == Established { switch targetRelay.Type { case ForwardingType: // Forward this packet through the relay tunnel // Find the target HostInfo f.SendVia(targetHI, targetRelay, signedPayload, nb, out, false) return case TerminalType: hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal") } } else { hostinfo.logger(f.l).WithField("targetRelayState", targetRelay.State).Info("Unexpected target relay state") return } } } case header.LightHouse: f.messageMetrics.Rx(h.Type, h.Subtype, 1) if !f.handleEncrypted(ci, addr, h) { return } d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb) if err != nil { hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr). WithField("packet", packet). Error("Failed to decrypt lighthouse packet") //TODO: maybe after build 64 is out? 06/14/2018 - NB //f.sendRecvError(net.Addr(addr), header.RemoteIndex) return } lhf(addr, hostinfo.vpnIp, d, f) // Fallthrough to the bottom to record incoming traffic case header.Test: f.messageMetrics.Rx(h.Type, h.Subtype, 1) if !f.handleEncrypted(ci, addr, h) { return } d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb) if err != nil { hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr). WithField("packet", packet). 
Error("Failed to decrypt test packet") //TODO: maybe after build 64 is out? 06/14/2018 - NB //f.sendRecvError(net.Addr(addr), header.RemoteIndex) return } if h.Subtype == header.TestRequest { // This testRequest might be from TryPromoteBest, so we should roam // to the new IP address before responding f.handleHostRoaming(hostinfo, addr) f.send(header.Test, header.TestReply, ci, hostinfo, d, nb, out) } // Fallthrough to the bottom to record incoming traffic // Non encrypted messages below here, they should not fall through to avoid tracking incoming traffic since they // are unauthenticated case header.Handshake: f.messageMetrics.Rx(h.Type, h.Subtype, 1) HandleIncomingHandshake(f, addr, via, packet, h, hostinfo) return case header.RecvError: f.messageMetrics.Rx(h.Type, h.Subtype, 1) f.handleRecvError(addr, h) return case header.CloseTunnel: f.messageMetrics.Rx(h.Type, h.Subtype, 1) if !f.handleEncrypted(ci, addr, h) { return } hostinfo.logger(f.l).WithField("udpAddr", addr). Info("Close tunnel received, tearing down.") f.closeTunnel(hostinfo) return case header.Control: if !f.handleEncrypted(ci, addr, h) { return } d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb) if err != nil { hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr). WithField("packet", packet). 
Error("Failed to decrypt Control packet") return } m := &NebulaControl{} err = m.Unmarshal(d) if err != nil { hostinfo.logger(f.l).WithError(err).Error("Failed to unmarshal control message") break } f.relayManager.HandleControlMsg(hostinfo, m, f) default: f.messageMetrics.Rx(h.Type, h.Subtype, 1) hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", addr) return } f.handleHostRoaming(hostinfo, addr) f.connectionManager.In(hostinfo.vpnIp) } // closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote func (f *Interface) closeTunnel(hostInfo *HostInfo) { //TODO: this would be better as a single function in ConnectionManager that handled locks appropriately f.connectionManager.ClearIP(hostInfo.vpnIp) f.connectionManager.ClearPendingDeletion(hostInfo.vpnIp) f.lightHouse.DeleteVpnIp(hostInfo.vpnIp) f.hostMap.DeleteHostInfo(hostInfo) } // sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote func (f *Interface) sendCloseTunnel(h *HostInfo) { f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu)) } func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udp.Addr) { if addr != nil && !hostinfo.remote.Equals(addr) { if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) { hostinfo.logger(f.l).WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming") return } if !hostinfo.lastRoam.IsZero() && addr.Equals(hostinfo.lastRoamRemote) && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second { if f.l.Level >= logrus.DebugLevel { hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr). Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds) } return } hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr). 
Info("Host roamed to new udp ip/port.") hostinfo.lastRoam = time.Now() hostinfo.lastRoamRemote = hostinfo.remote hostinfo.SetRemote(addr) } } func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udp.Addr, h *header.H) bool { // If connectionstate exists and the replay protector allows, process packet // Else, send recv errors for 300 seconds after a restart to allow fast reconnection. if ci == nil || !ci.window.Check(f.l, h.MessageCounter) { if addr != nil { f.maybeSendRecvError(addr, h.RemoteIndex) return false } else { return false } } return true } // newPacket validates and parses the interesting bits for the firewall out of the ip and sub protocol headers func newPacket(data []byte, incoming bool, fp *firewall.Packet) error { // Do we at least have an ipv4 header worth of data? if len(data) < ipv4.HeaderLen { return fmt.Errorf("packet is less than %v bytes", ipv4.HeaderLen) } // Is it an ipv4 packet? if int((data[0]>>4)&0x0f) != 4 { return fmt.Errorf("packet is not ipv4, type: %v", int((data[0]>>4)&0x0f)) } // Adjust our start position based on the advertised ip header length ihl := int(data[0]&0x0f) << 2 // Well formed ip header length? if ihl < ipv4.HeaderLen { return fmt.Errorf("packet had an invalid header length: %v", ihl) } // Check if this is the second or further fragment of a fragmented packet. flagsfrags := binary.BigEndian.Uint16(data[6:8]) fp.Fragment = (flagsfrags & 0x1FFF) != 0 // Firewall handles protocol checks fp.Protocol = data[9] // Accounting for a variable header length, do we have enough data for our src/dst tuples? 
minLen := ihl if !fp.Fragment && fp.Protocol != firewall.ProtoICMP { minLen += minFwPacketLen } if len(data) < minLen { return fmt.Errorf("packet is less than %v bytes, ip header len: %v", minLen, ihl) } // Firewall packets are locally oriented if incoming { fp.RemoteIP = iputil.Ip2VpnIp(data[12:16]) fp.LocalIP = iputil.Ip2VpnIp(data[16:20]) if fp.Fragment || fp.Protocol == firewall.ProtoICMP { fp.RemotePort = 0 fp.LocalPort = 0 } else { fp.RemotePort = binary.BigEndian.Uint16(data[ihl : ihl+2]) fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4]) } } else { fp.LocalIP = iputil.Ip2VpnIp(data[12:16]) fp.RemoteIP = iputil.Ip2VpnIp(data[16:20]) if fp.Fragment || fp.Protocol == firewall.ProtoICMP { fp.RemotePort = 0 fp.LocalPort = 0 } else { fp.LocalPort = binary.BigEndian.Uint16(data[ihl : ihl+2]) fp.RemotePort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4]) } } return nil } func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []byte, h *header.H, nb []byte) ([]byte, error) { var err error out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], mc, nb) if err != nil { return nil, err } if !hostinfo.ConnectionState.window.Update(f.l, mc) { hostinfo.logger(f.l).WithField("header", h). Debugln("dropping out of window packet") return nil, errors.New("out of window packet") } return out, nil } func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) { var err error out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb) if err != nil { hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet") //TODO: maybe after build 64 is out? 
06/14/2018 - NB //f.sendRecvError(hostinfo.remote, header.RemoteIndex) return } err = newPacket(out, true, fwPacket) if err != nil { hostinfo.logger(f.l).WithError(err).WithField("packet", out). Warnf("Error while validating inbound packet") return } if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) { hostinfo.logger(f.l).WithField("fwPacket", fwPacket). Debugln("dropping out of window packet") return } dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.caPool, localCache) if dropReason != nil { if f.l.Level >= logrus.DebugLevel { hostinfo.logger(f.l).WithField("fwPacket", fwPacket). WithField("reason", dropReason). Debugln("dropping inbound packet") } return } f.connectionManager.In(hostinfo.vpnIp) _, err = f.readers[q].Write(out) if err != nil { f.l.WithError(err).Error("Failed to write to tun") } } func (f *Interface) maybeSendRecvError(endpoint *udp.Addr, index uint32) { if f.sendRecvErrorConfig.ShouldSendRecvError(endpoint.IP) { f.sendRecvError(endpoint, index) } } func (f *Interface) sendRecvError(endpoint *udp.Addr, index uint32) { f.messageMetrics.Tx(header.RecvError, 0, 1) //TODO: this should be a signed message so we can trust that we should drop the index b := header.Encode(make([]byte, header.Len), header.Version, header.RecvError, 0, index, 0) f.outside.WriteTo(b, endpoint) if f.l.Level >= logrus.DebugLevel { f.l.WithField("index", index). WithField("udpAddr", endpoint). Debug("Recv error sent") } } func (f *Interface) handleRecvError(addr *udp.Addr, h *header.H) { if f.l.Level >= logrus.DebugLevel { f.l.WithField("index", h.RemoteIndex). WithField("udpAddr", addr). 
Debug("Recv error received") } // First, clean up in the pending hostmap f.handshakeManager.pendingHostMap.DeleteReverseIndex(h.RemoteIndex) hostinfo, err := f.hostMap.QueryReverseIndex(h.RemoteIndex) if err != nil { f.l.Debugln(err, ": ", h.RemoteIndex) return } hostinfo.Lock() defer hostinfo.Unlock() if !hostinfo.RecvErrorExceeded() { return } if hostinfo.remote != nil && !hostinfo.remote.Equals(addr) { f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote) return } f.closeTunnel(hostinfo) // We also delete it from pending hostmap to allow for // fast reconnect. f.handshakeManager.DeleteHostInfo(hostinfo) } /* func (f *Interface) sendMeta(ci *ConnectionState, endpoint *net.UDPAddr, meta *NebulaMeta) { if ci.eKey != nil { //TODO: log error? return } msg, err := proto.Marshal(meta) if err != nil { l.Debugln("failed to encode header") } c := ci.messageCounter b := HeaderEncode(nil, Version, uint8(metadata), 0, hostinfo.remoteIndexId, c) ci.messageCounter++ msg := ci.eKey.EncryptDanger(b, nil, msg, c) //msg := ci.eKey.EncryptDanger(b, nil, []byte(fmt.Sprintf("%d", counter)), c) f.outside.WriteTo(msg, endpoint) } */ func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte, caPool *cert.NebulaCAPool) (*cert.NebulaCertificate, error) { pk := h.PeerStatic() if pk == nil { return nil, errors.New("no peer static key was present") } if rawCertBytes == nil { return nil, errors.New("provided payload was empty") } r := &cert.RawNebulaCertificate{} err := proto.Unmarshal(rawCertBytes, r) if err != nil { return nil, fmt.Errorf("error unmarshaling cert: %s", err) } // If the Details are nil, just exit to avoid crashing if r.Details == nil { return nil, fmt.Errorf("certificate did not contain any details") } r.Details.PublicKey = pk recombined, err := proto.Marshal(r) if err != nil { return nil, fmt.Errorf("error while recombining certificate: %s", err) } c, _ := cert.UnmarshalNebulaCertificate(recombined) isValid, err := c.Verify(time.Now(), caPool) 
if err != nil { return c, fmt.Errorf("certificate validation failed: %s", err) } else if !isValid { // This case should never happen but here's to defensive programming! return c, errors.New("certificate validation failed but did not return an error") } return c, nil } nebula-1.6.1+dfsg/outside_test.go000066400000000000000000000043401434072716400167570ustar00rootroot00000000000000package nebula import ( "net" "testing" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/iputil" "github.com/stretchr/testify/assert" "golang.org/x/net/ipv4" ) func Test_newPacket(t *testing.T) { p := &firewall.Packet{} // length fail err := newPacket([]byte{0, 1}, true, p) assert.EqualError(t, err, "packet is less than 20 bytes") // length fail with ip options h := ipv4.Header{ Version: 1, Len: 100, Src: net.IPv4(10, 0, 0, 1), Dst: net.IPv4(10, 0, 0, 2), Options: []byte{0, 1, 0, 2}, } b, _ := h.Marshal() err = newPacket(b, true, p) assert.EqualError(t, err, "packet is less than 28 bytes, ip header len: 24") // not an ipv4 packet err = newPacket([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true, p) assert.EqualError(t, err, "packet is not ipv4, type: 0") // invalid ihl err = newPacket([]byte{4<<4 | (8 >> 2 & 0x0f), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, true, p) assert.EqualError(t, err, "packet had an invalid header length: 8") // account for variable ip header length - incoming h = ipv4.Header{ Version: 1, Len: 100, Src: net.IPv4(10, 0, 0, 1), Dst: net.IPv4(10, 0, 0, 2), Options: []byte{0, 1, 0, 2}, Protocol: firewall.ProtoTCP, } b, _ = h.Marshal() b = append(b, []byte{0, 3, 0, 4}...) 
err = newPacket(b, true, p) assert.Nil(t, err) assert.Equal(t, p.Protocol, uint8(firewall.ProtoTCP)) assert.Equal(t, p.LocalIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2))) assert.Equal(t, p.RemoteIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1))) assert.Equal(t, p.RemotePort, uint16(3)) assert.Equal(t, p.LocalPort, uint16(4)) // account for variable ip header length - outgoing h = ipv4.Header{ Version: 1, Protocol: 2, Len: 100, Src: net.IPv4(10, 0, 0, 1), Dst: net.IPv4(10, 0, 0, 2), Options: []byte{0, 1, 0, 2}, } b, _ = h.Marshal() b = append(b, []byte{0, 5, 0, 6}...) err = newPacket(b, false, p) assert.Nil(t, err) assert.Equal(t, p.Protocol, uint8(2)) assert.Equal(t, p.LocalIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1))) assert.Equal(t, p.RemoteIP, iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2))) assert.Equal(t, p.RemotePort, uint16(6)) assert.Equal(t, p.LocalPort, uint16(5)) } nebula-1.6.1+dfsg/overlay/000077500000000000000000000000001434072716400153755ustar00rootroot00000000000000nebula-1.6.1+dfsg/overlay/device.go000066400000000000000000000004111434072716400171570ustar00rootroot00000000000000package overlay import ( "io" "net" "github.com/slackhq/nebula/iputil" ) type Device interface { io.ReadWriteCloser Activate() error Cidr() *net.IPNet Name() string RouteFor(iputil.VpnIp) iputil.VpnIp NewMultiQueueReader() (io.ReadWriteCloser, error) } nebula-1.6.1+dfsg/overlay/route.go000066400000000000000000000120021434072716400170550ustar00rootroot00000000000000package overlay import ( "fmt" "math" "net" "runtime" "strconv" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/iputil" ) type Route struct { MTU int Metric int Cidr *net.IPNet Via *iputil.VpnIp } func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*cidr.Tree4, error) { routeTree := cidr.NewTree4() for _, r := range routes { if !allowMTU && r.MTU > 0 { l.WithField("route", r).Warnf("route MTU is not supported in %s", runtime.GOOS) } if r.Via != 
nil { routeTree.AddCIDR(r.Cidr, *r.Via) } } return routeTree, nil } func parseRoutes(c *config.C, network *net.IPNet) ([]Route, error) { var err error r := c.Get("tun.routes") if r == nil { return []Route{}, nil } rawRoutes, ok := r.([]interface{}) if !ok { return nil, fmt.Errorf("tun.routes is not an array") } if len(rawRoutes) < 1 { return []Route{}, nil } routes := make([]Route, len(rawRoutes)) for i, r := range rawRoutes { m, ok := r.(map[interface{}]interface{}) if !ok { return nil, fmt.Errorf("entry %v in tun.routes is invalid", i+1) } rMtu, ok := m["mtu"] if !ok { return nil, fmt.Errorf("entry %v.mtu in tun.routes is not present", i+1) } mtu, ok := rMtu.(int) if !ok { mtu, err = strconv.Atoi(rMtu.(string)) if err != nil { return nil, fmt.Errorf("entry %v.mtu in tun.routes is not an integer: %v", i+1, err) } } if mtu < 500 { return nil, fmt.Errorf("entry %v.mtu in tun.routes is below 500: %v", i+1, mtu) } rRoute, ok := m["route"] if !ok { return nil, fmt.Errorf("entry %v.route in tun.routes is not present", i+1) } r := Route{ MTU: mtu, } _, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute)) if err != nil { return nil, fmt.Errorf("entry %v.route in tun.routes failed to parse: %v", i+1, err) } if !ipWithin(network, r.Cidr) { return nil, fmt.Errorf( "entry %v.route in tun.routes is not contained within the network attached to the certificate; route: %v, network: %v", i+1, r.Cidr.String(), network.String(), ) } routes[i] = r } return routes, nil } func parseUnsafeRoutes(c *config.C, network *net.IPNet) ([]Route, error) { var err error r := c.Get("tun.unsafe_routes") if r == nil { return []Route{}, nil } rawRoutes, ok := r.([]interface{}) if !ok { return nil, fmt.Errorf("tun.unsafe_routes is not an array") } if len(rawRoutes) < 1 { return []Route{}, nil } routes := make([]Route, len(rawRoutes)) for i, r := range rawRoutes { m, ok := r.(map[interface{}]interface{}) if !ok { return nil, fmt.Errorf("entry %v in tun.unsafe_routes is invalid", i+1) } var mtu int if 
rMtu, ok := m["mtu"]; ok { mtu, ok = rMtu.(int) if !ok { mtu, err = strconv.Atoi(rMtu.(string)) if err != nil { return nil, fmt.Errorf("entry %v.mtu in tun.unsafe_routes is not an integer: %v", i+1, err) } } if mtu != 0 && mtu < 500 { return nil, fmt.Errorf("entry %v.mtu in tun.unsafe_routes is below 500: %v", i+1, mtu) } } rMetric, ok := m["metric"] if !ok { rMetric = 0 } metric, ok := rMetric.(int) if !ok { _, err = strconv.ParseInt(rMetric.(string), 10, 32) if err != nil { return nil, fmt.Errorf("entry %v.metric in tun.unsafe_routes is not an integer: %v", i+1, err) } } if metric < 0 || metric > math.MaxInt32 { return nil, fmt.Errorf("entry %v.metric in tun.unsafe_routes is not in range (0-%d) : %v", i+1, math.MaxInt32, metric) } rVia, ok := m["via"] if !ok { return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not present", i+1) } via, ok := rVia.(string) if !ok { return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not a string: found %T", i+1, rVia) } nVia := net.ParseIP(via) if nVia == nil { return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes failed to parse address: %v", i+1, via) } rRoute, ok := m["route"] if !ok { return nil, fmt.Errorf("entry %v.route in tun.unsafe_routes is not present", i+1) } viaVpnIp := iputil.Ip2VpnIp(nVia) r := Route{ Via: &viaVpnIp, MTU: mtu, Metric: metric, } _, r.Cidr, err = net.ParseCIDR(fmt.Sprintf("%v", rRoute)) if err != nil { return nil, fmt.Errorf("entry %v.route in tun.unsafe_routes failed to parse: %v", i+1, err) } if ipWithin(network, r.Cidr) { return nil, fmt.Errorf( "entry %v.route in tun.unsafe_routes is contained within the network attached to the certificate; route: %v, network: %v", i+1, r.Cidr.String(), network.String(), ) } routes[i] = r } return routes, nil } func ipWithin(o *net.IPNet, i *net.IPNet) bool { // Make sure o contains the lowest form of i if !o.Contains(i.IP.Mask(i.Mask)) { return false } // Find the max ip in i ip4 := i.IP.To4() if ip4 == nil { return false } last := 
make(net.IP, len(ip4)) copy(last, ip4) for x := range ip4 { last[x] |= ^i.Mask[x] } // Make sure o contains the max if !o.Contains(last) { return false } return true } nebula-1.6.1+dfsg/overlay/route_test.go000066400000000000000000000237041434072716400201270ustar00rootroot00000000000000package overlay import ( "fmt" "net" "testing" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" ) func Test_parseRoutes(t *testing.T) { l := test.NewLogger() c := config.NewC(l) _, n, _ := net.ParseCIDR("10.0.0.0/24") // test no routes config routes, err := parseRoutes(c, n) assert.Nil(t, err) assert.Len(t, routes, 0) // not an array c.Settings["tun"] = map[interface{}]interface{}{"routes": "hi"} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "tun.routes is not an array") // no routes c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{}} routes, err = parseRoutes(c, n) assert.Nil(t, err) assert.Len(t, routes, 0) // weird route c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{"asdf"}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1 in tun.routes is invalid") // no mtu c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.mtu in tun.routes is not present") // bad mtu c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "nope"}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.mtu in tun.routes is not an integer: strconv.Atoi: parsing \"nope\": invalid syntax") // low mtu c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "499"}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) 
assert.EqualError(t, err, "entry 1.mtu in tun.routes is below 500: 499") // missing route c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500"}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.routes is not present") // unparsable route c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "nope"}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.routes failed to parse: invalid CIDR address: nope") // below network range c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "1.0.0.0/8"}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.routes is not contained within the network attached to the certificate; route: 1.0.0.0/8, network: 10.0.0.0/24") // above network range c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "10.0.1.0/24"}}} routes, err = parseRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.routes is not contained within the network attached to the certificate; route: 10.0.1.0/24, network: 10.0.0.0/24") // happy case c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{ map[interface{}]interface{}{"mtu": "9000", "route": "10.0.0.0/29"}, map[interface{}]interface{}{"mtu": "8000", "route": "10.0.0.1/32"}, }} routes, err = parseRoutes(c, n) assert.Nil(t, err) assert.Len(t, routes, 2) tested := 0 for _, r := range routes { if r.MTU == 8000 { assert.Equal(t, "10.0.0.1/32", r.Cidr.String()) tested++ } else { assert.Equal(t, 9000, r.MTU) assert.Equal(t, "10.0.0.0/29", r.Cidr.String()) tested++ } } if tested != 2 { t.Fatal("Did not see both routes") } } func Test_parseUnsafeRoutes(t 
*testing.T) { l := test.NewLogger() c := config.NewC(l) _, n, _ := net.ParseCIDR("10.0.0.0/24") // test no routes config routes, err := parseUnsafeRoutes(c, n) assert.Nil(t, err) assert.Len(t, routes, 0) // not an array c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": "hi"} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "tun.unsafe_routes is not an array") // no routes c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, err) assert.Len(t, routes, 0) // weird route c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{"asdf"}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1 in tun.unsafe_routes is invalid") // no via c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.via in tun.unsafe_routes is not present") // invalid via for _, invalidValue := range []interface{}{ 127, false, nil, 1.0, []string{"1", "2"}, } { c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": invalidValue}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, fmt.Sprintf("entry 1.via in tun.unsafe_routes is not a string: found %T", invalidValue)) } // unparsable via c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "via": "nope"}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.via in tun.unsafe_routes failed to parse address: nope") // missing route c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500"}}} routes, err = parseUnsafeRoutes(c, n) 
assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.unsafe_routes is not present") // unparsable route c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500", "route": "nope"}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.unsafe_routes failed to parse: invalid CIDR address: nope") // within network range c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "10.0.0.0/24"}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.route in tun.unsafe_routes is contained within the network attached to the certificate; route: 10.0.0.0/24, network: 10.0.0.0/24") // below network range c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "1.0.0.0/8"}}} routes, err = parseUnsafeRoutes(c, n) assert.Len(t, routes, 1) assert.Nil(t, err) // above network range c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "10.0.1.0/24"}}} routes, err = parseUnsafeRoutes(c, n) assert.Len(t, routes, 1) assert.Nil(t, err) // no mtu c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "1.0.0.0/8"}}} routes, err = parseUnsafeRoutes(c, n) assert.Len(t, routes, 1) assert.Equal(t, 0, routes[0].MTU) // bad mtu c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "nope"}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.mtu in tun.unsafe_routes is not an integer: strconv.Atoi: parsing \"nope\": invalid syntax") // low mtu c.Settings["tun"] = 
map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "499"}}} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, routes) assert.EqualError(t, err, "entry 1.mtu in tun.unsafe_routes is below 500: 499") // happy case c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{ map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29"}, map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "8000", "route": "1.0.0.1/32"}, map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32"}, }} routes, err = parseUnsafeRoutes(c, n) assert.Nil(t, err) assert.Len(t, routes, 3) tested := 0 for _, r := range routes { if r.MTU == 8000 { assert.Equal(t, "1.0.0.1/32", r.Cidr.String()) tested++ } else if r.MTU == 9000 { assert.Equal(t, 9000, r.MTU) assert.Equal(t, "1.0.0.0/29", r.Cidr.String()) tested++ } else { assert.Equal(t, 1500, r.MTU) assert.Equal(t, 1234, r.Metric) assert.Equal(t, "1.0.0.2/32", r.Cidr.String()) tested++ } } if tested != 3 { t.Fatal("Did not see both unsafe_routes") } } func Test_makeRouteTree(t *testing.T) { l := test.NewLogger() c := config.NewC(l) _, n, _ := net.ParseCIDR("10.0.0.0/24") c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{ map[interface{}]interface{}{"via": "192.168.0.1", "route": "1.0.0.0/28"}, map[interface{}]interface{}{"via": "192.168.0.2", "route": "1.0.0.1/32"}, }} routes, err := parseUnsafeRoutes(c, n) assert.NoError(t, err) assert.Len(t, routes, 2) routeTree, err := makeRouteTree(l, routes, true) assert.NoError(t, err) ip := iputil.Ip2VpnIp(net.ParseIP("1.0.0.2")) r := routeTree.MostSpecificContains(ip) assert.NotNil(t, r) assert.IsType(t, iputil.VpnIp(0), r) assert.EqualValues(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.1")), r) ip = iputil.Ip2VpnIp(net.ParseIP("1.0.0.1")) r = routeTree.MostSpecificContains(ip) assert.NotNil(t, r) assert.IsType(t, iputil.VpnIp(0), 
r) assert.EqualValues(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.2")), r) ip = iputil.Ip2VpnIp(net.ParseIP("1.1.0.1")) r = routeTree.MostSpecificContains(ip) assert.Nil(t, r) } nebula-1.6.1+dfsg/overlay/tun.go000066400000000000000000000021541434072716400165340ustar00rootroot00000000000000package overlay import ( "net" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/util" ) const DefaultMTU = 1300 func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, fd *int, routines int) (Device, error) { routes, err := parseRoutes(c, tunCidr) if err != nil { return nil, util.NewContextualError("Could not parse tun.routes", nil, err) } unsafeRoutes, err := parseUnsafeRoutes(c, tunCidr) if err != nil { return nil, util.NewContextualError("Could not parse tun.unsafe_routes", nil, err) } routes = append(routes, unsafeRoutes...) switch { case c.GetBool("tun.disabled", false): tun := newDisabledTun(tunCidr, c.GetInt("tun.tx_queue", 500), c.GetBool("stats.message_metrics", false), l) return tun, nil case fd != nil: return newTunFromFd( l, *fd, tunCidr, c.GetInt("tun.mtu", DefaultMTU), routes, c.GetInt("tun.tx_queue", 500), ) default: return newTun( l, c.GetString("tun.dev", ""), tunCidr, c.GetInt("tun.mtu", DefaultMTU), routes, c.GetInt("tun.tx_queue", 500), routines > 1, ) } } nebula-1.6.1+dfsg/overlay/tun_android.go000066400000000000000000000025231434072716400202340ustar00rootroot00000000000000//go:build !e2e_testing // +build !e2e_testing package overlay import ( "fmt" "io" "net" "os" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" ) type tun struct { io.ReadWriteCloser fd int cidr *net.IPNet routeTree *cidr.Tree4 l *logrus.Logger } func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes []Route, _ int) (*tun, error) { routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } file := os.NewFile(uintptr(deviceFd), 
"/dev/net/tun") return &tun{ ReadWriteCloser: file, fd: int(file.Fd()), cidr: cidr, l: l, routeTree: routeTree, }, nil } func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) { return nil, fmt.Errorf("newTun not supported in Android") } func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } func (t tun) Activate() error { return nil } func (t *tun) Cidr() *net.IPNet { return t.cidr } func (t *tun) Name() string { return "android" } func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: multiqueue not implemented for android") } nebula-1.6.1+dfsg/overlay/tun_darwin.go000066400000000000000000000232501434072716400201000ustar00rootroot00000000000000//go:build !ios && !e2e_testing // +build !ios,!e2e_testing package overlay import ( "errors" "fmt" "io" "net" "os" "syscall" "unsafe" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" netroute "golang.org/x/net/route" "golang.org/x/sys/unix" ) type tun struct { io.ReadWriteCloser Device string cidr *net.IPNet DefaultMTU int Routes []Route routeTree *cidr.Tree4 l *logrus.Logger // cache out buffer since we need to prepend 4 bytes for tun metadata out []byte } type sockaddrCtl struct { scLen uint8 scFamily uint8 ssSysaddr uint16 scID uint32 scUnit uint32 scReserved [5]uint32 } type ifReq struct { Name [16]byte Flags uint16 pad [8]byte } func ioctl(a1, a2, a3 uintptr) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, a1, a2, a3) if errno != 0 { return errno } return nil } var sockaddrCtlSize uintptr = 32 const ( _SYSPROTO_CONTROL = 2 //define SYSPROTO_CONTROL 2 /* kernel control protocol */ _AF_SYS_CONTROL = 2 //#define AF_SYS_CONTROL 2 /* corresponding sub address type */ _PF_SYSTEM = unix.AF_SYSTEM //#define PF_SYSTEM AF_SYSTEM _CTLIOCGINFO = 3227799043 //#define CTLIOCGINFO _IOWR('N', 3, struct 
ctl_info) utunControlName = "com.apple.net.utun_control" ) type ifreqAddr struct { Name [16]byte Addr unix.RawSockaddrInet4 pad [8]byte } type ifreqMTU struct { Name [16]byte MTU int32 pad [8]byte } func newTun(l *logrus.Logger, name string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool) (*tun, error) { routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } ifIndex := -1 if name != "" && name != "utun" { _, err := fmt.Sscanf(name, "utun%d", &ifIndex) if err != nil || ifIndex < 0 { // NOTE: we don't make this error so we don't break existing // configs that set a name before it was used. l.Warn("interface name must be utun[0-9]+ on Darwin, ignoring") ifIndex = -1 } } fd, err := unix.Socket(_PF_SYSTEM, unix.SOCK_DGRAM, _SYSPROTO_CONTROL) if err != nil { return nil, fmt.Errorf("system socket: %v", err) } var ctlInfo = &struct { ctlID uint32 ctlName [96]byte }{} copy(ctlInfo.ctlName[:], utunControlName) err = ioctl(uintptr(fd), uintptr(_CTLIOCGINFO), uintptr(unsafe.Pointer(ctlInfo))) if err != nil { return nil, fmt.Errorf("CTLIOCGINFO: %v", err) } sc := sockaddrCtl{ scLen: uint8(sockaddrCtlSize), scFamily: unix.AF_SYSTEM, ssSysaddr: _AF_SYS_CONTROL, scID: ctlInfo.ctlID, scUnit: uint32(ifIndex) + 1, } _, _, errno := unix.RawSyscall( unix.SYS_CONNECT, uintptr(fd), uintptr(unsafe.Pointer(&sc)), sockaddrCtlSize, ) if errno != 0 { return nil, fmt.Errorf("SYS_CONNECT: %v", errno) } var ifName struct { name [16]byte } ifNameSize := uintptr(len(ifName.name)) _, _, errno = syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), 2, // SYSPROTO_CONTROL 2, // UTUN_OPT_IFNAME uintptr(unsafe.Pointer(&ifName)), uintptr(unsafe.Pointer(&ifNameSize)), 0) if errno != 0 { return nil, fmt.Errorf("SYS_GETSOCKOPT: %v", errno) } name = string(ifName.name[:ifNameSize-1]) err = syscall.SetNonblock(fd, true) if err != nil { return nil, fmt.Errorf("SetNonblock: %v", err) } file := os.NewFile(uintptr(fd), "") tun := &tun{ ReadWriteCloser: file, Device: 
name, cidr: cidr, DefaultMTU: defaultMTU, Routes: routes, routeTree: routeTree, l: l, } return tun, nil } func (t *tun) deviceBytes() (o [16]byte) { for i, c := range t.Device { o[i] = byte(c) } return } func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int) (*tun, error) { return nil, fmt.Errorf("newTunFromFd not supported in Darwin") } func (t *tun) Close() error { if t.ReadWriteCloser != nil { return t.ReadWriteCloser.Close() } return nil } func (t *tun) Activate() error { devName := t.deviceBytes() var addr, mask [4]byte copy(addr[:], t.cidr.IP.To4()) copy(mask[:], t.cidr.Mask) s, err := unix.Socket( unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP, ) if err != nil { return err } fd := uintptr(s) ifra := ifreqAddr{ Name: devName, Addr: unix.RawSockaddrInet4{ Family: unix.AF_INET, Addr: addr, }, } // Set the device ip address if err = ioctl(fd, unix.SIOCSIFADDR, uintptr(unsafe.Pointer(&ifra))); err != nil { return fmt.Errorf("failed to set tun address: %s", err) } // Set the device network ifra.Addr.Addr = mask if err = ioctl(fd, unix.SIOCSIFNETMASK, uintptr(unsafe.Pointer(&ifra))); err != nil { return fmt.Errorf("failed to set tun netmask: %s", err) } // Set the device name ifrf := ifReq{Name: devName} if err = ioctl(fd, unix.SIOCGIFFLAGS, uintptr(unsafe.Pointer(&ifrf))); err != nil { return fmt.Errorf("failed to set tun device name: %s", err) } // Set the MTU on the device ifm := ifreqMTU{Name: devName, MTU: int32(t.DefaultMTU)} if err = ioctl(fd, unix.SIOCSIFMTU, uintptr(unsafe.Pointer(&ifm))); err != nil { return fmt.Errorf("failed to set tun mtu: %v", err) } /* // Set the transmit queue length ifrq := ifreqQLEN{Name: devName, Value: int32(t.TXQueueLen)} if err = ioctl(fd, unix.SIOCSIFTXQLEN, uintptr(unsafe.Pointer(&ifrq))); err != nil { // If we can't set the queue length nebula will still work but it may lead to packet loss l.WithError(err).Error("Failed to set tun tx queue length") } */ // Bring up the interface ifrf.Flags = 
ifrf.Flags | unix.IFF_UP if err = ioctl(fd, unix.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifrf))); err != nil { return fmt.Errorf("failed to bring the tun device up: %s", err) } routeSock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC) if err != nil { return fmt.Errorf("unable to create AF_ROUTE socket: %v", err) } defer func() { unix.Shutdown(routeSock, unix.SHUT_RDWR) err := unix.Close(routeSock) if err != nil { t.l.WithError(err).Error("failed to close AF_ROUTE socket") } }() routeAddr := &netroute.Inet4Addr{} maskAddr := &netroute.Inet4Addr{} linkAddr, err := getLinkAddr(t.Device) if err != nil { return err } if linkAddr == nil { return fmt.Errorf("unable to discover link_addr for tun interface") } copy(routeAddr.IP[:], addr[:]) copy(maskAddr.IP[:], mask[:]) err = addRoute(routeSock, routeAddr, maskAddr, linkAddr) if err != nil { if errors.Is(err, unix.EEXIST) { err = fmt.Errorf("unable to add tun route, identical route already exists: %s", t.cidr) } return err } // Run the interface ifrf.Flags = ifrf.Flags | unix.IFF_UP | unix.IFF_RUNNING if err = ioctl(fd, unix.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifrf))); err != nil { return fmt.Errorf("failed to run tun device: %s", err) } // Unsafe path routes for _, r := range t.Routes { if r.Via == nil { // We don't allow route MTUs so only install routes with a via continue } copy(routeAddr.IP[:], r.Cidr.IP.To4()) copy(maskAddr.IP[:], net.IP(r.Cidr.Mask).To4()) err = addRoute(routeSock, routeAddr, maskAddr, linkAddr) if err != nil { if errors.Is(err, unix.EEXIST) { t.l.WithField("route", r.Cidr). Warnf("unable to add unsafe_route, identical route already exists") } else { return err } } // TODO how to set metric } return nil } func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } // Get the LinkAddr for the interface of the given name // TODO: Is there an easier way to fetch this when we create the interface? 
// Maybe SIOCGIFINDEX? but this doesn't appear to exist in the darwin headers. func getLinkAddr(name string) (*netroute.LinkAddr, error) { rib, err := netroute.FetchRIB(unix.AF_UNSPEC, unix.NET_RT_IFLIST, 0) if err != nil { return nil, err } msgs, err := netroute.ParseRIB(unix.NET_RT_IFLIST, rib) if err != nil { return nil, err } for _, m := range msgs { switch m := m.(type) { case *netroute.InterfaceMessage: if m.Name == name { sa, ok := m.Addrs[unix.RTAX_IFP].(*netroute.LinkAddr) if ok { return sa, nil } } } } return nil, nil } func addRoute(sock int, addr, mask *netroute.Inet4Addr, link *netroute.LinkAddr) error { r := netroute.RouteMessage{ Version: unix.RTM_VERSION, Type: unix.RTM_ADD, Flags: unix.RTF_UP, Seq: 1, Addrs: []netroute.Addr{ unix.RTAX_DST: addr, unix.RTAX_GATEWAY: link, unix.RTAX_NETMASK: mask, }, } data, err := r.Marshal() if err != nil { return fmt.Errorf("failed to create route.RouteMessage: %w", err) } _, err = unix.Write(sock, data[:]) if err != nil { return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err) } return nil } func (t *tun) Read(to []byte) (int, error) { buf := make([]byte, len(to)+4) n, err := t.ReadWriteCloser.Read(buf) copy(to, buf[4:]) return n - 4, err } // Write is only valid for single threaded use func (t *tun) Write(from []byte) (int, error) { buf := t.out if cap(buf) < len(from)+4 { buf = make([]byte, len(from)+4) t.out = buf } buf = buf[:len(from)+4] if len(from) == 0 { return 0, syscall.EIO } // Determine the IP Family for the NULL L2 Header ipVer := from[0] >> 4 if ipVer == 4 { buf[3] = syscall.AF_INET } else if ipVer == 6 { buf[3] = syscall.AF_INET6 } else { return 0, fmt.Errorf("unable to determine IP version from packet") } copy(buf[4:], from) n, err := t.ReadWriteCloser.Write(buf) return n - 4, err } func (t *tun) Cidr() *net.IPNet { return t.cidr } func (t *tun) Name() string { return t.Device } func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: 
multiqueue not implemented for darwin") } nebula-1.6.1+dfsg/overlay/tun_disabled.go000066400000000000000000000067401434072716400203700ustar00rootroot00000000000000package overlay import ( "encoding/binary" "fmt" "io" "net" "strings" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/iputil" ) type disabledTun struct { read chan []byte cidr *net.IPNet // Track these metrics since we don't have the tun device to do it for us tx metrics.Counter rx metrics.Counter l *logrus.Logger } func newDisabledTun(cidr *net.IPNet, queueLen int, metricsEnabled bool, l *logrus.Logger) *disabledTun { tun := &disabledTun{ cidr: cidr, read: make(chan []byte, queueLen), l: l, } if metricsEnabled { tun.tx = metrics.GetOrRegisterCounter("messages.tx.message", nil) tun.rx = metrics.GetOrRegisterCounter("messages.rx.message", nil) } else { tun.tx = &metrics.NilCounter{} tun.rx = &metrics.NilCounter{} } return tun } func (*disabledTun) Activate() error { return nil } func (*disabledTun) RouteFor(iputil.VpnIp) iputil.VpnIp { return 0 } func (t *disabledTun) Cidr() *net.IPNet { return t.cidr } func (*disabledTun) Name() string { return "disabled" } func (t *disabledTun) Read(b []byte) (int, error) { r, ok := <-t.read if !ok { return 0, io.EOF } if len(r) > len(b) { return 0, fmt.Errorf("packet larger than mtu: %d > %d bytes", len(r), len(b)) } t.tx.Inc(1) if t.l.Level >= logrus.DebugLevel { t.l.WithField("raw", prettyPacket(r)).Debugf("Write payload") } return copy(b, r), nil } func (t *disabledTun) handleICMPEchoRequest(b []byte) bool { // Return early if this is not a simple ICMP Echo Request //TODO: make constants out of these if !(len(b) >= 28 && len(b) <= 9001 && b[0] == 0x45 && b[9] == 0x01 && b[20] == 0x08) { return false } // We don't support fragmented packets if b[7] != 0 || (b[6]&0x2F != 0) { return false } buf := make([]byte, len(b)) copy(buf, b) // Swap dest / src IPs and recalculate checksum ipv4 := buf[0:20] copy(ipv4[12:16], b[16:20]) 
copy(ipv4[16:20], b[12:16]) ipv4[10] = 0 ipv4[11] = 0 binary.BigEndian.PutUint16(ipv4[10:], ipChecksum(ipv4)) // Change type to ICMP Echo Reply and recalculate checksum icmp := buf[20:] icmp[0] = 0 icmp[2] = 0 icmp[3] = 0 binary.BigEndian.PutUint16(icmp[2:], ipChecksum(icmp)) // attempt to write it, but don't block select { case t.read <- buf: default: t.l.Debugf("tun_disabled: dropped ICMP Echo Reply response") } return true } func (t *disabledTun) Write(b []byte) (int, error) { t.rx.Inc(1) // Check for ICMP Echo Request before spending time doing the full parsing if t.handleICMPEchoRequest(b) { if t.l.Level >= logrus.DebugLevel { t.l.WithField("raw", prettyPacket(b)).Debugf("Disabled tun responded to ICMP Echo Request") } } else if t.l.Level >= logrus.DebugLevel { t.l.WithField("raw", prettyPacket(b)).Debugf("Disabled tun received unexpected payload") } return len(b), nil } func (t *disabledTun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return t, nil } func (t *disabledTun) Close() error { if t.read != nil { close(t.read) t.read = nil } return nil } type prettyPacket []byte func (p prettyPacket) String() string { var s strings.Builder for i, b := range p { if i > 0 && i%8 == 0 { s.WriteString(" ") } s.WriteString(fmt.Sprintf("%02x ", b)) } return s.String() } func ipChecksum(b []byte) uint16 { var c uint32 sz := len(b) - 1 for i := 0; i < sz; i += 2 { c += uint32(b[i]) << 8 c += uint32(b[i+1]) } if sz%2 == 0 { c += uint32(b[sz]) << 8 } for (c >> 16) > 0 { c = (c & 0xffff) + (c >> 16) } return ^uint16(c) } nebula-1.6.1+dfsg/overlay/tun_freebsd.go000066400000000000000000000061741434072716400202340ustar00rootroot00000000000000//go:build !e2e_testing // +build !e2e_testing package overlay import ( "fmt" "io" "net" "os" "os/exec" "regexp" "strconv" "strings" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" ) var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`) type tun struct { Device string cidr *net.IPNet MTU 
int Routes []Route routeTree *cidr.Tree4 l *logrus.Logger io.ReadWriteCloser } func (t *tun) Close() error { if t.ReadWriteCloser != nil { return t.ReadWriteCloser.Close() } return nil } func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int) (*tun, error) { return nil, fmt.Errorf("newTunFromFd not supported in FreeBSD") } func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool) (*tun, error) { routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } if strings.HasPrefix(deviceName, "/dev/") { deviceName = strings.TrimPrefix(deviceName, "/dev/") } if !deviceNameRE.MatchString(deviceName) { return nil, fmt.Errorf("tun.dev must match `tun[0-9]+`") } return &tun{ Device: deviceName, cidr: cidr, MTU: defaultMTU, Routes: routes, routeTree: routeTree, l: l, }, nil } func (t *tun) Activate() error { var err error t.ReadWriteCloser, err = os.OpenFile("/dev/"+t.Device, os.O_RDWR, 0) if err != nil { return fmt.Errorf("activate failed: %v", err) } // TODO use syscalls instead of exec.Command t.l.Debug("command: ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String()) if err = exec.Command("/sbin/ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String()).Run(); err != nil { return fmt.Errorf("failed to run 'ifconfig': %s", err) } t.l.Debug("command: route", "-n", "add", "-net", t.cidr.String(), "-interface", t.Device) if err = exec.Command("/sbin/route", "-n", "add", "-net", t.cidr.String(), "-interface", t.Device).Run(); err != nil { return fmt.Errorf("failed to run 'route add': %s", err) } t.l.Debug("command: ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU)) if err = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU)).Run(); err != nil { return fmt.Errorf("failed to run 'ifconfig': %s", err) } // Unsafe path routes for _, r := range t.Routes { if r.Via == nil { // We don't allow route MTUs so only install routes with a via continue } 
t.l.Debug("command: route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device) if err = exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device).Run(); err != nil { return fmt.Errorf("failed to run 'route add' for unsafe_route %s: %s", r.Cidr.String(), err) } } return nil } func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } func (t *tun) Cidr() *net.IPNet { return t.cidr } func (t *tun) Name() string { return t.Device } func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: multiqueue not implemented for freebsd") } nebula-1.6.1+dfsg/overlay/tun_ios.go000066400000000000000000000046141434072716400174110ustar00rootroot00000000000000//go:build ios && !e2e_testing // +build ios,!e2e_testing package overlay import ( "errors" "fmt" "io" "net" "os" "sync" "syscall" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" ) type tun struct { io.ReadWriteCloser cidr *net.IPNet routeTree *cidr.Tree4 } func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) { return nil, fmt.Errorf("newTun not supported in iOS") } func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, _ int, routes []Route, _ int) (*tun, error) { routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } file := os.NewFile(uintptr(deviceFd), "/dev/tun") return &tun{ cidr: cidr, ReadWriteCloser: &tunReadCloser{f: file}, routeTree: routeTree, }, nil } func (t *tun) Activate() error { return nil } func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } // The following is hoisted up from water, we do this so we can inject our own fd on iOS type tunReadCloser struct { f io.ReadWriteCloser rMu sync.Mutex rBuf []byte wMu 
sync.Mutex wBuf []byte } func (tr *tunReadCloser) Read(to []byte) (int, error) { tr.rMu.Lock() defer tr.rMu.Unlock() if cap(tr.rBuf) < len(to)+4 { tr.rBuf = make([]byte, len(to)+4) } tr.rBuf = tr.rBuf[:len(to)+4] n, err := tr.f.Read(tr.rBuf) copy(to, tr.rBuf[4:]) return n - 4, err } func (tr *tunReadCloser) Write(from []byte) (int, error) { if len(from) == 0 { return 0, syscall.EIO } tr.wMu.Lock() defer tr.wMu.Unlock() if cap(tr.wBuf) < len(from)+4 { tr.wBuf = make([]byte, len(from)+4) } tr.wBuf = tr.wBuf[:len(from)+4] // Determine the IP Family for the NULL L2 Header ipVer := from[0] >> 4 if ipVer == 4 { tr.wBuf[3] = syscall.AF_INET } else if ipVer == 6 { tr.wBuf[3] = syscall.AF_INET6 } else { return 0, errors.New("unable to determine IP version from packet") } copy(tr.wBuf[4:], from) n, err := tr.f.Write(tr.wBuf) return n - 4, err } func (tr *tunReadCloser) Close() error { return tr.f.Close() } func (t *tun) Cidr() *net.IPNet { return t.cidr } func (t *tun) Name() string { return "iOS" } func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: multiqueue not implemented for ios") } nebula-1.6.1+dfsg/overlay/tun_linux.go000066400000000000000000000160271434072716400177570ustar00rootroot00000000000000//go:build !android && !e2e_testing // +build !android,!e2e_testing package overlay import ( "fmt" "io" "net" "os" "strings" "unsafe" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) type tun struct { io.ReadWriteCloser fd int Device string cidr *net.IPNet MaxMTU int DefaultMTU int TXQueueLen int Routes []Route routeTree *cidr.Tree4 l *logrus.Logger } type ifReq struct { Name [16]byte Flags uint16 pad [8]byte } func ioctl(a1, a2, a3 uintptr) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, a1, a2, a3) if errno != 0 { return errno } return nil } type ifreqAddr struct { Name [16]byte Addr unix.RawSockaddrInet4 pad [8]byte } 
type ifreqMTU struct { Name [16]byte MTU int32 pad [8]byte } type ifreqQLEN struct { Name [16]byte Value int32 pad [8]byte } func newTunFromFd(l *logrus.Logger, deviceFd int, cidr *net.IPNet, defaultMTU int, routes []Route, txQueueLen int) (*tun, error) { routeTree, err := makeRouteTree(l, routes, true) if err != nil { return nil, err } file := os.NewFile(uintptr(deviceFd), "/dev/net/tun") return &tun{ ReadWriteCloser: file, fd: int(file.Fd()), Device: "tun0", cidr: cidr, DefaultMTU: defaultMTU, TXQueueLen: txQueueLen, Routes: routes, routeTree: routeTree, l: l, }, nil } func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, txQueueLen int, multiqueue bool) (*tun, error) { fd, err := unix.Open("/dev/net/tun", os.O_RDWR, 0) if err != nil { return nil, err } var req ifReq req.Flags = uint16(unix.IFF_TUN | unix.IFF_NO_PI) if multiqueue { req.Flags |= unix.IFF_MULTI_QUEUE } copy(req.Name[:], deviceName) if err = ioctl(uintptr(fd), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&req))); err != nil { return nil, err } name := strings.Trim(string(req.Name[:]), "\x00") file := os.NewFile(uintptr(fd), "/dev/net/tun") maxMTU := defaultMTU for _, r := range routes { if r.MTU == 0 { r.MTU = defaultMTU } if r.MTU > maxMTU { maxMTU = r.MTU } } routeTree, err := makeRouteTree(l, routes, true) if err != nil { return nil, err } return &tun{ ReadWriteCloser: file, fd: int(file.Fd()), Device: name, cidr: cidr, MaxMTU: maxMTU, DefaultMTU: defaultMTU, TXQueueLen: txQueueLen, Routes: routes, routeTree: routeTree, l: l, }, nil } func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) { fd, err := unix.Open("/dev/net/tun", os.O_RDWR, 0) if err != nil { return nil, err } var req ifReq req.Flags = uint16(unix.IFF_TUN | unix.IFF_NO_PI | unix.IFF_MULTI_QUEUE) copy(req.Name[:], t.Device) if err = ioctl(uintptr(fd), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&req))); err != nil { return nil, err } file := os.NewFile(uintptr(fd), 
"/dev/net/tun") return file, nil } func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } func (t *tun) Write(b []byte) (int, error) { var nn int max := len(b) for { n, err := unix.Write(t.fd, b[nn:max]) if n > 0 { nn += n } if nn == len(b) { return nn, err } if err != nil { return nn, err } if n == 0 { return nn, io.ErrUnexpectedEOF } } } func (t tun) deviceBytes() (o [16]byte) { for i, c := range t.Device { o[i] = byte(c) } return } func (t tun) Activate() error { devName := t.deviceBytes() var addr, mask [4]byte copy(addr[:], t.cidr.IP.To4()) copy(mask[:], t.cidr.Mask) s, err := unix.Socket( unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP, ) if err != nil { return err } fd := uintptr(s) ifra := ifreqAddr{ Name: devName, Addr: unix.RawSockaddrInet4{ Family: unix.AF_INET, Addr: addr, }, } // Set the device ip address if err = ioctl(fd, unix.SIOCSIFADDR, uintptr(unsafe.Pointer(&ifra))); err != nil { return fmt.Errorf("failed to set tun address: %s", err) } // Set the device network ifra.Addr.Addr = mask if err = ioctl(fd, unix.SIOCSIFNETMASK, uintptr(unsafe.Pointer(&ifra))); err != nil { return fmt.Errorf("failed to set tun netmask: %s", err) } // Set the device name ifrf := ifReq{Name: devName} if err = ioctl(fd, unix.SIOCGIFFLAGS, uintptr(unsafe.Pointer(&ifrf))); err != nil { return fmt.Errorf("failed to set tun device name: %s", err) } // Set the MTU on the device ifm := ifreqMTU{Name: devName, MTU: int32(t.MaxMTU)} if err = ioctl(fd, unix.SIOCSIFMTU, uintptr(unsafe.Pointer(&ifm))); err != nil { // This is currently a non fatal condition because the route table must have the MTU set appropriately as well t.l.WithError(err).Error("Failed to set tun mtu") } // Set the transmit queue length ifrq := ifreqQLEN{Name: devName, Value: int32(t.TXQueueLen)} if err = ioctl(fd, unix.SIOCSIFTXQLEN, uintptr(unsafe.Pointer(&ifrq))); err != nil { // If we can't set the queue length 
nebula will still work but it may lead to packet loss t.l.WithError(err).Error("Failed to set tun tx queue length") } // Bring up the interface ifrf.Flags = ifrf.Flags | unix.IFF_UP if err = ioctl(fd, unix.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifrf))); err != nil { return fmt.Errorf("failed to bring the tun device up: %s", err) } // Set the routes link, err := netlink.LinkByName(t.Device) if err != nil { return fmt.Errorf("failed to get tun device link: %s", err) } // Default route dr := &net.IPNet{IP: t.cidr.IP.Mask(t.cidr.Mask), Mask: t.cidr.Mask} nr := netlink.Route{ LinkIndex: link.Attrs().Index, Dst: dr, MTU: t.DefaultMTU, AdvMSS: t.advMSS(Route{}), Scope: unix.RT_SCOPE_LINK, Src: t.cidr.IP, Protocol: unix.RTPROT_KERNEL, Table: unix.RT_TABLE_MAIN, Type: unix.RTN_UNICAST, } err = netlink.RouteReplace(&nr) if err != nil { return fmt.Errorf("failed to set mtu %v on the default route %v; %v", t.DefaultMTU, dr, err) } // Path routes for _, r := range t.Routes { nr := netlink.Route{ LinkIndex: link.Attrs().Index, Dst: r.Cidr, MTU: r.MTU, AdvMSS: t.advMSS(r), Scope: unix.RT_SCOPE_LINK, } if r.Metric > 0 { nr.Priority = r.Metric } err = netlink.RouteAdd(&nr) if err != nil { return fmt.Errorf("failed to set mtu %v on route %v; %v", r.MTU, r.Cidr, err) } } // Run the interface ifrf.Flags = ifrf.Flags | unix.IFF_UP | unix.IFF_RUNNING if err = ioctl(fd, unix.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifrf))); err != nil { return fmt.Errorf("failed to run tun device: %s", err) } return nil } func (t *tun) Cidr() *net.IPNet { return t.cidr } func (t *tun) Name() string { return t.Device } func (t tun) advMSS(r Route) int { mtu := r.MTU if r.MTU == 0 { mtu = t.DefaultMTU } // We only need to set advmss if the route MTU does not match the device MTU if mtu != t.MaxMTU { return mtu - 40 } return 0 } nebula-1.6.1+dfsg/overlay/tun_linux_test.go000066400000000000000000000016771434072716400210230ustar00rootroot00000000000000//go:build !e2e_testing // +build !e2e_testing package overlay 
import "testing" var runAdvMSSTests = []struct { name string tun tun r Route expected int }{ // Standard case, default MTU is the device max MTU {"default", tun{DefaultMTU: 1440, MaxMTU: 1440}, Route{}, 0}, {"default-min", tun{DefaultMTU: 1440, MaxMTU: 1440}, Route{MTU: 1440}, 0}, {"default-low", tun{DefaultMTU: 1440, MaxMTU: 1440}, Route{MTU: 1200}, 1160}, // Case where we have a route MTU set higher than the default {"route", tun{DefaultMTU: 1440, MaxMTU: 8941}, Route{}, 1400}, {"route-min", tun{DefaultMTU: 1440, MaxMTU: 8941}, Route{MTU: 1440}, 1400}, {"route-high", tun{DefaultMTU: 1440, MaxMTU: 8941}, Route{MTU: 8941}, 0}, } func TestTunAdvMSS(t *testing.T) { for _, tt := range runAdvMSSTests { t.Run(tt.name, func(t *testing.T) { o := tt.tun.advMSS(tt.r) if o != tt.expected { t.Errorf("got %d, want %d", o, tt.expected) } }) } } nebula-1.6.1+dfsg/overlay/tun_tester.go000066400000000000000000000056261434072716400201310ustar00rootroot00000000000000//go:build e2e_testing // +build e2e_testing package overlay import ( "fmt" "io" "net" "os" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" ) type TestTun struct { Device string cidr *net.IPNet Routes []Route routeTree *cidr.Tree4 l *logrus.Logger rxPackets chan []byte // Packets to receive into nebula TxPackets chan []byte // Packets transmitted outside by nebula } func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, _ int, routes []Route, _ int, _ bool) (*TestTun, error) { routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } return &TestTun{ Device: deviceName, cidr: cidr, Routes: routes, routeTree: routeTree, l: l, rxPackets: make(chan []byte, 10), TxPackets: make(chan []byte, 10), }, nil } func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int) (*TestTun, error) { return nil, fmt.Errorf("newTunFromFd not supported") } // Send will place a byte array onto the receive queue for nebula to consume // 
These are unencrypted ip layer frames destined for another nebula node. // packets should exit the udp side, capture them with udpConn.Get func (t *TestTun) Send(packet []byte) { if t.l.Level >= logrus.InfoLevel { t.l.WithField("dataLen", len(packet)).Info("Tun receiving injected packet") } t.rxPackets <- packet } // Get will pull an unencrypted ip layer frame from the transmit queue // nebula meant to send this message to some application on the local system // packets were ingested from the udp side, you can send them with udpConn.Send func (t *TestTun) Get(block bool) []byte { if block { return <-t.TxPackets } select { case p := <-t.TxPackets: return p default: return nil } } //********************************************************************************************************************// // Below this is boilerplate implementation to make nebula actually work //********************************************************************************************************************// func (t *TestTun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } func (t *TestTun) Activate() error { return nil } func (t *TestTun) Cidr() *net.IPNet { return t.cidr } func (t *TestTun) Name() string { return t.Device } func (t *TestTun) Write(b []byte) (n int, err error) { packet := make([]byte, len(b), len(b)) copy(packet, b) t.TxPackets <- packet return len(b), nil } func (t *TestTun) Close() error { close(t.rxPackets) return nil } func (t *TestTun) Read(b []byte) (int, error) { p, ok := <-t.rxPackets if !ok { return 0, os.ErrClosed } copy(b, p) return len(p), nil } func (t *TestTun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: multiqueue not implemented") } nebula-1.6.1+dfsg/overlay/tun_water_windows.go000066400000000000000000000054421434072716400215130ustar00rootroot00000000000000package overlay import ( "fmt" "io" "net" "os/exec" "strconv" 
"github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" "github.com/songgao/water" ) type waterTun struct { Device string cidr *net.IPNet MTU int Routes []Route routeTree *cidr.Tree4 *water.Interface } func newWaterTun(l *logrus.Logger, cidr *net.IPNet, defaultMTU int, routes []Route) (*waterTun, error) { routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } // NOTE: You cannot set the deviceName under Windows, so you must check tun.Device after calling .Activate() return &waterTun{ cidr: cidr, MTU: defaultMTU, Routes: routes, routeTree: routeTree, }, nil } func (t *waterTun) Activate() error { var err error t.Interface, err = water.New(water.Config{ DeviceType: water.TUN, PlatformSpecificParams: water.PlatformSpecificParams{ ComponentID: "tap0901", Network: t.cidr.String(), }, }) if err != nil { return fmt.Errorf("activate failed: %v", err) } t.Device = t.Interface.Name() // TODO use syscalls instead of exec.Command err = exec.Command( `C:\Windows\System32\netsh.exe`, "interface", "ipv4", "set", "address", fmt.Sprintf("name=%s", t.Device), "source=static", fmt.Sprintf("addr=%s", t.cidr.IP), fmt.Sprintf("mask=%s", net.IP(t.cidr.Mask)), "gateway=none", ).Run() if err != nil { return fmt.Errorf("failed to run 'netsh' to set address: %s", err) } err = exec.Command( `C:\Windows\System32\netsh.exe`, "interface", "ipv4", "set", "interface", t.Device, fmt.Sprintf("mtu=%d", t.MTU), ).Run() if err != nil { return fmt.Errorf("failed to run 'netsh' to set MTU: %s", err) } iface, err := net.InterfaceByName(t.Device) if err != nil { return fmt.Errorf("failed to find interface named %s: %v", t.Device, err) } for _, r := range t.Routes { if r.Via == nil { // We don't allow route MTUs so only install routes with a via continue } err = exec.Command( "C:\\Windows\\System32\\route.exe", "add", r.Cidr.String(), r.Via.String(), "IF", strconv.Itoa(iface.Index), "METRIC", strconv.Itoa(r.Metric), ).Run() if err != nil 
{ return fmt.Errorf("failed to add the unsafe_route %s: %v", r.Cidr.String(), err) } } return nil } func (t *waterTun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } func (t *waterTun) Cidr() *net.IPNet { return t.cidr } func (t *waterTun) Name() string { return t.Device } func (t *waterTun) Close() error { if t.Interface == nil { return nil } return t.Interface.Close() } func (t *waterTun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: multiqueue not implemented for windows") } nebula-1.6.1+dfsg/overlay/tun_windows.go000066400000000000000000000024731434072716400203120ustar00rootroot00000000000000//go:build !e2e_testing // +build !e2e_testing package overlay import ( "fmt" "net" "os" "path/filepath" "runtime" "syscall" "github.com/sirupsen/logrus" ) func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int) (Device, error) { return nil, fmt.Errorf("newTunFromFd not supported in Windows") } func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool) (Device, error) { useWintun := true if err := checkWinTunExists(); err != nil { l.WithError(err).Warn("Check Wintun driver failed, fallback to wintap driver") useWintun = false } if useWintun { device, err := newWinTun(l, deviceName, cidr, defaultMTU, routes) if err != nil { return nil, fmt.Errorf("create Wintun interface failed, %w", err) } return device, nil } device, err := newWaterTun(l, cidr, defaultMTU, routes) if err != nil { return nil, fmt.Errorf("create wintap driver failed, %w", err) } return device, nil } func checkWinTunExists() error { myPath, err := os.Executable() if err != nil { return err } arch := runtime.GOARCH switch arch { case "386": //NOTE: wintun bundles 386 as x86 arch = "x86" } _, err = syscall.LoadDLL(filepath.Join(filepath.Dir(myPath), "dist", "windows", "wintun", "bin", arch, "wintun.dll")) return 
err } nebula-1.6.1+dfsg/overlay/tun_wintun_windows.go000066400000000000000000000076331434072716400217210ustar00rootroot00000000000000package overlay import ( "crypto" "fmt" "io" "net" "net/netip" "unsafe" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/wintun" "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" ) const tunGUIDLabel = "Fixed Nebula Windows GUID v1" type winTun struct { Device string cidr *net.IPNet prefix netip.Prefix MTU int Routes []Route routeTree *cidr.Tree4 tun *wintun.NativeTun } func generateGUIDByDeviceName(name string) (*windows.GUID, error) { // GUID is 128 bit hash := crypto.MD5.New() _, err := hash.Write([]byte(tunGUIDLabel)) if err != nil { return nil, err } _, err = hash.Write([]byte(name)) if err != nil { return nil, err } sum := hash.Sum(nil) return (*windows.GUID)(unsafe.Pointer(&sum[0])), nil } func newWinTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route) (*winTun, error) { guid, err := generateGUIDByDeviceName(deviceName) if err != nil { return nil, fmt.Errorf("generate GUID failed: %w", err) } tunDevice, err := wintun.CreateTUNWithRequestedGUID(deviceName, guid, defaultMTU) if err != nil { return nil, fmt.Errorf("create TUN device failed: %w", err) } routeTree, err := makeRouteTree(l, routes, false) if err != nil { return nil, err } prefix, err := iputil.ToNetIpPrefix(*cidr) if err != nil { return nil, err } return &winTun{ Device: deviceName, cidr: cidr, prefix: prefix, MTU: defaultMTU, Routes: routes, routeTree: routeTree, tun: tunDevice.(*wintun.NativeTun), }, nil } func (t *winTun) Activate() error { luid := winipcfg.LUID(t.tun.LUID()) if err := luid.SetIPAddresses([]netip.Prefix{t.prefix}); err != nil { return fmt.Errorf("failed to set address: %w", err) } foundDefault4 := false routes := make([]*winipcfg.RouteData, 0, len(t.Routes)+1) for _, r := range t.Routes { if r.Via == nil { // 
We don't allow route MTUs so only install routes with a via continue } if !foundDefault4 { if ones, bits := r.Cidr.Mask.Size(); ones == 0 && bits != 0 { foundDefault4 = true } } prefix, err := iputil.ToNetIpPrefix(*r.Cidr) if err != nil { return err } // Add our unsafe route routes = append(routes, &winipcfg.RouteData{ Destination: prefix, NextHop: r.Via.ToNetIpAddr(), Metric: uint32(r.Metric), }) } if err := luid.AddRoutes(routes); err != nil { return fmt.Errorf("failed to add routes: %w", err) } ipif, err := luid.IPInterface(windows.AF_INET) if err != nil { return fmt.Errorf("failed to get ip interface: %w", err) } ipif.NLMTU = uint32(t.MTU) if foundDefault4 { ipif.UseAutomaticMetric = false ipif.Metric = 0 } if err := ipif.Set(); err != nil { return fmt.Errorf("failed to set ip interface: %w", err) } return nil } func (t *winTun) RouteFor(ip iputil.VpnIp) iputil.VpnIp { r := t.routeTree.MostSpecificContains(ip) if r != nil { return r.(iputil.VpnIp) } return 0 } func (t *winTun) Cidr() *net.IPNet { return t.cidr } func (t *winTun) Name() string { return t.Device } func (t *winTun) Read(b []byte) (int, error) { return t.tun.Read(b, 0) } func (t *winTun) Write(b []byte) (int, error) { return t.tun.Write(b, 0) } func (t *winTun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, fmt.Errorf("TODO: multiqueue not implemented for windows") } func (t *winTun) Close() error { // It seems that the Windows networking stack doesn't like it when we destroy interfaces that have active routes, // so to be certain, just remove everything before destroying. 
luid := winipcfg.LUID(t.tun.LUID()) _ = luid.FlushRoutes(windows.AF_INET) _ = luid.FlushIPAddresses(windows.AF_INET) /* We don't support IPV6 yet _ = luid.FlushRoutes(windows.AF_INET6) _ = luid.FlushIPAddresses(windows.AF_INET6) */ _ = luid.FlushDNS(windows.AF_INET) return t.tun.Close() } nebula-1.6.1+dfsg/punchy.go000066400000000000000000000040501434072716400155500ustar00rootroot00000000000000package nebula import ( "sync/atomic" "time" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" ) type Punchy struct { atomicPunch int32 atomicRespond int32 atomicDelay time.Duration l *logrus.Logger } func NewPunchyFromConfig(l *logrus.Logger, c *config.C) *Punchy { p := &Punchy{l: l} p.reload(c, true) c.RegisterReloadCallback(func(c *config.C) { p.reload(c, false) }) return p } func (p *Punchy) reload(c *config.C, initial bool) { if initial { var yes bool if c.IsSet("punchy.punch") { yes = c.GetBool("punchy.punch", false) } else { // Deprecated fallback yes = c.GetBool("punchy", false) } if yes { atomic.StoreInt32(&p.atomicPunch, 1) } else { atomic.StoreInt32(&p.atomicPunch, 0) } } else if c.HasChanged("punchy.punch") || c.HasChanged("punchy") { //TODO: it should be relatively easy to support this, just need to be able to cancel the goroutine and boot it up from here p.l.Warn("Changing punchy.punch with reload is not supported, ignoring.") } if initial || c.HasChanged("punchy.respond") || c.HasChanged("punch_back") { var yes bool if c.IsSet("punchy.respond") { yes = c.GetBool("punchy.respond", false) } else { // Deprecated fallback yes = c.GetBool("punch_back", false) } if yes { atomic.StoreInt32(&p.atomicRespond, 1) } else { atomic.StoreInt32(&p.atomicRespond, 0) } if !initial { p.l.Infof("punchy.respond changed to %v", p.GetRespond()) } } //NOTE: this will not apply to any in progress operations, only the next one if initial || c.HasChanged("punchy.delay") { atomic.StoreInt64((*int64)(&p.atomicDelay), (int64)(c.GetDuration("punchy.delay", time.Second))) if 
!initial { p.l.Infof("punchy.delay changed to %s", p.GetDelay()) } } } func (p *Punchy) GetPunch() bool { return atomic.LoadInt32(&p.atomicPunch) == 1 } func (p *Punchy) GetRespond() bool { return atomic.LoadInt32(&p.atomicRespond) == 1 } func (p *Punchy) GetDelay() time.Duration { return (time.Duration)(atomic.LoadInt64((*int64)(&p.atomicDelay))) } nebula-1.6.1+dfsg/punchy_test.go000066400000000000000000000032611434072716400166120ustar00rootroot00000000000000package nebula import ( "testing" "time" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/test" "github.com/stretchr/testify/assert" ) func TestNewPunchyFromConfig(t *testing.T) { l := test.NewLogger() c := config.NewC(l) // Test defaults p := NewPunchyFromConfig(l, c) assert.Equal(t, false, p.GetPunch()) assert.Equal(t, false, p.GetRespond()) assert.Equal(t, time.Second, p.GetDelay()) // punchy deprecation c.Settings["punchy"] = true p = NewPunchyFromConfig(l, c) assert.Equal(t, true, p.GetPunch()) // punchy.punch c.Settings["punchy"] = map[interface{}]interface{}{"punch": true} p = NewPunchyFromConfig(l, c) assert.Equal(t, true, p.GetPunch()) // punch_back deprecation c.Settings["punch_back"] = true p = NewPunchyFromConfig(l, c) assert.Equal(t, true, p.GetRespond()) // punchy.respond c.Settings["punchy"] = map[interface{}]interface{}{"respond": true} c.Settings["punch_back"] = false p = NewPunchyFromConfig(l, c) assert.Equal(t, true, p.GetRespond()) // punchy.delay c.Settings["punchy"] = map[interface{}]interface{}{"delay": "1m"} p = NewPunchyFromConfig(l, c) assert.Equal(t, time.Minute, p.GetDelay()) } func TestPunchy_reload(t *testing.T) { l := test.NewLogger() c := config.NewC(l) delay, _ := time.ParseDuration("1m") assert.NoError(t, c.LoadString(` punchy: delay: 1m respond: false `)) p := NewPunchyFromConfig(l, c) assert.Equal(t, delay, p.GetDelay()) assert.Equal(t, false, p.GetRespond()) newDelay, _ := time.ParseDuration("10m") assert.NoError(t, c.ReloadConfigString(` punchy: delay: 10m 
respond: true `)) p.reload(c, false) assert.Equal(t, newDelay, p.GetDelay()) assert.Equal(t, true, p.GetRespond()) } nebula-1.6.1+dfsg/relay_manager.go000066400000000000000000000230151434072716400170520ustar00rootroot00000000000000package nebula import ( "context" "errors" "fmt" "sync/atomic" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" ) type relayManager struct { l *logrus.Logger hostmap *HostMap atomicAmRelay int32 } func NewRelayManager(ctx context.Context, l *logrus.Logger, hostmap *HostMap, c *config.C) *relayManager { rm := &relayManager{ l: l, hostmap: hostmap, } rm.reload(c, true) c.RegisterReloadCallback(func(c *config.C) { err := rm.reload(c, false) if err != nil { l.WithError(err).Error("Failed to reload relay_manager") } }) return rm } func (rm *relayManager) reload(c *config.C, initial bool) error { if initial || c.HasChanged("relay.am_relay") { rm.setAmRelay(c.GetBool("relay.am_relay", false)) } return nil } func (rm *relayManager) GetAmRelay() bool { return atomic.LoadInt32(&rm.atomicAmRelay) == 1 } func (rm *relayManager) setAmRelay(v bool) { var val int32 switch v { case true: val = 1 case false: val = 0 } atomic.StoreInt32(&rm.atomicAmRelay, val) } // AddRelay finds an available relay index on the hostmap, and associates the relay info with it. // relayHostInfo is the Nebula peer which can be used as a relay to access the target vpnIp. 
func AddRelay(l *logrus.Logger, relayHostInfo *HostInfo, hm *HostMap, vpnIp iputil.VpnIp, remoteIdx *uint32, relayType int, state int) (uint32, error) { hm.Lock() defer hm.Unlock() for i := 0; i < 32; i++ { index, err := generateIndex(l) if err != nil { return 0, err } _, inRelays := hm.Relays[index] if !inRelays { hm.Relays[index] = relayHostInfo newRelay := Relay{ Type: relayType, State: state, LocalIndex: index, PeerIp: vpnIp, } if remoteIdx != nil { newRelay.RemoteIndex = *remoteIdx } relayHostInfo.relayState.InsertRelay(vpnIp, index, &newRelay) return index, nil } } return 0, errors.New("failed to generate unique localIndexId") } // EstablishRelay updates a Requested Relay to become an Established Relay, which can pass traffic. func (rm *relayManager) EstablishRelay(relayHostInfo *HostInfo, m *NebulaControl) (*Relay, error) { relay, ok := relayHostInfo.relayState.QueryRelayForByIdx(m.InitiatorRelayIndex) if !ok { rm.l.WithFields(logrus.Fields{"relayHostInfo": relayHostInfo.vpnIp, "initiatorRelayIndex": m.InitiatorRelayIndex, "relayFrom": m.RelayFromIp, "relayTo": m.RelayToIp}).Info("relayManager EstablishRelay relayForByIdx not found") return nil, fmt.Errorf("unknown relay") } // relay deserves some synchronization relay.RemoteIndex = m.ResponderRelayIndex relay.State = Established return relay, nil } func (rm *relayManager) HandleControlMsg(h *HostInfo, m *NebulaControl, f *Interface) { switch m.Type { case NebulaControl_CreateRelayRequest: rm.handleCreateRelayRequest(h, f, m) case NebulaControl_CreateRelayResponse: rm.handleCreateRelayResponse(h, f, m) } } func (rm *relayManager) handleCreateRelayResponse(h *HostInfo, f *Interface, m *NebulaControl) { rm.l.WithFields(logrus.Fields{ "relayFrom": iputil.VpnIp(m.RelayFromIp), "relayTarget": iputil.VpnIp(m.RelayToIp), "initiatorIdx": m.InitiatorRelayIndex, "responderIdx": m.ResponderRelayIndex, "hostInfo": h.vpnIp}). 
Info("handleCreateRelayResponse") target := iputil.VpnIp(m.RelayToIp) relay, err := rm.EstablishRelay(h, m) if err != nil { rm.l.WithError(err).WithField("target", target.String()).Error("Failed to update relay for target") return } // Do I need to complete the relays now? if relay.Type == TerminalType { return } // I'm the middle man. Let the initiator know that the I've established the relay they requested. peerHostInfo, err := rm.hostmap.QueryVpnIp(relay.PeerIp) if err != nil { rm.l.WithError(err).WithField("relayPeerIp", relay.PeerIp).Error("Can't find a HostInfo for peer IP") return } peerRelay, ok := peerHostInfo.relayState.QueryRelayForByIp(target) if !ok { rm.l.WithField("peerIp", peerHostInfo.vpnIp).WithField("target", target.String()).Error("peerRelay does not have Relay state for target IP", peerHostInfo.vpnIp.String(), target.String()) return } peerRelay.State = Established resp := NebulaControl{ Type: NebulaControl_CreateRelayResponse, ResponderRelayIndex: peerRelay.LocalIndex, InitiatorRelayIndex: peerRelay.RemoteIndex, RelayFromIp: uint32(peerHostInfo.vpnIp), RelayToIp: uint32(target), } msg, err := resp.Marshal() if err != nil { rm.l. WithError(err).Error("relayManager Failed to marhsal Control CreateRelayResponse message to create relay") } else { f.SendMessageToVpnIp(header.Control, 0, peerHostInfo.vpnIp, msg, make([]byte, 12), make([]byte, mtu)) } } func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *NebulaControl) { rm.l.WithFields(logrus.Fields{ "relayFrom": iputil.VpnIp(m.RelayFromIp), "relayTarget": iputil.VpnIp(m.RelayToIp), "initiatorIdx": m.InitiatorRelayIndex, "hostInfo": h.vpnIp}). Info("handleCreateRelayRequest") from := iputil.VpnIp(m.RelayFromIp) target := iputil.VpnIp(m.RelayToIp) // Is the target of the relay me? if target == f.myVpnIp { existingRelay, ok := h.relayState.QueryRelayForByIp(from) addRelay := !ok if ok { // Clean up existing relay, if this is a new request. 
if existingRelay.RemoteIndex != m.InitiatorRelayIndex { // We got a brand new Relay request, because its index is different than what we saw before. // Clean up the existing Relay state, and get ready to record new Relay state. rm.hostmap.RemoveRelay(existingRelay.LocalIndex) addRelay = true } } if addRelay { _, err := AddRelay(rm.l, h, f.hostMap, from, &m.InitiatorRelayIndex, TerminalType, Established) if err != nil { return } } relay, ok := h.relayState.QueryRelayForByIp(from) if ok && m.InitiatorRelayIndex != relay.RemoteIndex { // Do something, Something happened. } resp := NebulaControl{ Type: NebulaControl_CreateRelayResponse, ResponderRelayIndex: relay.LocalIndex, InitiatorRelayIndex: relay.RemoteIndex, RelayFromIp: uint32(from), RelayToIp: uint32(target), } msg, err := resp.Marshal() if err != nil { rm.l. WithError(err).Error("relayManager Failed to marshal Control CreateRelayResponse message to create relay") } else { f.SendMessageToVpnIp(header.Control, 0, h.vpnIp, msg, make([]byte, 12), make([]byte, mtu)) } return } else { // the target is not me. Create a relay to the target, from me. if rm.GetAmRelay() == false { return } peer, err := rm.hostmap.QueryVpnIp(target) if err != nil { // Try to establish a connection to this host. If we get a future relay request, // we'll be ready! f.getOrHandshake(target) return } if peer.remote == nil { // Only create relays to peers for whom I have a direct connection return } sendCreateRequest := false var index uint32 targetRelay, ok := peer.relayState.QueryRelayForByIp(from) if ok { index = targetRelay.LocalIndex if targetRelay.State == Requested { sendCreateRequest = true } } else { // Allocate an index in the hostMap for this relay peer index, err = AddRelay(rm.l, peer, f.hostMap, from, nil, ForwardingType, Requested) if err != nil { return } sendCreateRequest = true } if sendCreateRequest { // Send a CreateRelayRequest to the peer. 
req := NebulaControl{ Type: NebulaControl_CreateRelayRequest, InitiatorRelayIndex: index, RelayFromIp: uint32(h.vpnIp), RelayToIp: uint32(target), } msg, err := req.Marshal() if err != nil { rm.l. WithError(err).Error("relayManager Failed to marshal Control message to create relay") } else { f.SendMessageToVpnIp(header.Control, 0, target, msg, make([]byte, 12), make([]byte, mtu)) } } // Also track the half-created Relay state just received relay, ok := h.relayState.QueryRelayForByIp(target) if !ok { // Add the relay state := Requested if targetRelay != nil && targetRelay.State == Established { state = Established } _, err := AddRelay(rm.l, h, f.hostMap, target, &m.InitiatorRelayIndex, ForwardingType, state) if err != nil { rm.l. WithError(err).Error("relayManager Failed to allocate a local index for relay") return } } else { if relay.RemoteIndex != m.InitiatorRelayIndex { // This is a stale Relay entry for the same tunnel targets. // Clean up the existing stuff. rm.RemoveRelay(relay.LocalIndex) // Add the new relay _, err := AddRelay(rm.l, h, f.hostMap, target, &m.InitiatorRelayIndex, ForwardingType, Requested) if err != nil { return } relay, _ = h.relayState.QueryRelayForByIp(target) } switch relay.State { case Established: resp := NebulaControl{ Type: NebulaControl_CreateRelayResponse, ResponderRelayIndex: relay.LocalIndex, InitiatorRelayIndex: relay.RemoteIndex, RelayFromIp: uint32(h.vpnIp), RelayToIp: uint32(target), } msg, err := resp.Marshal() if err != nil { rm.l. 
WithError(err).Error("relayManager Failed to marshal Control CreateRelayResponse message to create relay") } else { f.SendMessageToVpnIp(header.Control, 0, h.vpnIp, msg, make([]byte, 12), make([]byte, mtu)) } case Requested: // Keep waiting for the other relay to complete } } } } func (rm *relayManager) RemoveRelay(localIdx uint32) { rm.hostmap.RemoveRelay(localIdx) } nebula-1.6.1+dfsg/remote_list.go000066400000000000000000000374161434072716400166040ustar00rootroot00000000000000package nebula import ( "bytes" "net" "sort" "sync" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/udp" ) // forEachFunc is used to benefit folks that want to do work inside the lock type forEachFunc func(addr *udp.Addr, preferred bool) // The checkFuncs here are to simplify bulk importing LH query response logic into a single function (reset slice and iterate) type checkFuncV4 func(vpnIp iputil.VpnIp, to *Ip4AndPort) bool type checkFuncV6 func(vpnIp iputil.VpnIp, to *Ip6AndPort) bool // CacheMap is a struct that better represents the lighthouse cache for humans // The string key is the owners vpnIp type CacheMap map[string]*Cache // Cache is the other part of CacheMap to better represent the lighthouse cache for humans // We don't reason about ipv4 vs ipv6 here type Cache struct { Learned []*udp.Addr `json:"learned,omitempty"` Reported []*udp.Addr `json:"reported,omitempty"` Relay []*net.IP `json:"relay"` } //TODO: Seems like we should plop static host entries in here too since the are protected by the lighthouse from deletion // We will never clean learned/reported information for them as it stands today // cache is an internal struct that splits v4 and v6 addresses inside the cache map type cache struct { v4 *cacheV4 v6 *cacheV6 relay *cacheRelay } type cacheRelay struct { relay []uint32 } // cacheV4 stores learned and reported ipv4 records under cache type cacheV4 struct { learned *Ip4AndPort reported []*Ip4AndPort } // cacheV4 stores learned and reported ipv6 records 
under cache
type cacheV6 struct {
	learned  *Ip6AndPort
	reported []*Ip6AndPort
}

// RemoteList is a unifying concept for lighthouse servers and clients as well as hostinfos.
// It serves as a local cache of query replies, host update notifications, and locally learned addresses
type RemoteList struct {
	// Every interaction with internals requires a lock!
	sync.RWMutex

	// A deduplicated set of addresses. Any accessor should lock beforehand.
	addrs []*udp.Addr

	// A set of relay addresses. VpnIp addresses that the remote identified as relays.
	relays []*iputil.VpnIp

	// These are maps to store v4 and v6 addresses per lighthouse
	// Map key is the vpnIp of the person that told us about this the cached entries underneath.
	// For learned addresses, this is the vpnIp that sent the packet
	cache map[iputil.VpnIp]*cache

	// This is a list of remotes that we have tried to handshake with and have returned from the wrong vpn ip.
	// They should not be tried again during a handshake
	badRemotes []*udp.Addr

	// A flag that the cache may have changed and addrs needs to be rebuilt
	shouldRebuild bool
}

// NewRemoteList creates a new empty RemoteList
func NewRemoteList() *RemoteList {
	return &RemoteList{
		addrs:  make([]*udp.Addr, 0),
		relays: make([]*iputil.VpnIp, 0),
		cache:  make(map[iputil.VpnIp]*cache),
	}
}

// Len locks and reports the size of the deduplicated address list
// The deduplication work may need to occur here, so you must pass preferredRanges
func (r *RemoteList) Len(preferredRanges []*net.IPNet) int {
	r.Rebuild(preferredRanges)
	r.RLock()
	defer r.RUnlock()
	return len(r.addrs)
}

// ForEach locks and will call the forEachFunc for every deduplicated address in the list
// The deduplication work may need to occur here, so you must pass preferredRanges
func (r *RemoteList) ForEach(preferredRanges []*net.IPNet, forEach forEachFunc) {
	r.Rebuild(preferredRanges)
	// Only a read lock is needed once the rebuild (which takes the write lock) is done
	r.RLock()
	for _, v := range r.addrs {
		forEach(v, isPreferred(v.IP, preferredRanges))
	}
	r.RUnlock()
}

// CopyAddrs locks and makes a deep copy of the deduplicated address list
// The deduplication work may need to occur here, so you must pass preferredRanges
func (r *RemoteList) CopyAddrs(preferredRanges []*net.IPNet) []*udp.Addr {
	// A nil receiver is tolerated so callers can use this on an absent RemoteList
	if r == nil {
		return nil
	}

	r.Rebuild(preferredRanges)

	r.RLock()
	defer r.RUnlock()
	// Deep copy each entry so callers can hold the result without the lock
	c := make([]*udp.Addr, len(r.addrs))
	for i, v := range r.addrs {
		c[i] = v.Copy()
	}
	return c
}

// LearnRemote locks and sets the learned slot for the owner vpn ip to the provided addr
// Currently this is only needed when HostInfo.SetRemote is called as that should cover both handshaking and roaming.
// It will mark the deduplicated address list as dirty, so do not call it unless new information is available
//TODO: this needs to support the allow list list
func (r *RemoteList) LearnRemote(ownerVpnIp iputil.VpnIp, addr *udp.Addr) {
	r.Lock()
	defer r.Unlock()
	// Route into the v4 or v6 cache slot based on the address family
	if v4 := addr.IP.To4(); v4 != nil {
		r.unlockedSetLearnedV4(ownerVpnIp, NewIp4AndPort(v4, uint32(addr.Port)))
	} else {
		r.unlockedSetLearnedV6(ownerVpnIp, NewIp6AndPort(addr.IP, uint32(addr.Port)))
	}
}

// CopyCache locks and creates a more human friendly form of the internal address cache.
// This may contain duplicates and blocked addresses
func (r *RemoteList) CopyCache() *CacheMap {
	r.RLock()
	defer r.RUnlock()

	cm := make(CacheMap)
	// getOrMake lazily creates the per-owner Cache entry keyed by the owner's string form
	getOrMake := func(vpnIp string) *Cache {
		c := cm[vpnIp]
		if c == nil {
			c = &Cache{
				Learned:  make([]*udp.Addr, 0),
				Reported: make([]*udp.Addr, 0),
				Relay:    make([]*net.IP, 0),
			}
			cm[vpnIp] = c
		}
		return c
	}

	for owner, mc := range r.cache {
		c := getOrMake(owner.String())

		if mc.v4 != nil {
			if mc.v4.learned != nil {
				c.Learned = append(c.Learned, NewUDPAddrFromLH4(mc.v4.learned))
			}

			for _, a := range mc.v4.reported {
				c.Reported = append(c.Reported, NewUDPAddrFromLH4(a))
			}
		}

		if mc.v6 != nil {
			if mc.v6.learned != nil {
				c.Learned = append(c.Learned, NewUDPAddrFromLH6(mc.v6.learned))
			}

			for _, a := range mc.v6.reported {
				c.Reported = append(c.Reported, NewUDPAddrFromLH6(a))
			}
		}

		if mc.relay != nil {
			for _, a := range mc.relay.relay {
				// nip must be a fresh variable per iteration; its address is stored
				nip := iputil.VpnIp(a).ToIP()
				c.Relay = append(c.Relay, &nip)
			}
		}
	}

	return &cm
}

// BlockRemote locks and records the address as bad, it will be excluded from the deduplicated address list
func (r *RemoteList) BlockRemote(bad *udp.Addr) {
	if bad == nil {
		// relays can have nil udp Addrs
		return
	}
	r.Lock()
	defer r.Unlock()

	// Check if we already blocked this addr
	if r.unlockedIsBad(bad) {
		return
	}

	// We copy here because we are taking something else's memory and we can't trust everything
	r.badRemotes = append(r.badRemotes, bad.Copy())

	// Mark the next interaction must recollect/dedupe
	r.shouldRebuild = true
}

// CopyBlockedRemotes locks and makes a deep copy of the blocked remotes list
func (r *RemoteList) CopyBlockedRemotes() []*udp.Addr {
	r.RLock()
	defer r.RUnlock()

	c := make([]*udp.Addr, len(r.badRemotes))
	for i, v := range r.badRemotes {
		c[i] = v.Copy()
	}
	return c
}

// ResetBlockedRemotes locks and clears the blocked remotes list
func (r *RemoteList) ResetBlockedRemotes() {
	r.Lock()
	r.badRemotes = nil
	r.Unlock()
}

// Rebuild locks and generates the deduplicated address list only if there is work to be done
// There is generally no reason to call this directly but it is safe to do so
func (r *RemoteList) Rebuild(preferredRanges []*net.IPNet) {
	r.Lock()
	defer r.Unlock()

	// Only rebuild if the cache changed
	//TODO: shouldRebuild is probably pointless as we don't check for actual change when lighthouse updates come in
	if r.shouldRebuild {
		r.unlockedCollect()
		r.shouldRebuild = false
	}

	// Always re-sort, preferredRanges can change via HUP
	r.unlockedSort(preferredRanges)
}

// unlockedIsBad assumes you have the write lock and checks if the remote matches any entry in the blocked address list
func (r *RemoteList) unlockedIsBad(remote *udp.Addr) bool {
	for _, v := range r.badRemotes {
		if v.Equals(remote) {
			return true
		}
	}
	return false
}

// unlockedSetLearnedV4 assumes you have the write lock and sets the current learned address for this owner and marks the
// deduplicated address list as dirty
func (r *RemoteList) unlockedSetLearnedV4(ownerVpnIp iputil.VpnIp, to *Ip4AndPort) {
	r.shouldRebuild = true
	r.unlockedGetOrMakeV4(ownerVpnIp).learned = to
}

// unlockedSetV4 assumes you have the write lock and resets the reported list of ips for this owner to the list provided
// and marks the deduplicated address list as dirty
func (r *RemoteList) unlockedSetV4(ownerVpnIp iputil.VpnIp, vpnIp iputil.VpnIp, to []*Ip4AndPort, check checkFuncV4) {
	r.shouldRebuild = true
	c := r.unlockedGetOrMakeV4(ownerVpnIp)

	// Reset the slice
	c.reported = c.reported[:0]

	// We can't take their array but we can take their pointers
	// Capped at MaxRemotes; entries failing the check func are dropped
	for _, v := range to[:minInt(len(to), MaxRemotes)] {
		if check(vpnIp, v) {
			c.reported = append(c.reported, v)
		}
	}
}

// unlockedSetRelay assumes you have the write lock and resets the relay list for this owner to the list provided
// (capped at MaxRemotes) and marks the deduplicated address list as dirty
func (r *RemoteList) unlockedSetRelay(ownerVpnIp iputil.VpnIp, vpnIp iputil.VpnIp, to []uint32) {
	r.shouldRebuild = true
	c := r.unlockedGetOrMakeRelay(ownerVpnIp)

	// Reset the slice
	c.relay = c.relay[:0]

	// uint32 values are copied by append, so we do not alias the caller's array
	c.relay = append(c.relay, to[:minInt(len(to), MaxRemotes)]...)
}

// unlockedPrependV4 assumes you have the write lock and prepends the address in the reported list for this owner
// This is only useful for establishing static hosts
func (r *RemoteList) unlockedPrependV4(ownerVpnIp iputil.VpnIp, to *Ip4AndPort) {
	r.shouldRebuild = true
	c := r.unlockedGetOrMakeV4(ownerVpnIp)

	// We are doing the easy append because this is rarely called
	c.reported = append([]*Ip4AndPort{to}, c.reported...)
	if len(c.reported) > MaxRemotes {
		c.reported = c.reported[:MaxRemotes]
	}
}

// unlockedSetLearnedV6 assumes you have the write lock and sets the current learned address for this owner and marks the
// deduplicated address list as dirty
func (r *RemoteList) unlockedSetLearnedV6(ownerVpnIp iputil.VpnIp, to *Ip6AndPort) {
	r.shouldRebuild = true
	r.unlockedGetOrMakeV6(ownerVpnIp).learned = to
}

// unlockedSetV6 assumes you have the write lock and resets the reported list of ips for this owner to the list provided
// and marks the deduplicated address list as dirty
func (r *RemoteList) unlockedSetV6(ownerVpnIp iputil.VpnIp, vpnIp iputil.VpnIp, to []*Ip6AndPort, check checkFuncV6) {
	r.shouldRebuild = true
	c := r.unlockedGetOrMakeV6(ownerVpnIp)

	// Reset the slice
	c.reported = c.reported[:0]

	// We can't take their array but we can take their pointers
	// Capped at MaxRemotes; entries failing the check func are dropped
	for _, v := range to[:minInt(len(to), MaxRemotes)] {
		if check(vpnIp, v) {
			c.reported = append(c.reported, v)
		}
	}
}

// unlockedPrependV6 assumes you have the write lock and prepends the address in the reported list for this owner
// This is only useful for establishing static hosts
func (r *RemoteList) unlockedPrependV6(ownerVpnIp iputil.VpnIp, to *Ip6AndPort) {
	r.shouldRebuild = true
	c := r.unlockedGetOrMakeV6(ownerVpnIp)

	// We are doing the easy append because this is rarely called
	c.reported = append([]*Ip6AndPort{to}, c.reported...)
	if len(c.reported) > MaxRemotes {
		c.reported = c.reported[:MaxRemotes]
	}
}

// unlockedGetOrMakeRelay assumes you have the write lock and builds the cache and owner entry. Only the relay pointer
// is established. The caller must dirty the deduplicated address list if required
func (r *RemoteList) unlockedGetOrMakeRelay(ownerVpnIp iputil.VpnIp) *cacheRelay {
	am := r.cache[ownerVpnIp]
	if am == nil {
		am = &cache{}
		r.cache[ownerVpnIp] = am
	}
	// Avoid occupying memory for relay if we never have any
	if am.relay == nil {
		am.relay = &cacheRelay{}
	}
	return am.relay
}

// unlockedGetOrMakeV4 assumes you have the write lock and builds the cache and owner entry. Only the v4 pointer is established.
// The caller must dirty the learned address cache if required
func (r *RemoteList) unlockedGetOrMakeV4(ownerVpnIp iputil.VpnIp) *cacheV4 {
	am := r.cache[ownerVpnIp]
	if am == nil {
		am = &cache{}
		r.cache[ownerVpnIp] = am
	}
	// Avoid occupying memory for v4 addresses if we never have any
	if am.v4 == nil {
		am.v4 = &cacheV4{}
	}
	return am.v4
}

// unlockedGetOrMakeV6 assumes you have the write lock and builds the cache and owner entry. Only the v6 pointer is established.
// The caller must dirty the learned address cache if required
func (r *RemoteList) unlockedGetOrMakeV6(ownerVpnIp iputil.VpnIp) *cacheV6 {
	am := r.cache[ownerVpnIp]
	if am == nil {
		am = &cache{}
		r.cache[ownerVpnIp] = am
	}
	// Avoid occupying memory for v6 addresses if we never have any
	if am.v6 == nil {
		am.v6 = &cacheV6{}
	}
	return am.v6
}

// unlockedCollect assumes you have the write lock and collects/transforms the cache into the deduped address list.
// The result of this function can contain duplicates. unlockedSort handles cleaning it.
func (r *RemoteList) unlockedCollect() {
	// Reuse the existing backing arrays to avoid reallocating on every rebuild
	addrs := r.addrs[:0]
	relays := r.relays[:0]

	for _, c := range r.cache {
		if c.v4 != nil {
			if c.v4.learned != nil {
				u := NewUDPAddrFromLH4(c.v4.learned)
				if !r.unlockedIsBad(u) {
					addrs = append(addrs, u)
				}
			}

			for _, v := range c.v4.reported {
				u := NewUDPAddrFromLH4(v)
				if !r.unlockedIsBad(u) {
					addrs = append(addrs, u)
				}
			}
		}

		if c.v6 != nil {
			if c.v6.learned != nil {
				u := NewUDPAddrFromLH6(c.v6.learned)
				if !r.unlockedIsBad(u) {
					addrs = append(addrs, u)
				}
			}

			for _, v := range c.v6.reported {
				u := NewUDPAddrFromLH6(v)
				if !r.unlockedIsBad(u) {
					addrs = append(addrs, u)
				}
			}
		}

		if c.relay != nil {
			for _, v := range c.relay.relay {
				// ip must be a fresh variable per iteration; its address is stored
				ip := iputil.VpnIp(v)
				relays = append(relays, &ip)
			}
		}
	}

	r.addrs = addrs
	r.relays = relays
}

// unlockedSort assumes you have the write lock and performs the deduping and sorting of the address list
func (r *RemoteList) unlockedSort(preferredRanges []*net.IPNet) {
	n := len(r.addrs)
	if n < 2 {
		return
	}

	// Ordering: preferred-range addresses, then ipv6, then public ipv4, then private ipv4,
	// breaking ties lexically by ip and finally by port
	lessFunc := func(i, j int) bool {
		a := r.addrs[i]
		b := r.addrs[j]
		// Preferred addresses first

		aPref := isPreferred(a.IP, preferredRanges)
		bPref := isPreferred(b.IP, preferredRanges)
		switch {
		case aPref && !bPref:
			// If i is preferred and j is not, i is less than j
			return true

		case !aPref && bPref:
			// If j is preferred then i is not due to the else, i is not less than j
			return false

		default:
			// Both i and j are either preferred or not, sort within that
		}

		// ipv6 addresses 2nd
		a4 := a.IP.To4()
		b4 := b.IP.To4()
		switch {
		case a4 == nil && b4 != nil:
			// If i is v6 and j is v4, i is less than j
			return true

		case a4 != nil && b4 == nil:
			// If j is v6 and i is v4, i is not less than j
			return false

		case a4 != nil && b4 != nil:
			// Special case for ipv4, a4 and b4 are not nil
			aPrivate := isPrivateIP(a4)
			bPrivate := isPrivateIP(b4)
			switch {
			case !aPrivate && bPrivate:
				// If i is a public ip (not private) and j is a private ip, i is less then j
				return true

			case aPrivate && !bPrivate:
				// If j is public (not private) then i is private due to the else, i is not less than j
				return false

			default:
				// Both i and j are either public or private, sort within that
			}

		default:
			// Both i and j are either ipv4 or ipv6, sort within that
		}

		// lexical order of ips 3rd
		c := bytes.Compare(a.IP, b.IP)
		if c == 0 {
			// Ips are the same, Lexical order of ports 4th
			return a.Port < b.Port
		}

		// Ip wasn't the same
		return c < 0
	}

	// Sort it
	sort.Slice(r.addrs, lessFunc)

	// Deduplicate in place: a is the index of the last kept (unique) entry, b scans ahead
	a, b := 0, 1
	for b < n {
		if !r.addrs[a].Equals(r.addrs[b]) {
			a++
			if a != b {
				r.addrs[a], r.addrs[b] = r.addrs[b], r.addrs[a]
			}
		}
		b++
	}

	r.addrs = r.addrs[:a+1]
	return
}

// minInt returns the minimum integer of a or b
func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// isPreferred returns true of the ip is contained in the preferredRanges list
func isPreferred(ip net.IP, preferredRanges []*net.IPNet) bool {
	//TODO: this would be better in a CIDR6Tree
	for _, p := range preferredRanges {
		if p.Contains(ip) {
			return true
		}
	}
	return false
}

// The three RFC 1918 private ranges; names follow the RFC's "24-bit/20-bit/16-bit block" terminology
var _, private24BitBlock, _ = net.ParseCIDR("10.0.0.0/8")
var _, private20BitBlock, _ = net.ParseCIDR("172.16.0.0/12")
var _, private16BitBlock, _ = net.ParseCIDR("192.168.0.0/16")

// isPrivateIP returns true if the ip is contained by a rfc 1918 private range
func isPrivateIP(ip net.IP) bool {
	//TODO: another great cidrtree option
	//TODO: Private for ipv6 or just let it ride?
return private24BitBlock.Contains(ip) || private20BitBlock.Contains(ip) || private16BitBlock.Contains(ip) } nebula-1.6.1+dfsg/remote_list_test.go000066400000000000000000000177611434072716400176440ustar00rootroot00000000000000package nebula import ( "net" "testing" "github.com/slackhq/nebula/iputil" "github.com/stretchr/testify/assert" ) func TestRemoteList_Rebuild(t *testing.T) { rl := NewRemoteList() rl.unlockedSetV4( 0, 0, []*Ip4AndPort{ {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475}, // this is duped {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.0.182"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, // this is duped {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101}, // this is duped {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101}, // this is a dupe {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.19.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.31.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, // this is a dupe {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1476}, // almost dupe of 0 with a diff port {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475}, // this is a dupe }, func(iputil.VpnIp, *Ip4AndPort) bool { return true }, ) rl.unlockedSetV6( 1, 1, []*Ip6AndPort{ NewIp6AndPort(net.ParseIP("1::1"), 1), // this is duped NewIp6AndPort(net.ParseIP("1::1"), 2), // almost dupe of 0 with a diff port, also gets duped NewIp6AndPort(net.ParseIP("1:100::1"), 1), NewIp6AndPort(net.ParseIP("1::1"), 1), // this is a dupe NewIp6AndPort(net.ParseIP("1::1"), 2), // this is a dupe }, func(iputil.VpnIp, *Ip6AndPort) bool { return true }, ) rl.Rebuild([]*net.IPNet{}) assert.Len(t, rl.addrs, 10, "addrs contains too many entries") // ipv6 first, sorted lexically within assert.Equal(t, "[1::1]:1", rl.addrs[0].String()) assert.Equal(t, "[1::1]:2", rl.addrs[1].String()) 
assert.Equal(t, "[1:100::1]:1", rl.addrs[2].String()) // ipv4 last, sorted by public first, then private, lexically within them assert.Equal(t, "70.199.182.92:1475", rl.addrs[3].String()) assert.Equal(t, "70.199.182.92:1476", rl.addrs[4].String()) assert.Equal(t, "172.17.0.182:10101", rl.addrs[5].String()) assert.Equal(t, "172.17.1.1:10101", rl.addrs[6].String()) assert.Equal(t, "172.18.0.1:10101", rl.addrs[7].String()) assert.Equal(t, "172.19.0.1:10101", rl.addrs[8].String()) assert.Equal(t, "172.31.0.1:10101", rl.addrs[9].String()) // Now ensure we can hoist ipv4 up _, ipNet, err := net.ParseCIDR("0.0.0.0/0") assert.NoError(t, err) rl.Rebuild([]*net.IPNet{ipNet}) assert.Len(t, rl.addrs, 10, "addrs contains too many entries") // ipv4 first, public then private, lexically within them assert.Equal(t, "70.199.182.92:1475", rl.addrs[0].String()) assert.Equal(t, "70.199.182.92:1476", rl.addrs[1].String()) assert.Equal(t, "172.17.0.182:10101", rl.addrs[2].String()) assert.Equal(t, "172.17.1.1:10101", rl.addrs[3].String()) assert.Equal(t, "172.18.0.1:10101", rl.addrs[4].String()) assert.Equal(t, "172.19.0.1:10101", rl.addrs[5].String()) assert.Equal(t, "172.31.0.1:10101", rl.addrs[6].String()) // ipv6 last, sorted by public first, then private, lexically within them assert.Equal(t, "[1::1]:1", rl.addrs[7].String()) assert.Equal(t, "[1::1]:2", rl.addrs[8].String()) assert.Equal(t, "[1:100::1]:1", rl.addrs[9].String()) // Ensure we can hoist a specific ipv4 range over anything else _, ipNet, err = net.ParseCIDR("172.17.0.0/16") assert.NoError(t, err) rl.Rebuild([]*net.IPNet{ipNet}) assert.Len(t, rl.addrs, 10, "addrs contains too many entries") // Preferred ipv4 first assert.Equal(t, "172.17.0.182:10101", rl.addrs[0].String()) assert.Equal(t, "172.17.1.1:10101", rl.addrs[1].String()) // ipv6 next assert.Equal(t, "[1::1]:1", rl.addrs[2].String()) assert.Equal(t, "[1::1]:2", rl.addrs[3].String()) assert.Equal(t, "[1:100::1]:1", rl.addrs[4].String()) // the remaining ipv4 last 
assert.Equal(t, "70.199.182.92:1475", rl.addrs[5].String()) assert.Equal(t, "70.199.182.92:1476", rl.addrs[6].String()) assert.Equal(t, "172.18.0.1:10101", rl.addrs[7].String()) assert.Equal(t, "172.19.0.1:10101", rl.addrs[8].String()) assert.Equal(t, "172.31.0.1:10101", rl.addrs[9].String()) } func BenchmarkFullRebuild(b *testing.B) { rl := NewRemoteList() rl.unlockedSetV4( 0, 0, []*Ip4AndPort{ {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.0.182"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.19.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.31.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, // this is a dupe {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1476}, // dupe of 0 with a diff port }, func(iputil.VpnIp, *Ip4AndPort) bool { return true }, ) rl.unlockedSetV6( 0, 0, []*Ip6AndPort{ NewIp6AndPort(net.ParseIP("1::1"), 1), NewIp6AndPort(net.ParseIP("1::1"), 2), // dupe of 0 with a diff port NewIp6AndPort(net.ParseIP("1:100::1"), 1), NewIp6AndPort(net.ParseIP("1::1"), 1), // this is a dupe }, func(iputil.VpnIp, *Ip6AndPort) bool { return true }, ) b.Run("no preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.shouldRebuild = true rl.Rebuild([]*net.IPNet{}) } }) _, ipNet, err := net.ParseCIDR("172.17.0.0/16") assert.NoError(b, err) b.Run("1 preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.shouldRebuild = true rl.Rebuild([]*net.IPNet{ipNet}) } }) _, ipNet2, err := net.ParseCIDR("70.0.0.0/8") assert.NoError(b, err) b.Run("2 preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.shouldRebuild = true rl.Rebuild([]*net.IPNet{ipNet, ipNet2}) } }) _, ipNet3, err := net.ParseCIDR("0.0.0.0/0") assert.NoError(b, err) b.Run("3 
preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.shouldRebuild = true rl.Rebuild([]*net.IPNet{ipNet, ipNet2, ipNet3}) } }) } func BenchmarkSortRebuild(b *testing.B) { rl := NewRemoteList() rl.unlockedSetV4( 0, 0, []*Ip4AndPort{ {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1475}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.0.182"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.18.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.19.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.31.0.1"))), Port: 10101}, {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("172.17.1.1"))), Port: 10101}, // this is a dupe {Ip: uint32(iputil.Ip2VpnIp(net.ParseIP("70.199.182.92"))), Port: 1476}, // dupe of 0 with a diff port }, func(iputil.VpnIp, *Ip4AndPort) bool { return true }, ) rl.unlockedSetV6( 0, 0, []*Ip6AndPort{ NewIp6AndPort(net.ParseIP("1::1"), 1), NewIp6AndPort(net.ParseIP("1::1"), 2), // dupe of 0 with a diff port NewIp6AndPort(net.ParseIP("1:100::1"), 1), NewIp6AndPort(net.ParseIP("1::1"), 1), // this is a dupe }, func(iputil.VpnIp, *Ip6AndPort) bool { return true }, ) b.Run("no preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.shouldRebuild = true rl.Rebuild([]*net.IPNet{}) } }) _, ipNet, err := net.ParseCIDR("172.17.0.0/16") rl.Rebuild([]*net.IPNet{ipNet}) assert.NoError(b, err) b.Run("1 preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.Rebuild([]*net.IPNet{ipNet}) } }) _, ipNet2, err := net.ParseCIDR("70.0.0.0/8") rl.Rebuild([]*net.IPNet{ipNet, ipNet2}) assert.NoError(b, err) b.Run("2 preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { rl.Rebuild([]*net.IPNet{ipNet, ipNet2}) } }) _, ipNet3, err := net.ParseCIDR("0.0.0.0/0") rl.Rebuild([]*net.IPNet{ipNet, ipNet2, ipNet3}) assert.NoError(b, err) b.Run("3 preferred", func(b *testing.B) { for i := 0; i < b.N; i++ { 
rl.Rebuild([]*net.IPNet{ipNet, ipNet2, ipNet3}) } }) } nebula-1.6.1+dfsg/ssh.go000066400000000000000000000547401434072716400150520ustar00rootroot00000000000000package nebula import ( "bytes" "encoding/json" "flag" "fmt" "io/ioutil" "net" "os" "reflect" "runtime/pprof" "sort" "strings" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/sshd" "github.com/slackhq/nebula/udp" ) type sshListHostMapFlags struct { Json bool Pretty bool } type sshPrintCertFlags struct { Json bool Pretty bool Raw bool } type sshPrintTunnelFlags struct { Pretty bool } type sshChangeRemoteFlags struct { Address string } type sshCloseTunnelFlags struct { LocalOnly bool } type sshCreateTunnelFlags struct { Address string } func wireSSHReload(l *logrus.Logger, ssh *sshd.SSHServer, c *config.C) { c.RegisterReloadCallback(func(c *config.C) { if c.GetBool("sshd.enabled", false) { sshRun, err := configSSH(l, ssh, c) if err != nil { l.WithError(err).Error("Failed to reconfigure the sshd") ssh.Stop() } if sshRun != nil { go sshRun() } } else { ssh.Stop() } }) } // configSSH reads the ssh info out of the passed-in Config and // updates the passed-in SSHServer. On success, it returns a function // that callers may invoke to run the configured ssh server. On // failure, it returns nil, error. func configSSH(l *logrus.Logger, ssh *sshd.SSHServer, c *config.C) (func(), error) { //TODO conntrack list //TODO print firewall rules or hash? 
listen := c.GetString("sshd.listen", "") if listen == "" { return nil, fmt.Errorf("sshd.listen must be provided") } _, port, err := net.SplitHostPort(listen) if err != nil { return nil, fmt.Errorf("invalid sshd.listen address: %s", err) } if port == "22" { return nil, fmt.Errorf("sshd.listen can not use port 22") } //TODO: no good way to reload this right now hostKeyFile := c.GetString("sshd.host_key", "") if hostKeyFile == "" { return nil, fmt.Errorf("sshd.host_key must be provided") } hostKeyBytes, err := ioutil.ReadFile(hostKeyFile) if err != nil { return nil, fmt.Errorf("error while loading sshd.host_key file: %s", err) } err = ssh.SetHostKey(hostKeyBytes) if err != nil { return nil, fmt.Errorf("error while adding sshd.host_key: %s", err) } rawKeys := c.Get("sshd.authorized_users") keys, ok := rawKeys.([]interface{}) if ok { for _, rk := range keys { kDef, ok := rk.(map[interface{}]interface{}) if !ok { l.WithField("sshKeyConfig", rk).Warn("Authorized user had an error, ignoring") continue } user, ok := kDef["user"].(string) if !ok { l.WithField("sshKeyConfig", rk).Warn("Authorized user is missing the user field") continue } k := kDef["keys"] switch v := k.(type) { case string: err := ssh.AddAuthorizedKey(user, v) if err != nil { l.WithError(err).WithField("sshKeyConfig", rk).WithField("sshKey", v).Warn("Failed to authorize key") continue } case []interface{}: for _, subK := range v { sk, ok := subK.(string) if !ok { l.WithField("sshKeyConfig", rk).WithField("sshKey", subK).Warn("Did not understand ssh key") continue } err := ssh.AddAuthorizedKey(user, sk) if err != nil { l.WithError(err).WithField("sshKeyConfig", sk).Warn("Failed to authorize key") continue } } default: l.WithField("sshKeyConfig", rk).Warn("Authorized user is missing the keys field or was not understood") } } } else { l.Info("no ssh users to authorize") } var runner func() if c.GetBool("sshd.enabled", false) { ssh.Stop() runner = func() { if err := ssh.Run(listen); err != nil { 
l.WithField("err", err).Warn("Failed to run the SSH server") } } } else { ssh.Stop() } return runner, nil } func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap *HostMap, pendingHostMap *HostMap, lightHouse *LightHouse, ifce *Interface) { ssh.RegisterCommand(&sshd.Command{ Name: "list-hostmap", ShortDescription: "List all known previously connected hosts", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshListHostMapFlags{} fl.BoolVar(&s.Json, "json", false, "outputs as json with more information") fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshListHostMap(hostMap, fs, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "list-pending-hostmap", ShortDescription: "List all handshaking hosts", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshListHostMapFlags{} fl.BoolVar(&s.Json, "json", false, "outputs as json with more information") fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshListHostMap(pendingHostMap, fs, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "list-lighthouse-addrmap", ShortDescription: "List all lighthouse map entries", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshListHostMapFlags{} fl.BoolVar(&s.Json, "json", false, "outputs as json with more information") fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshListLighthouseMap(lightHouse, fs, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "reload", ShortDescription: "Reloads configuration from disk, same as sending HUP to the process", 
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshReload(c, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "start-cpu-profile", ShortDescription: "Starts a cpu profile and write output to the provided file", Callback: sshStartCpuProfile, }) ssh.RegisterCommand(&sshd.Command{ Name: "stop-cpu-profile", ShortDescription: "Stops a cpu profile and writes output to the previously provided file", Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { pprof.StopCPUProfile() return w.WriteLine("If a CPU profile was running it is now stopped") }, }) ssh.RegisterCommand(&sshd.Command{ Name: "save-heap-profile", ShortDescription: "Saves a heap profile to the provided path", Callback: sshGetHeapProfile, }) ssh.RegisterCommand(&sshd.Command{ Name: "log-level", ShortDescription: "Gets or sets the current log level", Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshLogLevel(l, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "log-format", ShortDescription: "Gets or sets the current log format", Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshLogFormat(l, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "version", ShortDescription: "Prints the currently running version of nebula", Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshVersion(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "print-cert", ShortDescription: "Prints the current certificate being used or the certificate for the provided vpn ip", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshPrintCertFlags{} fl.BoolVar(&s.Json, "json", false, "outputs as json") fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json, assumes -json") fl.BoolVar(&s.Raw, "raw", false, "raw prints the PEM encoded certificate, not compatible with -json or -pretty") return fl, &s }, Callback: func(fs interface{}, 
a []string, w sshd.StringWriter) error { return sshPrintCert(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "print-tunnel", ShortDescription: "Prints json details about a tunnel for the provided vpn ip", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshPrintTunnelFlags{} fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshPrintTunnel(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "print-relays", ShortDescription: "Prints json details about all relay info", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshPrintTunnelFlags{} fl.BoolVar(&s.Pretty, "pretty", false, "pretty prints json") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshPrintRelays(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "change-remote", ShortDescription: "Changes the remote address used in the tunnel for the provided vpn ip", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshChangeRemoteFlags{} fl.StringVar(&s.Address, "address", "", "The new remote address, ip:port") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshChangeRemote(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "close-tunnel", ShortDescription: "Closes a tunnel for the provided vpn ip", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshCloseTunnelFlags{} fl.BoolVar(&s.LocalOnly, "local-only", false, "Disables notifying the remote that the tunnel is shutting down") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshCloseTunnel(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "create-tunnel", 
ShortDescription: "Creates a tunnel for the provided vpn ip and address", Help: "The lighthouses will be queried for real addresses but you can provide one as well.", Flags: func() (*flag.FlagSet, interface{}) { fl := flag.NewFlagSet("", flag.ContinueOnError) s := sshCreateTunnelFlags{} fl.StringVar(&s.Address, "address", "", "Optionally provide a real remote address, ip:port ") return fl, &s }, Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshCreateTunnel(ifce, fs, a, w) }, }) ssh.RegisterCommand(&sshd.Command{ Name: "query-lighthouse", ShortDescription: "Query the lighthouses for the provided vpn ip", Help: "This command is asynchronous. Only currently known udp ips will be printed.", Callback: func(fs interface{}, a []string, w sshd.StringWriter) error { return sshQueryLighthouse(ifce, fs, a, w) }, }) } func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error { fs, ok := a.(*sshListHostMapFlags) if !ok { //TODO: error return nil } hm := listHostMap(hostMap) sort.Slice(hm, func(i, j int) bool { return bytes.Compare(hm[i].VpnIp, hm[j].VpnIp) < 0 }) if fs.Json || fs.Pretty { js := json.NewEncoder(w.GetWriter()) if fs.Pretty { js.SetIndent("", " ") } err := js.Encode(hm) if err != nil { //TODO return nil } } else { for _, v := range hm { err := w.WriteLine(fmt.Sprintf("%s: %s", v.VpnIp, v.RemoteAddrs)) if err != nil { return err } } } return nil } func sshListLighthouseMap(lightHouse *LightHouse, a interface{}, w sshd.StringWriter) error { fs, ok := a.(*sshListHostMapFlags) if !ok { //TODO: error return nil } type lighthouseInfo struct { VpnIp string `json:"vpnIp"` Addrs *CacheMap `json:"addrs"` } lightHouse.RLock() addrMap := make([]lighthouseInfo, len(lightHouse.addrMap)) x := 0 for k, v := range lightHouse.addrMap { addrMap[x] = lighthouseInfo{ VpnIp: k.String(), Addrs: v.CopyCache(), } x++ } lightHouse.RUnlock() sort.Slice(addrMap, func(i, j int) bool { return strings.Compare(addrMap[i].VpnIp, 
addrMap[j].VpnIp) < 0 }) if fs.Json || fs.Pretty { js := json.NewEncoder(w.GetWriter()) if fs.Pretty { js.SetIndent("", " ") } err := js.Encode(addrMap) if err != nil { //TODO return nil } } else { for _, v := range addrMap { b, err := json.Marshal(v.Addrs) if err != nil { return err } err = w.WriteLine(fmt.Sprintf("%s: %s", v.VpnIp, string(b))) if err != nil { return err } } } return nil } func sshStartCpuProfile(fs interface{}, a []string, w sshd.StringWriter) error { if len(a) == 0 { err := w.WriteLine("No path to write profile provided") return err } file, err := os.Create(a[0]) if err != nil { err = w.WriteLine(fmt.Sprintf("Unable to create profile file: %s", err)) return err } err = pprof.StartCPUProfile(file) if err != nil { err = w.WriteLine(fmt.Sprintf("Unable to start cpu profile: %s", err)) return err } err = w.WriteLine(fmt.Sprintf("Started cpu profile, issue stop-cpu-profile to write the output to %s", a)) return err } func sshVersion(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { return w.WriteLine(fmt.Sprintf("%s", ifce.version)) } func sshQueryLighthouse(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { if len(a) == 0 { return w.WriteLine("No vpn ip was provided") } parsedIp := net.ParseIP(a[0]) if parsedIp == nil { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } vpnIp := iputil.Ip2VpnIp(parsedIp) if vpnIp == 0 { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } var cm *CacheMap rl := ifce.lightHouse.Query(vpnIp, ifce) if rl != nil { cm = rl.CopyCache() } return json.NewEncoder(w.GetWriter()).Encode(cm) } func sshCloseTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { flags, ok := fs.(*sshCloseTunnelFlags) if !ok { //TODO: error return nil } if len(a) == 0 { return w.WriteLine("No vpn ip was provided") } parsedIp := net.ParseIP(a[0]) if parsedIp == nil { return w.WriteLine(fmt.Sprintf("The 
provided vpn ip could not be parsed: %s", a[0])) } vpnIp := iputil.Ip2VpnIp(parsedIp) if vpnIp == 0 { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp) if err != nil { return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0])) } if !flags.LocalOnly { ifce.send( header.CloseTunnel, 0, hostInfo.ConnectionState, hostInfo, []byte{}, make([]byte, 12, 12), make([]byte, mtu), ) } ifce.closeTunnel(hostInfo) return w.WriteLine("Closed") } func sshCreateTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { flags, ok := fs.(*sshCreateTunnelFlags) if !ok { //TODO: error return nil } if len(a) == 0 { return w.WriteLine("No vpn ip was provided") } parsedIp := net.ParseIP(a[0]) if parsedIp == nil { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } vpnIp := iputil.Ip2VpnIp(parsedIp) if vpnIp == 0 { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } hostInfo, _ := ifce.hostMap.QueryVpnIp(vpnIp) if hostInfo != nil { return w.WriteLine(fmt.Sprintf("Tunnel already exists")) } hostInfo, _ = ifce.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp) if hostInfo != nil { return w.WriteLine(fmt.Sprintf("Tunnel already handshaking")) } var addr *udp.Addr if flags.Address != "" { addr = udp.NewAddrFromString(flags.Address) if addr == nil { return w.WriteLine("Address could not be parsed") } } hostInfo = ifce.handshakeManager.AddVpnIp(vpnIp, ifce.initHostInfo) if addr != nil { hostInfo.SetRemote(addr) } ifce.getOrHandshake(vpnIp) return w.WriteLine("Created") } func sshChangeRemote(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { flags, ok := fs.(*sshChangeRemoteFlags) if !ok { //TODO: error return nil } if len(a) == 0 { return w.WriteLine("No vpn ip was provided") } if flags.Address == "" { return w.WriteLine("No address was provided") } addr := 
udp.NewAddrFromString(flags.Address) if addr == nil { return w.WriteLine("Address could not be parsed") } parsedIp := net.ParseIP(a[0]) if parsedIp == nil { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } vpnIp := iputil.Ip2VpnIp(parsedIp) if vpnIp == 0 { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp) if err != nil { return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0])) } hostInfo.SetRemote(addr) return w.WriteLine("Changed") } func sshGetHeapProfile(fs interface{}, a []string, w sshd.StringWriter) error { if len(a) == 0 { return w.WriteLine("No path to write profile provided") } file, err := os.Create(a[0]) if err != nil { err = w.WriteLine(fmt.Sprintf("Unable to create profile file: %s", err)) return err } err = pprof.WriteHeapProfile(file) if err != nil { err = w.WriteLine(fmt.Sprintf("Unable to write profile: %s", err)) return err } err = w.WriteLine(fmt.Sprintf("Mem profile created at %s", a)) return err } func sshLogLevel(l *logrus.Logger, fs interface{}, a []string, w sshd.StringWriter) error { if len(a) == 0 { return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level)) } level, err := logrus.ParseLevel(a[0]) if err != nil { return w.WriteLine(fmt.Sprintf("Unknown log level %s. Possible log levels: %s", a, logrus.AllLevels)) } l.SetLevel(level) return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level)) } func sshLogFormat(l *logrus.Logger, fs interface{}, a []string, w sshd.StringWriter) error { if len(a) == 0 { return w.WriteLine(fmt.Sprintf("Log format is: %s", reflect.TypeOf(l.Formatter))) } logFormat := strings.ToLower(a[0]) switch logFormat { case "text": l.Formatter = &logrus.TextFormatter{} case "json": l.Formatter = &logrus.JSONFormatter{} default: return fmt.Errorf("unknown log format `%s`. 
possible formats: %s", logFormat, []string{"text", "json"}) } return w.WriteLine(fmt.Sprintf("Log format is: %s", reflect.TypeOf(l.Formatter))) } func sshPrintCert(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { args, ok := fs.(*sshPrintCertFlags) if !ok { //TODO: error return nil } cert := ifce.certState.certificate if len(a) > 0 { parsedIp := net.ParseIP(a[0]) if parsedIp == nil { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } vpnIp := iputil.Ip2VpnIp(parsedIp) if vpnIp == 0 { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp) if err != nil { return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0])) } cert = hostInfo.GetCert() } if args.Json || args.Pretty { b, err := cert.MarshalJSON() if err != nil { //TODO: handle it return nil } if args.Pretty { buf := new(bytes.Buffer) err := json.Indent(buf, b, "", " ") b = buf.Bytes() if err != nil { //TODO: handle it return nil } } return w.WriteBytes(b) } if args.Raw { b, err := cert.MarshalToPEM() if err != nil { //TODO: handle it return nil } return w.WriteBytes(b) } return w.WriteLine(cert.String()) } func sshPrintRelays(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { args, ok := fs.(*sshPrintTunnelFlags) if !ok { //TODO: error w.WriteLine(fmt.Sprintf("sshPrintRelays failed to convert args type")) return nil } relays := map[uint32]*HostInfo{} ifce.hostMap.Lock() for k, v := range ifce.hostMap.Relays { relays[k] = v } ifce.hostMap.Unlock() type RelayFor struct { Error error Type string State string PeerIp iputil.VpnIp LocalIndex uint32 RemoteIndex uint32 RelayedThrough []iputil.VpnIp } type RelayOutput struct { NebulaIp iputil.VpnIp RelayForIps []RelayFor } type CmdOutput struct { Relays []*RelayOutput } co := CmdOutput{} enc := json.NewEncoder(w.GetWriter()) if args.Pretty { enc.SetIndent("", " ") } for k, v := range relays { 
ro := RelayOutput{NebulaIp: v.vpnIp} co.Relays = append(co.Relays, &ro) relayHI, err := ifce.hostMap.QueryVpnIp(v.vpnIp) if err != nil { ro.RelayForIps = append(ro.RelayForIps, RelayFor{Error: err}) continue } for _, vpnIp := range relayHI.relayState.CopyRelayForIps() { rf := RelayFor{Error: nil} r, ok := relayHI.relayState.GetRelayForByIp(vpnIp) if ok { t := "" switch r.Type { case ForwardingType: t = "forwarding" case TerminalType: t = "terminal" default: t = "unkown" } s := "" switch r.State { case Requested: s = "requested" case Established: s = "established" default: s = "unknown" } rf.LocalIndex = r.LocalIndex rf.RemoteIndex = r.RemoteIndex rf.PeerIp = r.PeerIp rf.Type = t rf.State = s if rf.LocalIndex != k { rf.Error = fmt.Errorf("hostmap LocalIndex '%v' does not match RelayState LocalIndex", k) } } relayedHI, err := ifce.hostMap.QueryVpnIp(vpnIp) if err == nil { rf.RelayedThrough = append(rf.RelayedThrough, relayedHI.relayState.CopyRelayIps()...) } ro.RelayForIps = append(ro.RelayForIps, rf) } } err := enc.Encode(co) if err != nil { return err } return nil } func sshPrintTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWriter) error { args, ok := fs.(*sshPrintTunnelFlags) if !ok { //TODO: error return nil } if len(a) == 0 { return w.WriteLine("No vpn ip was provided") } parsedIp := net.ParseIP(a[0]) if parsedIp == nil { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } vpnIp := iputil.Ip2VpnIp(parsedIp) if vpnIp == 0 { return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0])) } hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp) if err != nil { return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0])) } enc := json.NewEncoder(w.GetWriter()) if args.Pretty { enc.SetIndent("", " ") } return enc.Encode(copyHostInfo(hostInfo, ifce.hostMap.preferredRanges)) } func sshReload(c *config.C, w sshd.StringWriter) error { err := w.WriteLine("Reloading config") 
c.ReloadConfig() return err } nebula-1.6.1+dfsg/sshd/000077500000000000000000000000001434072716400146555ustar00rootroot00000000000000nebula-1.6.1+dfsg/sshd/command.go000066400000000000000000000066561434072716400166370ustar00rootroot00000000000000package sshd import ( "errors" "flag" "fmt" "sort" "strings" "github.com/armon/go-radix" ) // CommandFlags is a function called before help or command execution to parse command line flags // It should return a flag.FlagSet instance and a pointer to the struct that will contain parsed flags type CommandFlags func() (*flag.FlagSet, interface{}) // CommandCallback is the function called when your command should execute. // fs will be a a pointer to the struct provided by Command.Flags callback, if there was one. -h and -help are reserved // and handled automatically for you. // a will be any unconsumed arguments, if no Command.Flags was available this will be all the flags passed in. // w is the writer to use when sending messages back to the client. 
// If an error is returned by the callback it is logged locally, the callback should handle messaging errors to the user // where appropriate type CommandCallback func(fs interface{}, a []string, w StringWriter) error type Command struct { Name string ShortDescription string Help string Flags CommandFlags Callback CommandCallback } func execCommand(c *Command, args []string, w StringWriter) error { var ( fl *flag.FlagSet fs interface{} ) if c.Flags != nil { fl, fs = c.Flags() if fl != nil { //TODO: handle the error fl.Parse(args) args = fl.Args() } } return c.Callback(fs, args, w) } func dumpCommands(c *radix.Tree, w StringWriter) { err := w.WriteLine("Available commands:") if err != nil { //TODO: log return } cmds := make([]string, 0) for _, l := range allCommands(c) { cmds = append(cmds, fmt.Sprintf("%s - %s", l.Name, l.ShortDescription)) } sort.Strings(cmds) err = w.Write(strings.Join(cmds, "\n") + "\n\n") if err != nil { //TODO: log } } func lookupCommand(c *radix.Tree, sCmd string) (*Command, error) { cmd, ok := c.Get(sCmd) if !ok { return nil, nil } command, ok := cmd.(*Command) if !ok { return nil, errors.New("failed to cast command") } return command, nil } func matchCommand(c *radix.Tree, cmd string) []string { cmds := make([]string, 0) c.WalkPrefix(cmd, func(found string, v interface{}) bool { cmds = append(cmds, found) return false }) sort.Strings(cmds) return cmds } func allCommands(c *radix.Tree) []*Command { cmds := make([]*Command, 0) c.WalkPrefix("", func(found string, v interface{}) bool { cmd, ok := v.(*Command) if ok { cmds = append(cmds, cmd) } return false }) return cmds } func helpCallback(commands *radix.Tree, a []string, w StringWriter) (err error) { // Just typed help if len(a) == 0 { dumpCommands(commands, w) return nil } // We are printing a specific commands help text cmd, err := lookupCommand(commands, a[0]) if err != nil { //TODO: handle error //TODO: message the user return } if cmd != nil { err = w.WriteLine(fmt.Sprintf("%s - %s", 
cmd.Name, cmd.ShortDescription)) if err != nil { return err } if cmd.Help != "" { err = w.WriteLine(fmt.Sprintf(" %s", cmd.Help)) if err != nil { return err } } if cmd.Flags != nil { fs, _ := cmd.Flags() if fs != nil { fs.SetOutput(w.GetWriter()) fs.PrintDefaults() } } return nil } err = w.WriteLine("Command not available " + a[0]) if err != nil { return err } return nil } func checkHelpArgs(args []string) bool { for _, a := range args { if a == "-h" || a == "-help" { return true } } return false } nebula-1.6.1+dfsg/sshd/server.go000066400000000000000000000113451434072716400165160ustar00rootroot00000000000000package sshd import ( "errors" "fmt" "net" "sync" "github.com/armon/go-radix" "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" ) type SSHServer struct { config *ssh.ServerConfig l *logrus.Entry // Map of user -> authorized keys trustedKeys map[string]map[string]bool // List of available commands helpCommand *Command commands *radix.Tree listener net.Listener // Locks the conns/counter to avoid concurrent map access connsLock sync.Mutex conns map[int]*session counter int } // NewSSHServer creates a new ssh server rigged with default commands and prepares to listen func NewSSHServer(l *logrus.Entry) (*SSHServer, error) { s := &SSHServer{ trustedKeys: make(map[string]map[string]bool), l: l, commands: radix.New(), conns: make(map[int]*session), } s.config = &ssh.ServerConfig{ PublicKeyCallback: s.matchPubKey, //TODO: AuthLogCallback: s.authAttempt, //TODO: version string ServerVersion: fmt.Sprintf("SSH-2.0-Nebula???"), } s.RegisterCommand(&Command{ Name: "help", ShortDescription: "prints available commands or help for specific usage info", Callback: func(a interface{}, args []string, w StringWriter) error { return helpCallback(s.commands, args, w) }, }) return s, nil } func (s *SSHServer) SetHostKey(hostPrivateKey []byte) error { private, err := ssh.ParsePrivateKey(hostPrivateKey) if err != nil { return fmt.Errorf("failed to parse private key: %s", err) } 
s.config.AddHostKey(private) return nil } func (s *SSHServer) ClearAuthorizedKeys() { s.trustedKeys = make(map[string]map[string]bool) } // AddAuthorizedKey adds an ssh public key for a user func (s *SSHServer) AddAuthorizedKey(user, pubKey string) error { pk, _, _, _, err := ssh.ParseAuthorizedKey([]byte(pubKey)) if err != nil { return err } tk, ok := s.trustedKeys[user] if !ok { tk = make(map[string]bool) s.trustedKeys[user] = tk } tk[string(pk.Marshal())] = true s.l.WithField("sshKey", pubKey).WithField("sshUser", user).Info("Authorized ssh key") return nil } // RegisterCommand adds a command that can be run by a user, by default only `help` is available func (s *SSHServer) RegisterCommand(c *Command) { s.commands.Insert(c.Name, c) } // Run begins listening and accepting connections func (s *SSHServer) Run(addr string) error { var err error s.listener, err = net.Listen("tcp", addr) if err != nil { return err } s.l.WithField("sshListener", addr).Info("SSH server is listening") // Run loops until there is an error s.run() s.closeSessions() s.l.Info("SSH server stopped listening") // We don't return an error because run logs for us return nil } func (s *SSHServer) run() { for { c, err := s.listener.Accept() if err != nil { if !errors.Is(err, net.ErrClosed) { s.l.WithError(err).Warn("Error in listener, shutting down") } return } conn, chans, reqs, err := ssh.NewServerConn(c, s.config) fp := "" if conn != nil { fp = conn.Permissions.Extensions["fp"] } if err != nil { l := s.l.WithError(err).WithField("remoteAddress", c.RemoteAddr()) if conn != nil { l = l.WithField("sshUser", conn.User()) conn.Close() } if fp != "" { l = l.WithField("sshFingerprint", fp) } l.Warn("failed to handshake") continue } l := s.l.WithField("sshUser", conn.User()) l.WithField("remoteAddress", c.RemoteAddr()).WithField("sshFingerprint", fp).Info("ssh user logged in") session := NewSession(s.commands, conn, chans, l.WithField("subsystem", "sshd.session")) s.connsLock.Lock() s.counter++ counter 
:= s.counter s.conns[counter] = session s.connsLock.Unlock() go ssh.DiscardRequests(reqs) go func() { <-session.exitChan s.l.WithField("id", counter).Debug("closing conn") s.connsLock.Lock() delete(s.conns, counter) s.connsLock.Unlock() }() } } func (s *SSHServer) Stop() { // Close the listener, this will cause all session to terminate as well, see SSHServer.Run if s.listener != nil { if err := s.listener.Close(); err != nil { s.l.WithError(err).Warn("Failed to close the sshd listener") } } } func (s *SSHServer) closeSessions() { s.connsLock.Lock() for _, c := range s.conns { c.Close() } s.connsLock.Unlock() } func (s *SSHServer) matchPubKey(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { pk := string(pubKey.Marshal()) fp := ssh.FingerprintSHA256(pubKey) tk, ok := s.trustedKeys[c.User()] if !ok { return nil, fmt.Errorf("unknown user %s", c.User()) } _, ok = tk[pk] if !ok { return nil, fmt.Errorf("unknown public key for %s (%s)", c.User(), fp) } return &ssh.Permissions{ // Record the public key used for authentication. 
Extensions: map[string]string{ "fp": fp, "user": c.User(), }, }, nil } nebula-1.6.1+dfsg/sshd/session.go000066400000000000000000000100011434072716400166570ustar00rootroot00000000000000package sshd import ( "fmt" "sort" "strings" "github.com/anmitsu/go-shlex" "github.com/armon/go-radix" "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/terminal" ) type session struct { l *logrus.Entry c *ssh.ServerConn term *terminal.Terminal commands *radix.Tree exitChan chan bool } func NewSession(commands *radix.Tree, conn *ssh.ServerConn, chans <-chan ssh.NewChannel, l *logrus.Entry) *session { s := &session{ commands: radix.NewFromMap(commands.ToMap()), l: l, c: conn, exitChan: make(chan bool), } s.commands.Insert("logout", &Command{ Name: "logout", ShortDescription: "Ends the current session", Callback: func(a interface{}, args []string, w StringWriter) error { s.Close() return nil }, }) go s.handleChannels(chans) return s } func (s *session) handleChannels(chans <-chan ssh.NewChannel) { for newChannel := range chans { if newChannel.ChannelType() != "session" { s.l.WithField("sshChannelType", newChannel.ChannelType()).Error("unknown channel type") newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") continue } channel, requests, err := newChannel.Accept() if err != nil { s.l.WithError(err).Warn("could not accept channel") continue } go s.handleRequests(requests, channel) } } func (s *session) handleRequests(in <-chan *ssh.Request, channel ssh.Channel) { for req := range in { var err error //TODO: maybe support window sizing? 
switch req.Type { case "shell": if s.term == nil { s.term = s.createTerm(channel) err = req.Reply(true, nil) } else { err = req.Reply(false, nil) } case "pty-req": err = req.Reply(true, nil) case "window-change": err = req.Reply(true, nil) case "exec": var payload = struct{ Value string }{} cErr := ssh.Unmarshal(req.Payload, &payload) if cErr != nil { req.Reply(false, nil) return } req.Reply(true, nil) s.dispatchCommand(payload.Value, &stringWriter{channel}) //TODO: Fix error handling and report the proper status back status := struct{ Status uint32 }{uint32(0)} //TODO: I think this is how we shut down a shell as well? channel.SendRequest("exit-status", false, ssh.Marshal(status)) channel.Close() return default: s.l.WithField("sshRequest", req.Type).Debug("Rejected unknown request") err = req.Reply(false, nil) } if err != nil { s.l.WithError(err).Info("Error handling ssh session requests") s.Close() return } } } func (s *session) createTerm(channel ssh.Channel) *terminal.Terminal { //TODO: PS1 with nebula cert name term := terminal.NewTerminal(channel, s.c.User()+"@nebula > ") term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { // key 9 is tab if key == 9 { cmds := matchCommand(s.commands, line) if len(cmds) == 1 { return cmds[0] + " ", len(cmds[0]) + 1, true } sort.Strings(cmds) term.Write([]byte(strings.Join(cmds, "\n") + "\n\n")) } return "", 0, false } go s.handleInput(channel) return term } func (s *session) handleInput(channel ssh.Channel) { defer s.Close() w := &stringWriter{w: s.term} for { line, err := s.term.ReadLine() if err != nil { //TODO: log break } s.dispatchCommand(line, w) } } func (s *session) dispatchCommand(line string, w StringWriter) { args, err := shlex.Split(line, true) if err != nil { //todo: LOG IT return } if len(args) == 0 { dumpCommands(s.commands, w) return } c, err := lookupCommand(s.commands, args[0]) if err != nil { //TODO: handle the error return } if c == nil { err := 
w.WriteLine(fmt.Sprintf("did not understand: %s", line)) //TODO: log error _ = err dumpCommands(s.commands, w) return } if checkHelpArgs(args) { s.dispatchCommand(fmt.Sprintf("%s %s", "help", c.Name), w) return } err = execCommand(c, args[1:], w) if err != nil { //TODO: log the error } return } func (s *session) Close() { s.c.Close() s.exitChan <- true } nebula-1.6.1+dfsg/sshd/writer.go000066400000000000000000000010141434072716400165140ustar00rootroot00000000000000package sshd import "io" type StringWriter interface { WriteLine(string) error Write(string) error WriteBytes([]byte) error GetWriter() io.Writer } type stringWriter struct { w io.Writer } func (w *stringWriter) WriteLine(s string) error { return w.Write(s + "\n") } func (w *stringWriter) Write(s string) error { _, err := w.w.Write([]byte(s)) return err } func (w *stringWriter) WriteBytes(b []byte) error { _, err := w.w.Write(b) return err } func (w *stringWriter) GetWriter() io.Writer { return w.w } nebula-1.6.1+dfsg/stats.go000066400000000000000000000070411434072716400154030ustar00rootroot00000000000000package nebula import ( "errors" "fmt" "log" "net" "net/http" "runtime" "time" graphite "github.com/cyberdelia/go-metrics-graphite" mp "github.com/nbrownus/go-metrics-prometheus" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" ) // startStats initializes stats from config. On success, if any futher work // is needed to serve stats, it returns a func to handle that work. If no // work is needed, it'll return nil. On failure, it returns nil, error. 
func startStats(l *logrus.Logger, c *config.C, buildVersion string, configTest bool) (func(), error) { mType := c.GetString("stats.type", "") if mType == "" || mType == "none" { return nil, nil } interval := c.GetDuration("stats.interval", 0) if interval == 0 { return nil, fmt.Errorf("stats.interval was an invalid duration: %s", c.GetString("stats.interval", "")) } var startFn func() switch mType { case "graphite": err := startGraphiteStats(l, interval, c, configTest) if err != nil { return nil, err } case "prometheus": var err error startFn, err = startPrometheusStats(l, interval, c, buildVersion, configTest) if err != nil { return nil, err } default: return nil, fmt.Errorf("stats.type was not understood: %s", mType) } metrics.RegisterDebugGCStats(metrics.DefaultRegistry) metrics.RegisterRuntimeMemStats(metrics.DefaultRegistry) go metrics.CaptureDebugGCStats(metrics.DefaultRegistry, interval) go metrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, interval) return startFn, nil } func startGraphiteStats(l *logrus.Logger, i time.Duration, c *config.C, configTest bool) error { proto := c.GetString("stats.protocol", "tcp") host := c.GetString("stats.host", "") if host == "" { return errors.New("stats.host can not be empty") } prefix := c.GetString("stats.prefix", "nebula") addr, err := net.ResolveTCPAddr(proto, host) if err != nil { return fmt.Errorf("error while setting up graphite sink: %s", err) } if !configTest { l.Infof("Starting graphite. 
Interval: %s, prefix: %s, addr: %s", i, prefix, addr) go graphite.Graphite(metrics.DefaultRegistry, i, prefix, addr) } return nil } func startPrometheusStats(l *logrus.Logger, i time.Duration, c *config.C, buildVersion string, configTest bool) (func(), error) { namespace := c.GetString("stats.namespace", "") subsystem := c.GetString("stats.subsystem", "") listen := c.GetString("stats.listen", "") if listen == "" { return nil, fmt.Errorf("stats.listen should not be empty") } path := c.GetString("stats.path", "") if path == "" { return nil, fmt.Errorf("stats.path should not be empty") } pr := prometheus.NewRegistry() pClient := mp.NewPrometheusProvider(metrics.DefaultRegistry, namespace, subsystem, pr, i) if !configTest { go pClient.UpdatePrometheusMetrics() } // Export our version information as labels on a static gauge g := prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "info", Help: "Version information for the Nebula binary", ConstLabels: prometheus.Labels{ "version": buildVersion, "goversion": runtime.Version(), }, }) pr.MustRegister(g) g.Set(1) var startFn func() if !configTest { startFn = func() { l.Infof("Prometheus stats listening on %s at %s", listen, path) http.Handle(path, promhttp.HandlerFor(pr, promhttp.HandlerOpts{ErrorLog: l})) log.Fatal(http.ListenAndServe(listen, nil)) } } return startFn, nil } nebula-1.6.1+dfsg/test/000077500000000000000000000000001434072716400146735ustar00rootroot00000000000000nebula-1.6.1+dfsg/test/assert.go000066400000000000000000000067431434072716400165350ustar00rootroot00000000000000package test import ( "fmt" "reflect" "testing" "time" "unsafe" "github.com/stretchr/testify/assert" ) // AssertDeepCopyEqual checks to see if two variables have the same values but DO NOT share any memory // There is currently a special case for `time.loc` (as this code traverses into unexported fields) func AssertDeepCopyEqual(t *testing.T, a interface{}, b interface{}) { v1 := reflect.ValueOf(a) v2 := 
reflect.ValueOf(b) if !assert.Equal(t, v1.Type(), v2.Type()) { return } traverseDeepCopy(t, v1, v2, v1.Type().String()) } func traverseDeepCopy(t *testing.T, v1 reflect.Value, v2 reflect.Value, name string) bool { switch v1.Kind() { case reflect.Array: for i := 0; i < v1.Len(); i++ { if !traverseDeepCopy(t, v1.Index(i), v2.Index(i), fmt.Sprintf("%s[%v]", name, i)) { return false } } return true case reflect.Slice: if v1.IsNil() || v2.IsNil() { return assert.Equal(t, v1.IsNil(), v2.IsNil(), "%s are not both nil %+v, %+v", name, v1, v2) } if !assert.Equal(t, v1.Len(), v2.Len(), "%s did not have the same length", name) { return false } // A slice with cap 0 if v1.Cap() != 0 && !assert.NotEqual(t, v1.Pointer(), v2.Pointer(), "%s point to the same slice %v == %v", name, v1.Pointer(), v2.Pointer()) { return false } v1c := v1.Cap() v2c := v2.Cap() if v1c > 0 && v2c > 0 && v1.Slice(0, v1c).Slice(v1c-1, v1c-1).Pointer() == v2.Slice(0, v2c).Slice(v2c-1, v2c-1).Pointer() { return assert.Fail(t, "", "%s share some underlying memory", name) } for i := 0; i < v1.Len(); i++ { if !traverseDeepCopy(t, v1.Index(i), v2.Index(i), fmt.Sprintf("%s[%v]", name, i)) { return false } } return true case reflect.Interface: if v1.IsNil() || v2.IsNil() { return assert.Equal(t, v1.IsNil(), v2.IsNil(), "%s are not both nil", name) } return traverseDeepCopy(t, v1.Elem(), v2.Elem(), name) case reflect.Ptr: local := reflect.ValueOf(time.Local).Pointer() if local == v1.Pointer() && local == v2.Pointer() { return true } if !assert.NotEqual(t, v1.Pointer(), v2.Pointer(), "%s points to the same memory", name) { return false } return traverseDeepCopy(t, v1.Elem(), v2.Elem(), name) case reflect.Struct: for i, n := 0, v1.NumField(); i < n; i++ { if !traverseDeepCopy(t, v1.Field(i), v2.Field(i), name+"."+v1.Type().Field(i).Name) { return false } } return true case reflect.Map: if v1.IsNil() || v2.IsNil() { return assert.Equal(t, v1.IsNil(), v2.IsNil(), "%s are not both nil", name) } if !assert.Equal(t, 
v1.Len(), v2.Len(), "%s are not the same length", name) { return false } if !assert.NotEqual(t, v1.Pointer(), v2.Pointer(), "%s point to the same memory", name) { return false } for _, k := range v1.MapKeys() { val1 := v1.MapIndex(k) val2 := v2.MapIndex(k) if !assert.True(t, val1.IsValid(), "%s is an invalid key in %s", k, name) { return false } if !assert.True(t, val2.IsValid(), "%s is an invalid key in %s", k, name) { return false } if !traverseDeepCopy(t, val1, val2, name+fmt.Sprintf("%s[%s]", name, k)) { return false } } return true default: if v1.CanInterface() && v2.CanInterface() { return assert.Equal(t, v1.Interface(), v2.Interface(), "%s was not equal", name) } e1 := reflect.NewAt(v1.Type(), unsafe.Pointer(v1.UnsafeAddr())).Elem().Interface() e2 := reflect.NewAt(v2.Type(), unsafe.Pointer(v2.UnsafeAddr())).Elem().Interface() return assert.Equal(t, e1, e2, "%s (unexported) was not equal", name) } } nebula-1.6.1+dfsg/test/logger.go000066400000000000000000000005651434072716400165070ustar00rootroot00000000000000package test import ( "io/ioutil" "os" "github.com/sirupsen/logrus" ) func NewLogger() *logrus.Logger { l := logrus.New() v := os.Getenv("TEST_LOGS") if v == "" { l.SetOutput(ioutil.Discard) return l } switch v { case "2": l.SetLevel(logrus.DebugLevel) case "3": l.SetLevel(logrus.TraceLevel) default: l.SetLevel(logrus.InfoLevel) } return l } nebula-1.6.1+dfsg/test/tun.go000066400000000000000000000011301434072716400160230ustar00rootroot00000000000000package test import ( "errors" "io" "net" "github.com/slackhq/nebula/iputil" ) type NoopTun struct{} func (NoopTun) RouteFor(iputil.VpnIp) iputil.VpnIp { return 0 } func (NoopTun) Activate() error { return nil } func (NoopTun) Cidr() *net.IPNet { return nil } func (NoopTun) Name() string { return "noop" } func (NoopTun) Read([]byte) (int, error) { return 0, nil } func (NoopTun) Write([]byte) (int, error) { return 0, nil } func (NoopTun) NewMultiQueueReader() (io.ReadWriteCloser, error) { return nil, 
errors.New("unsupported") } func (NoopTun) Close() error { return nil } nebula-1.6.1+dfsg/timeout.go000066400000000000000000000106011434072716400157270ustar00rootroot00000000000000package nebula import ( "time" "github.com/slackhq/nebula/firewall" ) // How many timer objects should be cached const timerCacheMax = 50000 var emptyFWPacket = firewall.Packet{} type TimerWheel struct { // Current tick current int // Cheat on finding the length of the wheel wheelLen int // Last time we ticked, since we are lazy ticking lastTick *time.Time // Durations of a tick and the entire wheel tickDuration time.Duration wheelDuration time.Duration // The actual wheel which is just a set of singly linked lists, head/tail pointers wheel []*TimeoutList // Singly linked list of items that have timed out of the wheel expired *TimeoutList // Item cache to avoid garbage collect itemCache *TimeoutItem itemsCached int } // Represents a tick in the wheel type TimeoutList struct { Head *TimeoutItem Tail *TimeoutItem } // Represents an item within a tick type TimeoutItem struct { Packet firewall.Packet Next *TimeoutItem } // Builds a timer wheel and identifies the tick duration and wheel duration from the provided values // Purge must be called once per entry to actually remove anything func NewTimerWheel(min, max time.Duration) *TimerWheel { //TODO provide an error //if min >= max { // return nil //} // Round down and add 1 so we can have the smallest # of ticks in the wheel and still account for a full // max duration wLen := int((max / min) + 1) tw := TimerWheel{ wheelLen: wLen, wheel: make([]*TimeoutList, wLen), tickDuration: min, wheelDuration: max, expired: &TimeoutList{}, } for i := range tw.wheel { tw.wheel[i] = &TimeoutList{} } return &tw } // Add will add a firewall.Packet to the wheel in it's proper timeout func (tw *TimerWheel) Add(v firewall.Packet, timeout time.Duration) *TimeoutItem { // Check and see if we should progress the tick tw.advance(time.Now()) i := 
tw.findWheel(timeout) // Try to fetch off the cache ti := tw.itemCache if ti != nil { tw.itemCache = ti.Next tw.itemsCached-- ti.Next = nil } else { ti = &TimeoutItem{} } // Relink and return ti.Packet = v if tw.wheel[i].Tail == nil { tw.wheel[i].Head = ti tw.wheel[i].Tail = ti } else { tw.wheel[i].Tail.Next = ti tw.wheel[i].Tail = ti } return ti } func (tw *TimerWheel) Purge() (firewall.Packet, bool) { if tw.expired.Head == nil { return emptyFWPacket, false } ti := tw.expired.Head tw.expired.Head = ti.Next if tw.expired.Head == nil { tw.expired.Tail = nil } // Clear out the items references ti.Next = nil // Maybe cache it for later if tw.itemsCached < timerCacheMax { ti.Next = tw.itemCache tw.itemCache = ti tw.itemsCached++ } return ti.Packet, true } // advance will move the wheel forward by proper number of ticks. The caller _should_ lock the wheel before calling this func (tw *TimerWheel) findWheel(timeout time.Duration) (i int) { if timeout < tw.tickDuration { // Can't track anything below the set resolution timeout = tw.tickDuration } else if timeout > tw.wheelDuration { // We aren't handling timeouts greater than the wheels duration timeout = tw.wheelDuration } // Find the next highest, rounding up tick := int(((timeout - 1) / tw.tickDuration) + 1) // Add another tick since the current tick may almost be over then map it to the wheel from our // current position tick += tw.current + 1 if tick >= tw.wheelLen { tick -= tw.wheelLen } return tick } // advance will lock and move the wheel forward by proper number of ticks. 
func (tw *TimerWheel) advance(now time.Time) { if tw.lastTick == nil { tw.lastTick = &now } // We want to round down ticks := int(now.Sub(*tw.lastTick) / tw.tickDuration) adv := ticks if ticks > tw.wheelLen { ticks = tw.wheelLen } for i := 0; i < ticks; i++ { tw.current++ if tw.current >= tw.wheelLen { tw.current = 0 } if tw.wheel[tw.current].Head != nil { // We need to append the expired items as to not starve evicting the oldest ones if tw.expired.Tail == nil { tw.expired.Head = tw.wheel[tw.current].Head tw.expired.Tail = tw.wheel[tw.current].Tail } else { tw.expired.Tail.Next = tw.wheel[tw.current].Head tw.expired.Tail = tw.wheel[tw.current].Tail } tw.wheel[tw.current].Head = nil tw.wheel[tw.current].Tail = nil } } // Advance the tick based on duration to avoid losing some accuracy newTick := tw.lastTick.Add(tw.tickDuration * time.Duration(adv)) tw.lastTick = &newTick } nebula-1.6.1+dfsg/timeout_system.go000066400000000000000000000103421434072716400173350ustar00rootroot00000000000000package nebula import ( "sync" "time" "github.com/slackhq/nebula/iputil" ) // How many timer objects should be cached const systemTimerCacheMax = 50000 type SystemTimerWheel struct { // Current tick current int // Cheat on finding the length of the wheel wheelLen int // Last time we ticked, since we are lazy ticking lastTick *time.Time // Durations of a tick and the entire wheel tickDuration time.Duration wheelDuration time.Duration // The actual wheel which is just a set of singly linked lists, head/tail pointers wheel []*SystemTimeoutList // Singly linked list of items that have timed out of the wheel expired *SystemTimeoutList // Item cache to avoid garbage collect itemCache *SystemTimeoutItem itemsCached int lock sync.Mutex } // Represents a tick in the wheel type SystemTimeoutList struct { Head *SystemTimeoutItem Tail *SystemTimeoutItem } // Represents an item within a tick type SystemTimeoutItem struct { Item iputil.VpnIp Next *SystemTimeoutItem } // Builds a timer wheel and 
identifies the tick duration and wheel duration from the provided values // Purge must be called once per entry to actually remove anything func NewSystemTimerWheel(min, max time.Duration) *SystemTimerWheel { //TODO provide an error //if min >= max { // return nil //} // Round down and add 1 so we can have the smallest # of ticks in the wheel and still account for a full // max duration wLen := int((max / min) + 1) tw := SystemTimerWheel{ wheelLen: wLen, wheel: make([]*SystemTimeoutList, wLen), tickDuration: min, wheelDuration: max, expired: &SystemTimeoutList{}, } for i := range tw.wheel { tw.wheel[i] = &SystemTimeoutList{} } return &tw } func (tw *SystemTimerWheel) Add(v iputil.VpnIp, timeout time.Duration) *SystemTimeoutItem { tw.lock.Lock() defer tw.lock.Unlock() // Check and see if we should progress the tick //tw.advance(time.Now()) i := tw.findWheel(timeout) // Try to fetch off the cache ti := tw.itemCache if ti != nil { tw.itemCache = ti.Next ti.Next = nil tw.itemsCached-- } else { ti = &SystemTimeoutItem{} } // Relink and return ti.Item = v ti.Next = tw.wheel[i].Head tw.wheel[i].Head = ti if tw.wheel[i].Tail == nil { tw.wheel[i].Tail = ti } return ti } func (tw *SystemTimerWheel) Purge() interface{} { tw.lock.Lock() defer tw.lock.Unlock() if tw.expired.Head == nil { return nil } ti := tw.expired.Head tw.expired.Head = ti.Next if tw.expired.Head == nil { tw.expired.Tail = nil } p := ti.Item // Clear out the items references ti.Item = 0 ti.Next = nil // Maybe cache it for later if tw.itemsCached < systemTimerCacheMax { ti.Next = tw.itemCache tw.itemCache = ti tw.itemsCached++ } return p } func (tw *SystemTimerWheel) findWheel(timeout time.Duration) (i int) { if timeout < tw.tickDuration { // Can't track anything below the set resolution timeout = tw.tickDuration } else if timeout > tw.wheelDuration { // We aren't handling timeouts greater than the wheels duration timeout = tw.wheelDuration } // Find the next highest, rounding up tick := int(((timeout - 1) / 
tw.tickDuration) + 1) // Add another tick since the current tick may almost be over then map it to the wheel from our // current position tick += tw.current + 1 if tick >= tw.wheelLen { tick -= tw.wheelLen } return tick } func (tw *SystemTimerWheel) advance(now time.Time) { tw.lock.Lock() defer tw.lock.Unlock() if tw.lastTick == nil { tw.lastTick = &now } // We want to round down ticks := int(now.Sub(*tw.lastTick) / tw.tickDuration) //l.Infoln("Ticks: ", ticks) for i := 0; i < ticks; i++ { tw.current++ //l.Infoln("Tick: ", tw.current) if tw.current >= tw.wheelLen { tw.current = 0 } // We need to append the expired items as to not starve evicting the oldest ones if tw.expired.Tail == nil { tw.expired.Head = tw.wheel[tw.current].Head tw.expired.Tail = tw.wheel[tw.current].Tail } else { tw.expired.Tail.Next = tw.wheel[tw.current].Head if tw.wheel[tw.current].Tail != nil { tw.expired.Tail = tw.wheel[tw.current].Tail } } //l.Infoln("Head: ", tw.expired.Head, "Tail: ", tw.expired.Tail) tw.wheel[tw.current].Head = nil tw.wheel[tw.current].Tail = nil tw.lastTick = &now } } nebula-1.6.1+dfsg/timeout_system_test.go000066400000000000000000000071111434072716400203740ustar00rootroot00000000000000package nebula import ( "net" "testing" "time" "github.com/slackhq/nebula/iputil" "github.com/stretchr/testify/assert" ) func TestNewSystemTimerWheel(t *testing.T) { // Make sure we get an object we expect tw := NewSystemTimerWheel(time.Second, time.Second*10) assert.Equal(t, 11, tw.wheelLen) assert.Equal(t, 0, tw.current) assert.Nil(t, tw.lastTick) assert.Equal(t, time.Second*1, tw.tickDuration) assert.Equal(t, time.Second*10, tw.wheelDuration) assert.Len(t, tw.wheel, 11) // Assert the math is correct tw = NewSystemTimerWheel(time.Second*3, time.Second*10) assert.Equal(t, 4, tw.wheelLen) tw = NewSystemTimerWheel(time.Second*120, time.Minute*10) assert.Equal(t, 6, tw.wheelLen) } func TestSystemTimerWheel_findWheel(t *testing.T) { tw := NewSystemTimerWheel(time.Second, time.Second*10) 
assert.Len(t, tw.wheel, 11) // Current + tick + 1 since we don't know how far into current we are assert.Equal(t, 2, tw.findWheel(time.Second*1)) // Scale up to min duration assert.Equal(t, 2, tw.findWheel(time.Millisecond*1)) // Make sure we hit that last index assert.Equal(t, 0, tw.findWheel(time.Second*10)) // Scale down to max duration assert.Equal(t, 0, tw.findWheel(time.Second*11)) tw.current = 1 // Make sure we account for the current position properly assert.Equal(t, 3, tw.findWheel(time.Second*1)) assert.Equal(t, 1, tw.findWheel(time.Second*10)) } func TestSystemTimerWheel_Add(t *testing.T) { tw := NewSystemTimerWheel(time.Second, time.Second*10) fp1 := iputil.Ip2VpnIp(net.ParseIP("1.2.3.4")) tw.Add(fp1, time.Second*1) // Make sure we set head and tail properly assert.NotNil(t, tw.wheel[2]) assert.Equal(t, fp1, tw.wheel[2].Head.Item) assert.Nil(t, tw.wheel[2].Head.Next) assert.Equal(t, fp1, tw.wheel[2].Tail.Item) assert.Nil(t, tw.wheel[2].Tail.Next) // Make sure we only modify head fp2 := iputil.Ip2VpnIp(net.ParseIP("1.2.3.4")) tw.Add(fp2, time.Second*1) assert.Equal(t, fp2, tw.wheel[2].Head.Item) assert.Equal(t, fp1, tw.wheel[2].Head.Next.Item) assert.Equal(t, fp1, tw.wheel[2].Tail.Item) assert.Nil(t, tw.wheel[2].Tail.Next) // Make sure we use free'd items first tw.itemCache = &SystemTimeoutItem{} tw.itemsCached = 1 tw.Add(fp2, time.Second*1) assert.Nil(t, tw.itemCache) assert.Equal(t, 0, tw.itemsCached) } func TestSystemTimerWheel_Purge(t *testing.T) { // First advance should set the lastTick and do nothing else tw := NewSystemTimerWheel(time.Second, time.Second*10) assert.Nil(t, tw.lastTick) tw.advance(time.Now()) assert.NotNil(t, tw.lastTick) assert.Equal(t, 0, tw.current) fps := []iputil.VpnIp{9, 10, 11, 12} //fp1 := ip2int(net.ParseIP("1.2.3.4")) tw.Add(fps[0], time.Second*1) tw.Add(fps[1], time.Second*1) tw.Add(fps[2], time.Second*2) tw.Add(fps[3], time.Second*2) ta := time.Now().Add(time.Second * 3) lastTick := *tw.lastTick tw.advance(ta) 
assert.Equal(t, 3, tw.current) assert.True(t, tw.lastTick.After(lastTick)) // Make sure we get all 4 packets back for i := 0; i < 4; i++ { assert.Contains(t, fps, tw.Purge()) } // Make sure there aren't any leftover assert.Nil(t, tw.Purge()) assert.Nil(t, tw.expired.Head) assert.Nil(t, tw.expired.Tail) // Make sure we cached the free'd items assert.Equal(t, 4, tw.itemsCached) ci := tw.itemCache for i := 0; i < 4; i++ { assert.NotNil(t, ci) ci = ci.Next } assert.Nil(t, ci) // Lets make sure we roll over properly ta = ta.Add(time.Second * 5) tw.advance(ta) assert.Equal(t, 8, tw.current) ta = ta.Add(time.Second * 2) tw.advance(ta) assert.Equal(t, 10, tw.current) ta = ta.Add(time.Second * 1) tw.advance(ta) assert.Equal(t, 0, tw.current) } nebula-1.6.1+dfsg/timeout_test.go000066400000000000000000000070411434072716400167720ustar00rootroot00000000000000package nebula import ( "testing" "time" "github.com/slackhq/nebula/firewall" "github.com/stretchr/testify/assert" ) func TestNewTimerWheel(t *testing.T) { // Make sure we get an object we expect tw := NewTimerWheel(time.Second, time.Second*10) assert.Equal(t, 11, tw.wheelLen) assert.Equal(t, 0, tw.current) assert.Nil(t, tw.lastTick) assert.Equal(t, time.Second*1, tw.tickDuration) assert.Equal(t, time.Second*10, tw.wheelDuration) assert.Len(t, tw.wheel, 11) // Assert the math is correct tw = NewTimerWheel(time.Second*3, time.Second*10) assert.Equal(t, 4, tw.wheelLen) tw = NewTimerWheel(time.Second*120, time.Minute*10) assert.Equal(t, 6, tw.wheelLen) } func TestTimerWheel_findWheel(t *testing.T) { tw := NewTimerWheel(time.Second, time.Second*10) assert.Len(t, tw.wheel, 11) // Current + tick + 1 since we don't know how far into current we are assert.Equal(t, 2, tw.findWheel(time.Second*1)) // Scale up to min duration assert.Equal(t, 2, tw.findWheel(time.Millisecond*1)) // Make sure we hit that last index assert.Equal(t, 0, tw.findWheel(time.Second*10)) // Scale down to max duration assert.Equal(t, 0, 
tw.findWheel(time.Second*11)) tw.current = 1 // Make sure we account for the current position properly assert.Equal(t, 3, tw.findWheel(time.Second*1)) assert.Equal(t, 1, tw.findWheel(time.Second*10)) } func TestTimerWheel_Add(t *testing.T) { tw := NewTimerWheel(time.Second, time.Second*10) fp1 := firewall.Packet{} tw.Add(fp1, time.Second*1) // Make sure we set head and tail properly assert.NotNil(t, tw.wheel[2]) assert.Equal(t, fp1, tw.wheel[2].Head.Packet) assert.Nil(t, tw.wheel[2].Head.Next) assert.Equal(t, fp1, tw.wheel[2].Tail.Packet) assert.Nil(t, tw.wheel[2].Tail.Next) // Make sure we only modify head fp2 := firewall.Packet{} tw.Add(fp2, time.Second*1) assert.Equal(t, fp2, tw.wheel[2].Head.Packet) assert.Equal(t, fp1, tw.wheel[2].Head.Next.Packet) assert.Equal(t, fp1, tw.wheel[2].Tail.Packet) assert.Nil(t, tw.wheel[2].Tail.Next) // Make sure we use free'd items first tw.itemCache = &TimeoutItem{} tw.itemsCached = 1 tw.Add(fp2, time.Second*1) assert.Nil(t, tw.itemCache) assert.Equal(t, 0, tw.itemsCached) } func TestTimerWheel_Purge(t *testing.T) { // First advance should set the lastTick and do nothing else tw := NewTimerWheel(time.Second, time.Second*10) assert.Nil(t, tw.lastTick) tw.advance(time.Now()) assert.NotNil(t, tw.lastTick) assert.Equal(t, 0, tw.current) fps := []firewall.Packet{ {LocalIP: 1}, {LocalIP: 2}, {LocalIP: 3}, {LocalIP: 4}, } tw.Add(fps[0], time.Second*1) tw.Add(fps[1], time.Second*1) tw.Add(fps[2], time.Second*2) tw.Add(fps[3], time.Second*2) ta := time.Now().Add(time.Second * 3) lastTick := *tw.lastTick tw.advance(ta) assert.Equal(t, 3, tw.current) assert.True(t, tw.lastTick.After(lastTick)) // Make sure we get all 4 packets back for i := 0; i < 4; i++ { p, has := tw.Purge() assert.True(t, has) assert.Equal(t, fps[i], p) } // Make sure there aren't any leftover _, ok := tw.Purge() assert.False(t, ok) assert.Nil(t, tw.expired.Head) assert.Nil(t, tw.expired.Tail) // Make sure we cached the free'd items assert.Equal(t, 4, tw.itemsCached) ci 
:= tw.itemCache for i := 0; i < 4; i++ { assert.NotNil(t, ci) ci = ci.Next } assert.Nil(t, ci) // Lets make sure we roll over properly ta = ta.Add(time.Second * 5) tw.advance(ta) assert.Equal(t, 8, tw.current) ta = ta.Add(time.Second * 2) tw.advance(ta) assert.Equal(t, 10, tw.current) ta = ta.Add(time.Second * 1) tw.advance(ta) assert.Equal(t, 0, tw.current) } nebula-1.6.1+dfsg/udp/000077500000000000000000000000001434072716400145045ustar00rootroot00000000000000nebula-1.6.1+dfsg/udp/conn.go000066400000000000000000000005141434072716400157700ustar00rootroot00000000000000package udp import ( "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/header" ) const MTU = 9001 type EncReader func( addr *Addr, via interface{}, out []byte, packet []byte, header *header.H, fwPacket *firewall.Packet, lhh LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache, ) nebula-1.6.1+dfsg/udp/temp.go000066400000000000000000000010461434072716400160010ustar00rootroot00000000000000package udp import ( "github.com/slackhq/nebula/header" "github.com/slackhq/nebula/iputil" ) type EncWriter interface { SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool, ) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) Handshake(vpnIp iputil.VpnIp) } //TODO: The items in this file belong in their own packages but doing that in a single PR is a nightmare type LightHouseHandlerFunc func(rAddr *Addr, vpnIp iputil.VpnIp, p []byte, w EncWriter) nebula-1.6.1+dfsg/udp/udp_all.go000066400000000000000000000025501434072716400164550ustar00rootroot00000000000000package udp import ( "encoding/json" "fmt" "net" "strconv" ) type m map[string]interface{} type Addr struct { IP net.IP Port uint16 } func NewAddr(ip net.IP, port uint16) *Addr { addr := Addr{IP: make([]byte, net.IPv6len), Port: port} copy(addr.IP, ip.To16()) return &addr } func NewAddrFromString(s string) *Addr { ip, port, err := ParseIPAndPort(s) 
// ParseIPAndPort splits and resolves a "host:port" string into an IP and a
// port. The host may be a literal IP or a name resolvable via ResolveIPAddr.
// An error is returned when the string is malformed, the host does not
// resolve, or the port is not an integer in [0, 65535].
func ParseIPAndPort(s string) (net.IP, uint16, error) {
	rIp, sPort, err := net.SplitHostPort(s)
	if err != nil {
		return nil, 0, err
	}

	addr, err := net.ResolveIPAddr("ip", rIp)
	if err != nil {
		return nil, 0, err
	}

	// ParseUint with bitSize 16 rejects ports outside the uint16 range instead
	// of silently truncating them (Atoi let e.g. 70000 wrap to 4464)
	iPort, err := strconv.ParseUint(sPort, 10, 16)
	if err != nil {
		return nil, 0, err
	}

	return addr.IP, uint16(iPort), nil
}
"golang.org/x/sys/unix" ) func NewListenConfig(multi bool) net.ListenConfig { return net.ListenConfig{ Control: func(network, address string, c syscall.RawConn) error { if multi { var controlErr error err := c.Control(func(fd uintptr) { if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { controlErr = fmt.Errorf("SO_REUSEPORT failed: %v", err) return } }) if err != nil { return err } if controlErr != nil { return controlErr } } return nil }, } } func (u *Conn) Rebind() error { file, err := u.File() if err != nil { return err } return syscall.SetsockoptInt(int(file.Fd()), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0) } nebula-1.6.1+dfsg/udp/udp_freebsd.go000066400000000000000000000014371434072716400173220ustar00rootroot00000000000000//go:build !e2e_testing // +build !e2e_testing package udp // FreeBSD support is primarily implemented in udp_generic, besides NewListenConfig import ( "fmt" "net" "syscall" "golang.org/x/sys/unix" ) func NewListenConfig(multi bool) net.ListenConfig { return net.ListenConfig{ Control: func(network, address string, c syscall.RawConn) error { if multi { var controlErr error err := c.Control(func(fd uintptr) { if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { controlErr = fmt.Errorf("SO_REUSEPORT failed: %v", err) return } }) if err != nil { return err } if controlErr != nil { return controlErr } } return nil }, } } func (u *Conn) Rebind() error { return nil } nebula-1.6.1+dfsg/udp/udp_generic.go000066400000000000000000000041361434072716400173230ustar00rootroot00000000000000//go:build (!linux || android) && !e2e_testing // +build !linux android // +build !e2e_testing // udp_generic implements the nebula UDP interface in pure Go stdlib. This // means it can be used on platforms like Darwin and Windows. 
package udp import ( "context" "fmt" "net" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/header" ) type Conn struct { *net.UDPConn l *logrus.Logger } func NewListener(l *logrus.Logger, ip string, port int, multi bool, batch int) (*Conn, error) { lc := NewListenConfig(multi) pc, err := lc.ListenPacket(context.TODO(), "udp", fmt.Sprintf("%s:%d", ip, port)) if err != nil { return nil, err } if uc, ok := pc.(*net.UDPConn); ok { return &Conn{UDPConn: uc, l: l}, nil } return nil, fmt.Errorf("Unexpected PacketConn: %T %#v", pc, pc) } func (uc *Conn) WriteTo(b []byte, addr *Addr) error { _, err := uc.UDPConn.WriteToUDP(b, &net.UDPAddr{IP: addr.IP, Port: int(addr.Port)}) return err } func (uc *Conn) LocalAddr() (*Addr, error) { a := uc.UDPConn.LocalAddr() switch v := a.(type) { case *net.UDPAddr: addr := &Addr{IP: make([]byte, len(v.IP))} copy(addr.IP, v.IP) addr.Port = uint16(v.Port) return addr, nil default: return nil, fmt.Errorf("LocalAddr returned: %#v", a) } } func (u *Conn) ReloadConfig(c *config.C) { // TODO } func NewUDPStatsEmitter(udpConns []*Conn) func() { // No UDP stats for non-linux return func() {} } type rawMessage struct { Len uint32 } func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) { plaintext := make([]byte, MTU) buffer := make([]byte, MTU) h := &header.H{} fwPacket := &firewall.Packet{} udpAddr := &Addr{IP: make([]byte, 16)} nb := make([]byte, 12, 12) for { // Just read one packet at a time n, rua, err := u.ReadFromUDP(buffer) if err != nil { u.l.WithError(err).Error("Failed to read packets") continue } udpAddr.IP = rua.IP udpAddr.Port = uint16(rua.Port) r(udpAddr, nil, plaintext[:0], buffer[:n], h, fwPacket, lhf, nb, q, cache.Get(u.l)) } } nebula-1.6.1+dfsg/udp/udp_linux.go000066400000000000000000000162701434072716400170500ustar00rootroot00000000000000//go:build !android && !e2e_testing // +build 
!android,!e2e_testing package udp import ( "encoding/binary" "fmt" "net" "syscall" "unsafe" "github.com/rcrowley/go-metrics" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/header" "golang.org/x/sys/unix" ) //TODO: make it support reload as best you can! type Conn struct { sysFd int l *logrus.Logger batch int } var x int // From linux/sock_diag.h const ( _SK_MEMINFO_RMEM_ALLOC = iota _SK_MEMINFO_RCVBUF _SK_MEMINFO_WMEM_ALLOC _SK_MEMINFO_SNDBUF _SK_MEMINFO_FWD_ALLOC _SK_MEMINFO_WMEM_QUEUED _SK_MEMINFO_OPTMEM _SK_MEMINFO_BACKLOG _SK_MEMINFO_DROPS _SK_MEMINFO_VARS ) type _SK_MEMINFO [_SK_MEMINFO_VARS]uint32 func NewListener(l *logrus.Logger, ip string, port int, multi bool, batch int) (*Conn, error) { syscall.ForkLock.RLock() fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP) if err == nil { unix.CloseOnExec(fd) } syscall.ForkLock.RUnlock() if err != nil { unix.Close(fd) return nil, fmt.Errorf("unable to open socket: %s", err) } var lip [16]byte copy(lip[:], net.ParseIP(ip)) if multi { if err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { return nil, fmt.Errorf("unable to set SO_REUSEPORT: %s", err) } } //TODO: support multiple listening IPs (for limiting ipv6) if err = unix.Bind(fd, &unix.SockaddrInet6{Addr: lip, Port: port}); err != nil { return nil, fmt.Errorf("unable to bind to socket: %s", err) } //TODO: this may be useful for forcing threads into specific cores //unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_INCOMING_CPU, x) //v, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_INCOMING_CPU) //l.Println(v, err) return &Conn{sysFd: fd, l: l, batch: batch}, err } func (u *Conn) Rebind() error { return nil } func (u *Conn) SetRecvBuffer(n int) error { return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, n) } func (u *Conn) SetSendBuffer(n int) error { return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, 
unix.SO_SNDBUFFORCE, n) } func (u *Conn) GetRecvBuffer() (int, error) { return unix.GetsockoptInt(int(u.sysFd), unix.SOL_SOCKET, unix.SO_RCVBUF) } func (u *Conn) GetSendBuffer() (int, error) { return unix.GetsockoptInt(int(u.sysFd), unix.SOL_SOCKET, unix.SO_SNDBUF) } func (u *Conn) LocalAddr() (*Addr, error) { sa, err := unix.Getsockname(u.sysFd) if err != nil { return nil, err } addr := &Addr{} switch sa := sa.(type) { case *unix.SockaddrInet4: addr.IP = net.IP{sa.Addr[0], sa.Addr[1], sa.Addr[2], sa.Addr[3]}.To16() addr.Port = uint16(sa.Port) case *unix.SockaddrInet6: addr.IP = sa.Addr[0:] addr.Port = uint16(sa.Port) } return addr, nil } func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) { plaintext := make([]byte, MTU) h := &header.H{} fwPacket := &firewall.Packet{} udpAddr := &Addr{} nb := make([]byte, 12, 12) //TODO: should we track this? //metric := metrics.GetOrRegisterHistogram("test.batch_read", nil, metrics.NewExpDecaySample(1028, 0.015)) msgs, buffers, names := u.PrepareRawMessages(u.batch) read := u.ReadMulti if u.batch == 1 { read = u.ReadSingle } for { n, err := read(msgs) if err != nil { u.l.WithError(err).Error("Failed to read packets") continue } //metric.Update(int64(n)) for i := 0; i < n; i++ { udpAddr.IP = names[i][8:24] udpAddr.Port = binary.BigEndian.Uint16(names[i][2:4]) r(udpAddr, nil, plaintext[:0], buffers[i][:msgs[i].Len], h, fwPacket, lhf, nb, q, cache.Get(u.l)) } } } func (u *Conn) ReadSingle(msgs []rawMessage) (int, error) { for { n, _, err := unix.Syscall6( unix.SYS_RECVMSG, uintptr(u.sysFd), uintptr(unsafe.Pointer(&(msgs[0].Hdr))), 0, 0, 0, 0, ) if err != 0 { return 0, &net.OpError{Op: "recvmsg", Err: err} } msgs[0].Len = uint32(n) return 1, nil } } func (u *Conn) ReadMulti(msgs []rawMessage) (int, error) { for { n, _, err := unix.Syscall6( unix.SYS_RECVMMSG, uintptr(u.sysFd), uintptr(unsafe.Pointer(&msgs[0])), uintptr(len(msgs)), unix.MSG_WAITFORONE, 0, 0, ) if err != 0 { 
return 0, &net.OpError{Op: "recvmmsg", Err: err} } return int(n), nil } } func (u *Conn) WriteTo(b []byte, addr *Addr) error { var rsa unix.RawSockaddrInet6 rsa.Family = unix.AF_INET6 p := (*[2]byte)(unsafe.Pointer(&rsa.Port)) p[0] = byte(addr.Port >> 8) p[1] = byte(addr.Port) copy(rsa.Addr[:], addr.IP) for { _, _, err := unix.Syscall6( unix.SYS_SENDTO, uintptr(u.sysFd), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(0), uintptr(unsafe.Pointer(&rsa)), uintptr(unix.SizeofSockaddrInet6), ) if err != 0 { return &net.OpError{Op: "sendto", Err: err} } //TODO: handle incomplete writes return nil } } func (u *Conn) ReloadConfig(c *config.C) { b := c.GetInt("listen.read_buffer", 0) if b > 0 { err := u.SetRecvBuffer(b) if err == nil { s, err := u.GetRecvBuffer() if err == nil { u.l.WithField("size", s).Info("listen.read_buffer was set") } else { u.l.WithError(err).Warn("Failed to get listen.read_buffer") } } else { u.l.WithError(err).Error("Failed to set listen.read_buffer") } } b = c.GetInt("listen.write_buffer", 0) if b > 0 { err := u.SetSendBuffer(b) if err == nil { s, err := u.GetSendBuffer() if err == nil { u.l.WithField("size", s).Info("listen.write_buffer was set") } else { u.l.WithError(err).Warn("Failed to get listen.write_buffer") } } else { u.l.WithError(err).Error("Failed to set listen.write_buffer") } } } func (u *Conn) getMemInfo(meminfo *_SK_MEMINFO) error { var vallen uint32 = 4 * _SK_MEMINFO_VARS _, _, err := unix.Syscall6(unix.SYS_GETSOCKOPT, uintptr(u.sysFd), uintptr(unix.SOL_SOCKET), uintptr(unix.SO_MEMINFO), uintptr(unsafe.Pointer(meminfo)), uintptr(unsafe.Pointer(&vallen)), 0) if err != 0 { return err } return nil } func NewUDPStatsEmitter(udpConns []*Conn) func() { // Check if our kernel supports SO_MEMINFO before registering the gauges var udpGauges [][_SK_MEMINFO_VARS]metrics.Gauge var meminfo _SK_MEMINFO if err := udpConns[0].getMemInfo(&meminfo); err == nil { udpGauges = make([][_SK_MEMINFO_VARS]metrics.Gauge, len(udpConns)) for i := 
range udpConns { udpGauges[i] = [_SK_MEMINFO_VARS]metrics.Gauge{ metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rmem_alloc", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.rcvbuf", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_alloc", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.sndbuf", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.fwd_alloc", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.wmem_queued", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.optmem", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.backlog", i), nil), metrics.GetOrRegisterGauge(fmt.Sprintf("udp.%d.drops", i), nil), } } } return func() { for i, gauges := range udpGauges { if err := udpConns[i].getMemInfo(&meminfo); err == nil { for j := 0; j < _SK_MEMINFO_VARS; j++ { gauges[j].Update(int64(meminfo[j])) } } } } } nebula-1.6.1+dfsg/udp/udp_linux_32.go000066400000000000000000000020731434072716400173500ustar00rootroot00000000000000//go:build linux && (386 || amd64p32 || arm || mips || mipsle) && !android && !e2e_testing // +build linux // +build 386 amd64p32 arm mips mipsle // +build !android // +build !e2e_testing package udp import ( "golang.org/x/sys/unix" ) type iovec struct { Base *byte Len uint32 } type msghdr struct { Name *byte Namelen uint32 Iov *iovec Iovlen uint32 Control *byte Controllen uint32 Flags int32 } type rawMessage struct { Hdr msghdr Len uint32 } func (u *Conn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte) { msgs := make([]rawMessage, n) buffers := make([][]byte, n) names := make([][]byte, n) for i := range msgs { buffers[i] = make([]byte, MTU) names[i] = make([]byte, unix.SizeofSockaddrInet6) //TODO: this is still silly, no need for an array vs := []iovec{ {Base: &buffers[i][0], Len: uint32(len(buffers[i]))}, } msgs[i].Hdr.Iov = &vs[0] msgs[i].Hdr.Iovlen = uint32(len(vs)) msgs[i].Hdr.Name = &names[i][0] msgs[i].Hdr.Namelen = uint32(len(names[i])) } return msgs, buffers, names } 
nebula-1.6.1+dfsg/udp/udp_linux_64.go000066400000000000000000000022621434072716400173550ustar00rootroot00000000000000//go:build linux && (amd64 || arm64 || ppc64 || ppc64le || mips64 || mips64le || s390x || riscv64) && !android && !e2e_testing // +build linux // +build amd64 arm64 ppc64 ppc64le mips64 mips64le s390x riscv64 // +build !android // +build !e2e_testing package udp import ( "golang.org/x/sys/unix" ) type iovec struct { Base *byte Len uint64 } type msghdr struct { Name *byte Namelen uint32 Pad0 [4]byte Iov *iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 Pad1 [4]byte } type rawMessage struct { Hdr msghdr Len uint32 Pad0 [4]byte } func (u *Conn) PrepareRawMessages(n int) ([]rawMessage, [][]byte, [][]byte) { msgs := make([]rawMessage, n) buffers := make([][]byte, n) names := make([][]byte, n) for i := range msgs { buffers[i] = make([]byte, MTU) names[i] = make([]byte, unix.SizeofSockaddrInet6) //TODO: this is still silly, no need for an array vs := []iovec{ {Base: &buffers[i][0], Len: uint64(len(buffers[i]))}, } msgs[i].Hdr.Iov = &vs[0] msgs[i].Hdr.Iovlen = uint64(len(vs)) msgs[i].Hdr.Name = &names[i][0] msgs[i].Hdr.Namelen = uint32(len(names[i])) } return msgs, buffers, names } nebula-1.6.1+dfsg/udp/udp_tester.go000066400000000000000000000065161434072716400172210ustar00rootroot00000000000000//go:build e2e_testing // +build e2e_testing package udp import ( "fmt" "net" "github.com/sirupsen/logrus" "github.com/slackhq/nebula/config" "github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/header" ) type Packet struct { ToIp net.IP ToPort uint16 FromIp net.IP FromPort uint16 Data []byte } func (u *Packet) Copy() *Packet { n := &Packet{ ToIp: make(net.IP, len(u.ToIp)), ToPort: u.ToPort, FromIp: make(net.IP, len(u.FromIp)), FromPort: u.FromPort, Data: make([]byte, len(u.Data)), } copy(n.ToIp, u.ToIp) copy(n.FromIp, u.FromIp) copy(n.Data, u.Data) return n } type Conn struct { Addr *Addr RxPackets chan *Packet // Packets to receive into 
nebula TxPackets chan *Packet // Packets transmitted outside by nebula l *logrus.Logger } func NewListener(l *logrus.Logger, ip string, port int, _ bool, _ int) (*Conn, error) { return &Conn{ Addr: &Addr{net.ParseIP(ip), uint16(port)}, RxPackets: make(chan *Packet, 10), TxPackets: make(chan *Packet, 10), l: l, }, nil } // Send will place a UdpPacket onto the receive queue for nebula to consume // this is an encrypted packet or a handshake message in most cases // packets were transmitted from another nebula node, you can send them with Tun.Send func (u *Conn) Send(packet *Packet) { h := &header.H{} if err := h.Parse(packet.Data); err != nil { panic(err) } if u.l.Level >= logrus.InfoLevel { u.l.WithField("header", h). WithField("udpAddr", fmt.Sprintf("%v:%v", packet.FromIp, packet.FromPort)). WithField("dataLen", len(packet.Data)). Info("UDP receiving injected packet") } u.RxPackets <- packet } // Get will pull a UdpPacket from the transmit queue // nebula meant to send this message on the network, it will be encrypted // packets were ingested from the tun side (in most cases), you can send them with Tun.Send func (u *Conn) Get(block bool) *Packet { if block { return <-u.TxPackets } select { case p := <-u.TxPackets: return p default: return nil } } //********************************************************************************************************************// // Below this is boilerplate implementation to make nebula actually work //********************************************************************************************************************// func (u *Conn) WriteTo(b []byte, addr *Addr) error { p := &Packet{ Data: make([]byte, len(b), len(b)), FromIp: make([]byte, 16), FromPort: u.Addr.Port, ToIp: make([]byte, 16), ToPort: addr.Port, } copy(p.Data, b) copy(p.ToIp, addr.IP.To16()) copy(p.FromIp, u.Addr.IP.To16()) u.TxPackets <- p return nil } func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) { 
plaintext := make([]byte, MTU) h := &header.H{} fwPacket := &firewall.Packet{} ua := &Addr{IP: make([]byte, 16)} nb := make([]byte, 12, 12) for { p, ok := <-u.RxPackets if !ok { return } ua.Port = p.FromPort copy(ua.IP, p.FromIp.To16()) r(ua, nil, plaintext[:0], p.Data, h, fwPacket, lhf, nb, q, cache.Get(u.l)) } } func (u *Conn) ReloadConfig(*config.C) {} func NewUDPStatsEmitter(_ []*Conn) func() { // No UDP stats for non-linux return func() {} } func (u *Conn) LocalAddr() (*Addr, error) { return u.Addr, nil } func (u *Conn) Rebind() error { return nil } nebula-1.6.1+dfsg/udp/udp_windows.go000066400000000000000000000012371434072716400174000ustar00rootroot00000000000000//go:build !e2e_testing // +build !e2e_testing package udp // Windows support is primarily implemented in udp_generic, besides NewListenConfig import ( "fmt" "net" "syscall" ) func NewListenConfig(multi bool) net.ListenConfig { return net.ListenConfig{ Control: func(network, address string, c syscall.RawConn) error { if multi { // There is no way to support multiple listeners safely on Windows: // https://docs.microsoft.com/en-us/windows/desktop/winsock/using-so-reuseaddr-and-so-exclusiveaddruse return fmt.Errorf("multiple udp listeners not supported on windows") } return nil }, } } func (u *Conn) Rebind() error { return nil } nebula-1.6.1+dfsg/util/000077500000000000000000000000001434072716400146715ustar00rootroot00000000000000nebula-1.6.1+dfsg/util/error.go000066400000000000000000000014511434072716400163520ustar00rootroot00000000000000package util import ( "errors" "github.com/sirupsen/logrus" ) type ContextualError struct { RealError error Fields map[string]interface{} Context string } func NewContextualError(msg string, fields map[string]interface{}, realError error) ContextualError { return ContextualError{Context: msg, Fields: fields, RealError: realError} } func (ce ContextualError) Error() string { if ce.RealError == nil { return ce.Context } return ce.RealError.Error() } func (ce 
ContextualError) Unwrap() error { if ce.RealError == nil { return errors.New(ce.Context) } return ce.RealError } func (ce *ContextualError) Log(lr *logrus.Logger) { if ce.RealError != nil { lr.WithFields(ce.Fields).WithError(ce.RealError).Error(ce.Context) } else { lr.WithFields(ce.Fields).Error(ce.Context) } } nebula-1.6.1+dfsg/util/error_test.go000066400000000000000000000031711434072716400174120ustar00rootroot00000000000000package util import ( "errors" "testing" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) type m map[string]interface{} type TestLogWriter struct { Logs []string } func NewTestLogWriter() *TestLogWriter { return &TestLogWriter{Logs: make([]string, 0)} } func (tl *TestLogWriter) Write(p []byte) (n int, err error) { tl.Logs = append(tl.Logs, string(p)) return len(p), nil } func (tl *TestLogWriter) Reset() { tl.Logs = tl.Logs[:0] } func TestContextualError_Log(t *testing.T) { l := logrus.New() l.Formatter = &logrus.TextFormatter{ DisableTimestamp: true, DisableColors: true, } tl := NewTestLogWriter() l.Out = tl // Test a full context line tl.Reset() e := NewContextualError("test message", m{"field": "1"}, errors.New("error")) e.Log(l) assert.Equal(t, []string{"level=error msg=\"test message\" error=error field=1\n"}, tl.Logs) // Test a line with an error and msg but no fields tl.Reset() e = NewContextualError("test message", nil, errors.New("error")) e.Log(l) assert.Equal(t, []string{"level=error msg=\"test message\" error=error\n"}, tl.Logs) // Test just a context and fields tl.Reset() e = NewContextualError("test message", m{"field": "1"}, nil) e.Log(l) assert.Equal(t, []string{"level=error msg=\"test message\" field=1\n"}, tl.Logs) // Test just a context tl.Reset() e = NewContextualError("test message", nil, nil) e.Log(l) assert.Equal(t, []string{"level=error msg=\"test message\"\n"}, tl.Logs) // Test just an error tl.Reset() e = NewContextualError("", nil, errors.New("error")) e.Log(l) assert.Equal(t, []string{"level=error 
error=error\n"}, tl.Logs) } nebula-1.6.1+dfsg/wintun/000077500000000000000000000000001434072716400152405ustar00rootroot00000000000000nebula-1.6.1+dfsg/wintun/device.go000066400000000000000000000015061434072716400170300ustar00rootroot00000000000000//go:build windows // +build windows /* SPDX-License-Identifier: MIT * * Copyright (C) 2017-2021 WireGuard LLC. All Rights Reserved. */ //NOTE: this file was forked from https://git.zx2c4.com/wireguard-go/tree/tun/tun.go?id=851efb1bb65555e0f765a3361c8eb5ac47435b19 package wintun import ( "os" ) type Device interface { File() *os.File // returns the file descriptor of the device Read([]byte, int) (int, error) // read a packet from the device (without any additional headers) Write([]byte, int) (int, error) // writes a packet to the device (without any additional headers) Flush() error // flush all previous writes to the device Name() (string, error) // fetches and returns the current name Close() error // stops the device and closes the event channel } nebula-1.6.1+dfsg/wintun/tun.go000066400000000000000000000126651434072716400164070ustar00rootroot00000000000000//go:build windows // +build windows /* SPDX-License-Identifier: MIT * * Copyright (C) 2018-2021 WireGuard LLC. All Rights Reserved. 
*/

//NOTE: This file was forked from https://git.zx2c4.com/wireguard-go/tree/tun/tun_windows.go?id=851efb1bb65555e0f765a3361c8eb5ac47435b19
// Mainly to shed functionality we won't be using and to fix names that display in the system

package wintun

import (
	"errors"
	"fmt"
	"os"
	"sync"
	"sync/atomic"
	"time"
	_ "unsafe"

	"golang.org/x/sys/windows"
	"golang.zx2c4.com/wintun"
)

const (
	// Window over which throughput is sampled by rateJuggler (500ms in ns).
	rateMeasurementGranularity = uint64((time.Second / 2) / time.Nanosecond)
	// Receive rate (bytes/s) above which Read busy-spins instead of blocking.
	spinloopRateThreshold = 800000000 / 8 // 800mbps
	// Maximum time Read will busy-spin before falling back to a blocking wait.
	spinloopDuration = uint64(time.Millisecond / 80 / time.Nanosecond) // ~1gbit/s
)

// rateJuggler tracks recent receive throughput so Read can decide whether
// spinning (low latency) or blocking (low CPU) is appropriate. All fields are
// accessed with sync/atomic; `changing` is a CAS guard for window rollover.
type rateJuggler struct {
	current       uint64 // last measured rate, bytes/sec
	nextByteCount uint64 // bytes accumulated in the current window
	nextStartTime int64  // nanotime at the start of the current window
	changing      int32  // 1 while a goroutine is rolling the window over
}

// NativeTun is the Windows Wintun-backed tun device.
type NativeTun struct {
	wt        *wintun.Adapter
	name      string
	handle    windows.Handle
	rate      rateJuggler
	session   wintun.Session
	readWait  windows.Handle // event signaled when the receive ring has data
	running   sync.WaitGroup // tracks in-flight Read/Write/LUID calls so Close can drain them
	closeOnce sync.Once
	close     int32 // set to 1 (atomically) once Close has begun
}

var WintunTunnelType = "Nebula"
var WintunStaticRequestedGUID *windows.GUID

// procyield/nanotime are linked to unexported runtime internals to get a
// cheap CPU pause and a monotonic clock for the spinloop heuristics.

//go:linkname procyield runtime.procyield
func procyield(cycles uint32)

//go:linkname nanotime runtime.nanotime
func nanotime() int64

//
// CreateTUN creates a Wintun interface with the given name. Should a Wintun
// interface with the same name exist, it is reused.
//
func CreateTUN(ifname string, mtu int) (Device, error) {
	return CreateTUNWithRequestedGUID(ifname, WintunStaticRequestedGUID, mtu)
}

//
// CreateTUNWithRequestedGUID creates a Wintun interface with the given name and
// a requested GUID. Should a Wintun interface with the same name exist, it is reused.
//
func CreateTUNWithRequestedGUID(ifname string, requestedGUID *windows.GUID, mtu int) (Device, error) {
	wt, err := wintun.CreateAdapter(ifname, WintunTunnelType, requestedGUID)
	if err != nil {
		return nil, fmt.Errorf("Error creating interface: %w", err)
	}

	tun := &NativeTun{
		wt:     wt,
		name:   ifname,
		handle: windows.InvalidHandle,
	}

	tun.session, err = wt.StartSession(0x800000) // Ring capacity, 8 MiB
	if err != nil {
		// Session failed to start; tear down the adapter we just created.
		tun.wt.Close()
		return nil, fmt.Errorf("Error starting session: %w", err)
	}
	tun.readWait = tun.session.ReadWaitEvent()
	return tun, nil
}

// Name returns the interface name the device was created with.
func (tun *NativeTun) Name() (string, error) {
	return tun.name, nil
}

// File always returns nil: a Wintun session has no usable *os.File handle.
func (tun *NativeTun) File() *os.File {
	return nil
}

// Close flags the device closed, wakes any blocked reader, waits for
// in-flight Read/Write calls to drain, then ends the session and closes the
// adapter. Safe to call more than once; only the first call does the work.
// NOTE(review): err is never assigned inside the Once, so this always
// returns nil — presumably kept for interface compatibility; confirm upstream.
func (tun *NativeTun) Close() error {
	var err error
	tun.closeOnce.Do(func() {
		atomic.StoreInt32(&tun.close, 1)
		windows.SetEvent(tun.readWait)
		tun.running.Wait()
		tun.session.End()
		if tun.wt != nil {
			tun.wt.Close()
		}
	})
	return err
}

// Note: Read() and Write() assume the caller comes only from a single thread; there's no locking.
// Read copies one packet from the Wintun receive ring into buff at offset and
// returns its length. When recent throughput exceeds spinloopRateThreshold it
// busy-spins briefly on an empty ring to cut wakeup latency; otherwise it
// blocks on the ring's read-wait event. Returns os.ErrClosed once Close has run.
func (tun *NativeTun) Read(buff []byte, offset int) (int, error) {
	tun.running.Add(1)
	defer tun.running.Done()
retry:
	if atomic.LoadInt32(&tun.close) == 1 {
		return 0, os.ErrClosed
	}
	start := nanotime()
	// Spin only if the measured rate is high AND the measurement is fresh
	// (within two granularity windows) — otherwise a blocking wait is cheaper.
	shouldSpin := atomic.LoadUint64(&tun.rate.current) >= spinloopRateThreshold && uint64(start-atomic.LoadInt64(&tun.rate.nextStartTime)) <= rateMeasurementGranularity*2
	for {
		// Re-check close inside the loop so a spinning reader notices Close.
		if atomic.LoadInt32(&tun.close) == 1 {
			return 0, os.ErrClosed
		}
		packet, err := tun.session.ReceivePacket()
		switch err {
		case nil:
			packetSize := len(packet)
			copy(buff[offset:], packet)
			tun.session.ReleaseReceivePacket(packet)
			tun.rate.update(uint64(packetSize))
			return packetSize, nil
		case windows.ERROR_NO_MORE_ITEMS:
			// Ring is empty. Spin (procyield) while the budget lasts, then
			// park on the event and restart from the close check.
			if !shouldSpin || uint64(nanotime()-start) >= spinloopDuration {
				windows.WaitForSingleObject(tun.readWait, windows.INFINITE)
				goto retry
			}
			procyield(1)
			continue
		case windows.ERROR_HANDLE_EOF:
			return 0, os.ErrClosed
		case windows.ERROR_INVALID_DATA:
			return 0, errors.New("Send ring corrupt")
		}
		return 0, fmt.Errorf("Read failed: %w", err)
	}
}

// Flush is a no-op: SendPacket submits directly to the ring.
func (tun *NativeTun) Flush() error {
	return nil
}

// Write submits buff[offset:] as one packet to the Wintun send ring and
// returns the packet size. A full ring drops the packet silently (returns
// 0, nil). Returns os.ErrClosed once Close has run.
func (tun *NativeTun) Write(buff []byte, offset int) (int, error) {
	tun.running.Add(1)
	defer tun.running.Done()
	if atomic.LoadInt32(&tun.close) == 1 {
		return 0, os.ErrClosed
	}

	packetSize := len(buff) - offset
	tun.rate.update(uint64(packetSize))

	packet, err := tun.session.AllocateSendPacket(packetSize)
	if err == nil {
		copy(packet, buff[offset:])
		tun.session.SendPacket(packet)
		return packetSize, nil
	}
	switch err {
	case windows.ERROR_HANDLE_EOF:
		return 0, os.ErrClosed
	case windows.ERROR_BUFFER_OVERFLOW:
		return 0, nil // Dropping when ring is full.
	}
	return 0, fmt.Errorf("Write failed: %w", err)
}

// LUID returns Windows interface instance ID.
func (tun *NativeTun) LUID() uint64 {
	tun.running.Add(1)
	defer tun.running.Done()
	// Return 0 once closed; the adapter handle may already be gone.
	if atomic.LoadInt32(&tun.close) == 1 {
		return 0
	}
	return tun.wt.LUID()
}

// RunningVersion returns the running version of the Wintun driver.
func (tun *NativeTun) RunningVersion() (version uint32, err error) {
	return wintun.RunningVersion()
}

// update adds packetLen to the current measurement window and, once the
// window is at least rateMeasurementGranularity old, rolls it over: it
// publishes the bytes/sec rate into rate.current and resets the counter.
// The CAS on `changing` ensures only one goroutine performs the rollover;
// losers simply skip it (their bytes are already counted via AddUint64).
func (rate *rateJuggler) update(packetLen uint64) {
	now := nanotime()
	total := atomic.AddUint64(&rate.nextByteCount, packetLen)
	period := uint64(now - atomic.LoadInt64(&rate.nextStartTime))
	if period >= rateMeasurementGranularity {
		if !atomic.CompareAndSwapInt32(&rate.changing, 0, 1) {
			return
		}
		atomic.StoreInt64(&rate.nextStartTime, now)
		// Scale bytes-per-window to bytes-per-second.
		atomic.StoreUint64(&rate.current, total*uint64(time.Second/time.Nanosecond)/period)
		atomic.StoreUint64(&rate.nextByteCount, 0)
		atomic.StoreInt32(&rate.changing, 0)
	}
}