==> golang-github-lucas-clemente-quic-go-0.38.2/.circleci/config.yml <==
version: 2.1
executors:
  test-go120:
    docker:
      - image: "cimg/go:1.20"
    environment:
      runrace: true
      TIMESCALE_FACTOR: 3

jobs:
  "test": &test
    executor: test-go120
    steps:
      - checkout
      - run:
          name: "Build infos"
          command: go version
      - run:
          name: "Run tools tests"
          command: go run github.com/onsi/ginkgo/v2/ginkgo -race -r -v -randomize-all -trace integrationtests/tools
      - run:
          name: "Run self integration tests"
          command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/self
      - run:
          name: "Run version negotiation tests"
          command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/versionnegotiation
      - run:
          name: "Run self integration tests with race detector"
          command: go run github.com/onsi/ginkgo/v2/ginkgo -race -v -randomize-all -trace integrationtests/self
      - run:
          name: "Run self integration tests with qlog"
          command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/self -- -qlog
      - run:
          name: "Run version negotiation tests with qlog"
          command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/versionnegotiation -- -qlog
  go120:
    <<: *test

workflows:
  workflow:
    jobs:
      - go120

==> golang-github-lucas-clemente-quic-go-0.38.2/.githooks/README.md <==
# Git Hooks
This directory contains useful Git hooks for working with quic-go.
Install them by running
```bash
git config core.hooksPath .githooks
```

==> golang-github-lucas-clemente-quic-go-0.38.2/.githooks/pre-commit <==
#!/bin/bash
# Check that test files don't contain focussed test cases.
errored=false
for f in $(git diff --diff-filter=d --cached --name-only); do
	if [[ $f != *_test.go ]]; then continue; fi
	output=$(git show :"$f" | grep -n -e "FIt(" -e "FContext(" -e "FDescribe(")
	if [ $? -eq 0 ]; then
		echo "$f contains a focussed test:"
		echo "$output"
		echo ""
		errored=true
	fi
done

pushd ./integrationtests/gomodvendor > /dev/null
go mod tidy
if [[ -n $(git diff --diff-filter=d --name-only -- "go.mod" "go.sum") ]]; then
	echo "go.mod / go.sum in integrationtests/gomodvendor not tidied"
	errored=true
fi
popd > /dev/null

# Check that all Go files are properly gofumpt-ed.
output=$(gofumpt -d $(git diff --diff-filter=d --cached --name-only -- '*.go'))
if [ -n "$output" ]; then
	echo "Found files that are not properly gofumpt-ed."
	echo "$output"
	errored=true
fi

if [ "$errored" = true ]; then
	exit 1
fi

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/build-interop-docker.yml <==
name: Build interop Docker image
on:
  push:
    branches:
      - master
    tags:
      - 'v*'

jobs:
  interop:
    runs-on: ${{ fromJSON(vars['DOCKER_RUNNER_UBUNTU'] || '"ubuntu-latest"') }}
    steps:
      - uses: actions/checkout@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          platforms: linux/amd64,linux/arm64
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      - name: set tag name
        id: tag
        # Tagged releases won't be picked up by the interop runner automatically,
        # but they can be useful when debugging regressions.
        run: |
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            echo "tag=${GITHUB_REF#refs/tags/}" | tee -a $GITHUB_OUTPUT;
            echo "gitref=${GITHUB_REF#refs/tags/}" | tee -a $GITHUB_OUTPUT;
          else
            echo 'tag=latest' | tee -a $GITHUB_OUTPUT;
            echo 'gitref=${{ github.sha }}' | tee -a $GITHUB_OUTPUT;
          fi
      - uses: docker/build-push-action@v4
        with:
          context: "{{defaultContext}}:interop"
          platforms: linux/amd64,linux/arm64
          push: true
          build-args: |
            GITREF=${{ steps.tag.outputs.gitref }}
          tags: martenseemann/quic-go-interop:${{ steps.tag.outputs.tag }}

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/cross-compile.sh <==
#!/bin/bash
set -e
dist="$1"
goos=$(echo "$dist" | cut -d "/" -f1)
goarch=$(echo "$dist" | cut -d "/" -f2)
# cross-compiling for android is a pain...
if [[ "$goos" == "android" ]]; then exit; fi
# iOS builds require Cgo, see https://github.com/golang/go/issues/43343
# Cgo would then need a C cross compilation setup. Not worth the hassle.
if [[ "$goos" == "ios" ]]; then exit; fi
# Write all log output to a temporary file instead of to stdout.
# That allows running this script in parallel, while preserving the correct order of the output.
log_file=$(mktemp)
error_handler() {
cat "$log_file" >&2
rm "$log_file"
exit 1
}
trap 'error_handler' ERR
echo "$dist" >> "$log_file"
out="main-$goos-$goarch"
GOOS=$goos GOARCH=$goarch go build -o $out example/main.go >> "$log_file" 2>&1
rm $out
cat "$log_file"
rm "$log_file"

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/cross-compile.yml <==
on: [push, pull_request]
jobs:
  crosscompile:
    strategy:
      fail-fast: false
      matrix:
        go: [ "1.20.x", "1.21.x" ]
    runs-on: ${{ fromJSON(vars['CROSS_COMPILE_RUNNER_UBUNTU'] || '"ubuntu-latest"') }}
    name: "Cross Compilation (Go ${{matrix.go}})"
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go }}
      - name: Install build utils
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-multilib
      - name: Install dependencies
        run: go build example/main.go
      - name: Run cross compilation
        # run in parallel on as many cores as are available on the machine
        run: go tool dist list | xargs -I % -P "$(nproc)" .github/workflows/cross-compile.sh %

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/go-generate.sh <==
#!/usr/bin/env bash
set -e
DIR=$(pwd)
TMP=$(mktemp -d)
cd "$TMP"
cp -r "$DIR" orig
cp -r "$DIR" generated
cd generated
# delete all go-generated files generated (that adhere to the comment convention)
grep --include \*.go -lrIZ "^// Code generated .* DO NOT EDIT\.$" . | xargs --null rm
# First regenerate sys_conn_buffers_write.go.
# If it doesn't exist, the following mockgen calls will fail.
go generate -run "sys_conn_buffers_write.go"
# now generate everything
go generate ./...
cd ..
# don't compare fuzzing corpora
diff --exclude=corpus --exclude=.git -ruN orig generated

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/go-generate.yml <==
on: [push, pull_request]
jobs:
  gogenerate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: "1.20.x"
      - name: Install dependencies
        run: go build
      - name: Run code generators
        run: .github/workflows/go-generate.sh

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/integration.yml <==
on: [push, pull_request]
jobs:
  integration:
    strategy:
      fail-fast: false
      matrix:
        os: [ "ubuntu" ]
        go: [ "1.20.x", "1.21.x" ]
        include:
          - os: "windows"
            go: "1.21.x"
          - os: "macos"
            go: "1.21.x"
    runs-on: ${{ fromJSON(vars[format('INTEGRATION_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }}
    env:
      DEBUG: false # set this to true to export qlogs and save them as artifacts
      TIMESCALE_FACTOR: 3
    name: Integration Tests (${{ matrix.os }}, Go ${{ matrix.go }})
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          stable: '!contains(${{ matrix.go }}, "beta") && !contains(${{ matrix.go }}, "rc")'
          go-version: ${{ matrix.go }}
      - run: go version
      - name: set qlogger
        if: env.DEBUG == 'true'
        run: echo "QLOGFLAG= -qlog" >> $GITHUB_ENV
      - name: Run other tests
        run: |
          go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace -skip-package self,versionnegotiation integrationtests
          go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/versionnegotiation -- ${{ env.QLOGFLAG }}
      - name: Run self tests, using QUIC v1
        if: success() || failure() # run this step even if the previous one failed
        run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=1 ${{ env.QLOGFLAG }}
      - name: Run self tests, using QUIC v2
        if: success() || failure() # run this step even if the previous one failed
        run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=2 ${{ env.QLOGFLAG }}
      - name: Run self tests, with GSO enabled
        if: success() || failure() # run this step even if the previous one failed
        env:
          QUIC_GO_ENABLE_GSO: true
        run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=1 ${{ env.QLOGFLAG }}
      - name: Run tests (32 bit)
        if: ${{ matrix.os != 'macos' && (success() || failure()) }} # run this step even if the previous one failed
        env:
          GOARCH: 386
        run: |
          go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace -skip-package self,versionnegotiation integrationtests
          go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/versionnegotiation -- ${{ env.QLOGFLAG }}
          go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- ${{ env.QLOGFLAG }}
      - name: save qlogs
        if: ${{ always() && env.DEBUG == 'true' }}
        uses: actions/upload-artifact@v2
        with:
          name: qlogs
          path: integrationtests/self/*.qlog

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/lint.yml <==
on: [push, pull_request]
jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          skip-pkg-cache: true
          go-version: "1.20.x"
      - name: Check that no non-test files import Ginkgo or Gomega
        run: .github/workflows/no_ginkgo.sh
      - name: Check that go.mod is tidied
        run: |
          cp go.mod go.mod.orig
          cp go.sum go.sum.orig
          go mod tidy
          diff go.mod go.mod.orig
          diff go.sum go.sum.orig
      - name: Check that go mod vendor works
        run: |
          cd integrationtests/gomodvendor
          go mod vendor
  golangci-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: "1.20.x"
      - name: golangci-lint (Linux)
        uses: golangci/golangci-lint-action@v3
        with:
          skip-pkg-cache: true
          args: --timeout=3m
          version: v1.52.2
      - name: golangci-lint (Windows)
        if: success() || failure() # run this step even if the previous one failed
        uses: golangci/golangci-lint-action@v3
        env:
          GOOS: "windows"
        with:
          skip-pkg-cache: true
          args: --timeout=3m
          version: v1.52.2
      - name: golangci-lint (OSX)
        if: success() || failure() # run this step even if the previous one failed
        uses: golangci/golangci-lint-action@v3
        env:
          GOOS: "darwin"
        with:
          skip-pkg-cache: true
          args: --timeout=3m
          version: v1.52.2
      - name: golangci-lint (FreeBSD)
        if: success() || failure() # run this step even if the previous one failed
        uses: golangci/golangci-lint-action@v3
        env:
          GOOS: "freebsd"
        with:
          skip-pkg-cache: true
          args: --timeout=3m
          version: v1.52.2
      - name: golangci-lint (others)
        if: success() || failure() # run this step even if the previous one failed
        uses: golangci/golangci-lint-action@v3
        env:
          GOOS: "solaris" # some OS that we don't have any build tags for
        with:
          skip-pkg-cache: true
          args: --timeout=3m
          version: v1.52.2

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/no_ginkgo.sh <==
#!/usr/bin/env bash
# Verify that no non-test files import Ginkgo or Gomega.
set -e
HAS_TESTING=false
cd ..
for f in $(find . -name "*.go" ! -name "*_test.go" ! -name "tools.go"); do
	if grep -q "github.com/onsi/ginkgo" $f; then
		echo "$f imports github.com/onsi/ginkgo/v2"
		HAS_TESTING=true
	fi
	if grep -q "github.com/onsi/gomega" $f; then
		echo "$f imports github.com/onsi/gomega"
		HAS_TESTING=true
	fi
done

if "$HAS_TESTING"; then
	exit 1
fi
exit 0

==> golang-github-lucas-clemente-quic-go-0.38.2/.github/workflows/unit.yml <==
on: [push, pull_request]
jobs:
  unit:
    strategy:
      fail-fast: false
      matrix:
        os: [ "ubuntu", "windows", "macos" ]
        go: [ "1.20.x", "1.21.x" ]
    runs-on: ${{ fromJSON(vars[format('UNIT_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }}
    name: Unit tests (${{ matrix.os}}, Go ${{ matrix.go }})
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go }}
      - run: go version
      - name: Run tests
        env:
          TIMESCALE_FACTOR: 10
        run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -cover -randomize-all -randomize-suites -trace -skip-package integrationtests
      - name: Run tests as root
        if: ${{ matrix.os == 'ubuntu' }}
        env:
          TIMESCALE_FACTOR: 10
          FILE: sys_conn_helper_linux_test.go
        run: |
          test -f $FILE # make sure the file actually exists
          go run github.com/onsi/ginkgo/v2/ginkgo build -cover -tags root .
          sudo ./quic-go.test -ginkgo.v -ginkgo.trace -ginkgo.randomize-all -ginkgo.focus-file=$FILE -test.coverprofile coverage-root.txt
          rm quic-go.test
      - name: Run tests (32 bit)
        if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
        env:
          TIMESCALE_FACTOR: 10
          GOARCH: 386
        run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -cover -coverprofile coverage.txt -output-dir . -randomize-all -randomize-suites -trace -skip-package integrationtests
      - name: Run tests with race detector
        if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
        env:
          TIMESCALE_FACTOR: 20
        run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -race -randomize-all -randomize-suites -trace -skip-package integrationtests
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          files: coverage.txt,coverage-root.txt
          env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }}

==> golang-github-lucas-clemente-quic-go-0.38.2/.gitignore <==
debug
debug.test
main
mockgen_tmp.go
*.qtr
*.qlog
*.txt
race.[0-9]*
fuzzing/*/*.zip
fuzzing/*/coverprofile
fuzzing/*/crashers
fuzzing/*/sonarprofile
fuzzing/*/suppressions
fuzzing/*/corpus/
gomock_reflect_*/

==> golang-github-lucas-clemente-quic-go-0.38.2/.golangci.yml <==
run:
  skip-files:
    - internal/handshake/cipher_suite.go
linters-settings:
  depguard:
    type: blacklist
    packages:
      - github.com/marten-seemann/qtls
      - github.com/quic-go/qtls-go1-19
      - github.com/quic-go/qtls-go1-20
    packages-with-error-message:
      - github.com/marten-seemann/qtls: "importing qtls only allowed in internal/qtls"
      - github.com/quic-go/qtls-go1-19: "importing qtls only allowed in internal/qtls"
      - github.com/quic-go/qtls-go1-20: "importing qtls only allowed in internal/qtls"
  misspell:
    ignore-words:
      - ect
linters:
  disable-all: true
  enable:
    - asciicheck
    - depguard
    - exhaustive
    - exportloopref
    - goimports
    - gofmt # redundant, since gofmt *should* be a no-op after gofumpt
    - gofumpt
    - gosimple
    - ineffassign
    - misspell
    - prealloc
    - staticcheck
    - stylecheck
    - unconvert
    - unparam
    - unused
    - vet
issues:
  exclude-rules:
    - path: internal/qtls
      linters:
        - depguard

==> golang-github-lucas-clemente-quic-go-0.38.2/Changelog.md <==
# Changelog
## v0.22.0 (2021-07-25)
- Use `ReadBatch` to read multiple UDP packets from the socket with a single syscall
- Add a config option (`Config.DisableVersionNegotiationPackets`) to disable sending of Version Negotiation packets
- Drop support for QUIC draft versions 32 and 34
- Remove the `RetireBugBackwardsCompatibilityMode`, which was intended to mitigate a bug when retiring connection IDs in quic-go in v0.17.2 and earlier
## v0.21.2 (2021-07-15)
- Update qtls (for Go 1.15, 1.16 and 1.17rc1) to include the fix for the crypto/tls panic (see https://groups.google.com/g/golang-dev/c/5LJ2V7rd-Ag/m/YGLHVBZ6AAAJ for details)
## v0.21.0 (2021-06-01)
- quic-go now supports RFC 9000!
## v0.20.0 (2021-03-19)
- Remove the `quic.Config.HandshakeTimeout`. Introduce a `quic.Config.HandshakeIdleTimeout`.
## v0.17.1 (2020-06-20)
- Supports QUIC WG draft-29.
- Improve bundling of ACK frames (#2543).
## v0.16.0 (2020-05-31)
- Supports QUIC WG draft-28.
## v0.15.0 (2020-03-01)
- Supports QUIC WG draft-27.
- Add support for 0-RTT.
- Remove `Session.Close()`. Applications need to pass an application error code to the transport using `Session.CloseWithError()`.
- Make the TLS Cipher Suites configurable (via `tls.Config.CipherSuites`).
## v0.14.0 (2019-12-04)
- Supports QUIC WG draft-24.
## v0.13.0 (2019-11-05)
- Supports QUIC WG draft-23.
- Add an `EarlyListener` that allows sending of 0.5-RTT data.
- Add a `TokenStore` to store address validation tokens.
- Issue and use new connection IDs during a connection.
## v0.12.0 (2019-08-05)
- Implement HTTP/3.
- Rename `quic.Cookie` to `quic.Token` and `quic.Config.AcceptCookie` to `quic.Config.AcceptToken`.
- Distinguish between Retry tokens and tokens sent in NEW_TOKEN frames.
- Enforce application protocol negotiation (via `tls.Config.NextProtos`).
- Use a varint for error codes.
- Add support for [quic-trace](https://github.com/google/quic-trace).
- Add a context to `Listener.Accept`, `Session.Accept{Uni}Stream` and `Session.Open{Uni}StreamSync`.
- Implement TLS key updates.
## v0.11.0 (2019-04-05)
- Drop support for gQUIC. For gQUIC support, please switch to the *gquic* branch.
- Implement QUIC WG draft-19.
- Use [qtls](https://github.com/marten-seemann/qtls) for TLS 1.3.
- Return a `tls.ConnectionState` from `quic.Session.ConnectionState()`.
- Remove the error return values from `quic.Stream.CancelRead()` and `quic.Stream.CancelWrite()`
## v0.10.0 (2018-08-28)
- Add support for QUIC 44, drop support for QUIC 42.
## v0.9.0 (2018-08-15)
- Add a `quic.Config` option for the length of the connection ID (for IETF QUIC).
- Split Session.Close into one method for regular closing and one for closing with an error.
## v0.8.0 (2018-06-26)
- Add support for unidirectional streams (for IETF QUIC).
- Add a `quic.Config` option for the maximum number of incoming streams.
- Add support for QUIC 42 and 43.
- Add dial functions that use a context.
- Multiplex clients on a net.PacketConn, when using Dial(conn).
## v0.7.0 (2018-02-03)
- The lower boundary for packets included in ACKs is now derived, and the value sent in STOP_WAITING frames is ignored.
- Remove `DialNonFWSecure` and `DialAddrNonFWSecure`.
- Expose the `ConnectionState` in the `Session` (experimental API).
- Implement packet pacing.
## v0.6.0 (2017-12-12)
- Add support for QUIC 39, drop support for QUIC 35 - 37
- Added `quic.Config` options for maximal flow control windows
- Add a `quic.Config` option for QUIC versions
- Add a `quic.Config` option to request omission of the connection ID from a server
- Add a `quic.Config` option to configure the source address validation
- Add a `quic.Config` option to configure the handshake timeout
- Add a `quic.Config` option to configure the idle timeout
- Add a `quic.Config` option to configure keep-alive
- Rename the STK to Cookie
- Implement `net.Conn`-style deadlines for streams
- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details.
- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details.
- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper`
- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn`
- Drop support for Go 1.7 and 1.8.
- Various bugfixes

==> golang-github-lucas-clemente-quic-go-0.38.2/LICENSE <==
MIT License
Copyright (c) 2016 the quic-go authors & Google, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

==> golang-github-lucas-clemente-quic-go-0.38.2/README.md <==
# A QUIC implementation in pure Go
[API documentation](https://pkg.go.dev/github.com/quic-go/quic-go)
[Code coverage](https://codecov.io/gh/quic-go/quic-go/)
[Fuzzing status](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:quic-go)
quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)).
In addition to these base RFCs, it also implements the following RFCs:
* Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221))
* Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899))
* QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369))
## Using QUIC
### Running a Server
The central entry point is the `quic.Transport`. A transport manages QUIC connections running on a single UDP socket. Since QUIC uses Connection IDs, it can demultiplex a listener (accepting incoming connections) and an arbitrary number of outgoing QUIC connections on the same UDP socket.
```go
udpConn, err := net.ListenUDP("udp4", &net.UDPAddr{Port: 1234})
// ... error handling
tr := quic.Transport{
	Conn: udpConn,
}
ln, err := tr.Listen(tlsConf, quicConf)
// ... error handling
go func() {
	for {
		conn, err := ln.Accept()
		// ... error handling
		// handle the connection, usually in a new Go routine
	}
}()
```
The listener `ln` can now be used to accept incoming QUIC connections by (repeatedly) calling the `Accept` method (see below for more information on the `quic.Connection`).
As a shortcut, `quic.Listen` and `quic.ListenAddr` can be used without explicitly initializing a `quic.Transport`:
```
ln, err := quic.Listen(udpConn, tlsConf, quicConf)
```
When using the shortcut, it's not possible to reuse the same UDP socket for outgoing connections.
### Running a Client
As mentioned above, multiple outgoing connections can share a single UDP socket, since QUIC uses Connection IDs to demultiplex connections.
```go
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) // 3s handshake timeout
defer cancel()
conn, err := tr.Dial(ctx, <server address>, <tls conf>, <quic conf>)
// ... error handling
```
As a shortcut, `quic.Dial` and `quic.DialAddr` can be used without explicitly initializing a `quic.Transport`:
```go
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) // 3s handshake timeout
defer cancel()
conn, err := quic.Dial(ctx, conn, <server address>, <tls conf>, <quic conf>)
```
Just as we saw before when using a similar shortcut to run a server, it's also not possible to reuse the same UDP socket for other outgoing connections, or to listen for incoming connections.
### Using a QUIC Connection
#### Accepting Streams
QUIC is a stream-multiplexed transport. A `quic.Connection` fundamentally differs from the `net.Conn` and the `net.PacketConn` interface defined in the standard library. Data is sent and received on (unidirectional and bidirectional) streams (and, if supported, in [datagrams](#quic-datagrams)), not on the connection itself. The stream state machine is described in detail in [Section 3 of RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000#section-3).
Note: A unidirectional stream is a stream that the initiator can only write to (`quic.SendStream`), and the receiver can only read from (`quic.ReceiveStream`). A bidirectional stream (`quic.Stream`) can be read from and written to by both sides.
On the receiver side, streams are accepted using the `AcceptStream` (for bidirectional) and `AcceptUniStream` functions. For most use cases, it makes sense to call these functions in a loop:
```go
for {
	str, err := conn.AcceptStream(context.Background()) // for bidirectional streams
	// ... error handling
	// handle the stream, usually in a new Go routine
}
```
These functions return an error when the underlying QUIC connection is closed.
#### Opening Streams
There are two slightly different ways to open streams, one synchronous and one (potentially) asynchronous. This API is necessary since the receiver grants us a certain number of streams that we're allowed to open. It may grant us additional streams later on (typically when existing streams are closed), but it means that at the time we want to open a new stream, we might not be able to do so.
Using the synchronous method `OpenStreamSync` for bidirectional streams, and `OpenUniStreamSync` for unidirectional streams, an application can block until the peer allows opening additional streams. In case that we're allowed to open a new stream, these methods return right away:
```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
str, err := conn.OpenStreamSync(ctx) // wait up to 5s to open a new bidirectional stream
```
The asynchronous version never blocks. If it's currently not possible to open a new stream, it returns a `net.Error` timeout error:
```go
str, err := conn.OpenStream()
if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
	// It's currently not possible to open another stream,
	// but it might be possible later, once the peer allowed us to do so.
}
```
These functions return an error when the underlying QUIC connection is closed.
#### Using Streams
Using QUIC streams is pretty straightforward. The `quic.ReceiveStream` implements the `io.Reader` interface, and the `quic.SendStream` implements the `io.Writer` interface. A bidirectional stream (`quic.Stream`) implements both these interfaces. Conceptually, a bidirectional stream can be thought of as the composition of two unidirectional streams in opposite directions.
Calling `Close` on a `quic.SendStream` or a `quic.Stream` closes the send side of the stream. On the receiver side, this will be surfaced as an `io.EOF` returned from the `io.Reader` once all data has been consumed. Note that for bidirectional streams, `Close` _only_ closes the send side of the stream. It is still possible to read from the stream until the peer closes or resets the stream.
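A minimal sketch of this interaction (error handling elided, data value purely illustrative):
```go
// Sender: write some data, then close the send side of the stream.
if _, err := str.Write([]byte("foobar")); err != nil {
	// ... error handling
}
if err := str.Close(); err != nil { // only closes the send side
	// ... error handling
}

// Receiver: io.ReadAll returns once the peer's Close is surfaced as io.EOF.
data, err := io.ReadAll(str)
// ... error handling
```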
In case the application wishes to abort sending on a `quic.SendStream` or a `quic.Stream`, it can reset the send side by calling `CancelWrite` with an application-defined error code (an unsigned 62-bit number). On the receiver side, this is surfaced as a `quic.StreamError` containing that error code on the `io.Reader`. Note that for bidirectional streams, `CancelWrite` _only_ resets the send side of the stream. It is still possible to read from the stream until the peer closes or resets the stream.
Conversely, in case the application wishes to abort receiving from a `quic.ReceiveStream` or a `quic.Stream`, it can ask the sender to abort data transmission by calling `CancelRead` with an application-defined error code (an unsigned 62-bit number). On the sender side, this is surfaced as a `quic.StreamError` containing that error code on the `io.Writer`. Note that for bidirectional streams, `CancelRead` _only_ resets the receive side of the stream. It is still possible to write to the stream.
A bidirectional stream is only closed once both the read and the write side of the stream have been either closed or reset. Only then is the peer granted a new stream according to the maximum number of concurrent streams configured via `quic.Config.MaxIncomingStreams`.
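As a rough sketch (the error codes are arbitrary, illustrative values), resetting both directions of a stream and observing the reset on the peer could look like this:
```go
str.CancelWrite(quic.StreamErrorCode(0x17)) // reset the send side
str.CancelRead(quic.StreamErrorCode(0x42))  // ask the peer to stop sending

// On the peer, the reset is surfaced as a *quic.StreamError carrying the error code:
if _, err := io.ReadAll(str); err != nil {
	var streamErr *quic.StreamError
	if errors.As(err, &streamErr) {
		// streamErr.ErrorCode contains the application-defined error code
	}
}
```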
### Configuring QUIC
The `quic.Config` struct passed to both the listen and dial calls (see above) contains a wide range of configuration options for QUIC connections, incl. the ability to fine-tune flow control limits, the number of streams that the peer is allowed to open concurrently, keep-alives, idle timeouts, and many more. Please refer to the documentation for the `quic.Config` for details.
The `quic.Transport` contains a few configuration options that don't apply to any single QUIC connection, but to all connections handled by that transport. It is highly recommended to set the `StatelessResetKey`, which allows endpoints to quickly recover from crashes / reboots of our node (see [Section 10.3 of RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000#section-10.3)).
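As a hedged sketch (the values are purely illustrative, not recommendations), a listener with a few tuned options, reusing the `tr` and `tlsConf` from the server example above, could be set up like this:
```go
quicConf := &quic.Config{
	MaxIdleTimeout:        30 * time.Second,
	KeepAlivePeriod:       15 * time.Second,
	MaxIncomingStreams:    100,
	MaxIncomingUniStreams: 100,
}
ln, err := tr.Listen(tlsConf, quicConf)
// ... error handling
```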
### Closing a Connection
#### When the remote Peer closes the Connection
In case the peer closes the QUIC connection, all calls to open streams, accept streams, as well as all methods on streams immediately return an error. Additionally, the error is set as the cancellation cause of the connection context. Users can use error assertions to find out what exactly went wrong (see the sketch after the following list):
* `quic.VersionNegotiationError`: Happens during the handshake, if there is no overlap between our and the remote's supported QUIC versions.
* `quic.HandshakeTimeoutError`: Happens if the QUIC handshake doesn't complete within the time specified in `quic.Config.HandshakeIdleTimeout`.
* `quic.IdleTimeoutError`: Happens after completion of the handshake if the connection is idle for longer than the minimum of both peers' idle timeouts (as configured by `quic.Config.MaxIdleTimeout`). The connection is considered idle when no stream data (and datagrams, if applicable) are exchanged for that period. The QUIC connection can be instructed to regularly send a packet to prevent a connection from going idle by setting `quic.Config.KeepAlivePeriod`. However, this is no guarantee that the peer doesn't suddenly go away (e.g. by abruptly shutting down the node or by crashing), or that a NAT binding expires, in which case this error might still occur.
* `quic.StatelessResetError`: Happens when the remote peer lost the state required to decrypt the packet. This requires the `quic.Transport.StatelessResetKey` to be configured by the peer.
* `quic.TransportError`: Happens when the QUIC protocol is violated. Unless the error code is `APPLICATION_ERROR`, this will not happen unless one of the QUIC stacks involved is misbehaving. Please open an issue if you encounter this error.
* `quic.ApplicationError`: Happens when the remote decides to close the connection, see below.
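As a rough sketch (the call and the two error types shown are just examples of the pattern), these errors can be distinguished using `errors.As`:
```go
_, err := conn.AcceptStream(context.Background())
if err != nil {
	var appErr *quic.ApplicationError
	var idleErr *quic.IdleTimeoutError
	switch {
	case errors.As(err, &appErr):
		// the peer closed the connection; appErr.ErrorCode and appErr.ErrorMessage say why
	case errors.As(err, &idleErr):
		// the connection timed out
	default:
		// handshake timeout, version negotiation failure, stateless reset, transport error, ...
	}
}
```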
#### Initiated by the Application
A `quic.Connection` can be closed using `CloseWithError`:
```go
conn.CloseWithError(0x42, "error 0x42 occurred")
```
Applications can transmit both an error code (an unsigned 62-bit number) as well as a UTF-8 encoded human-readable reason. The error code allows the receiver to learn why the connection was closed, and the reason can be useful for debugging purposes.
On the receiver side, this is surfaced as a `quic.ApplicationError`.
### QUIC Datagrams
Unreliable datagrams are a QUIC extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) that is negotiated during the handshake. Support can be enabled by setting the `quic.Config.EnableDatagrams` flag. Note that this doesn't guarantee that the peer also supports datagrams. Whether or not the feature negotiation succeeded can be learned from the `quic.ConnectionState.SupportsDatagrams` obtained from `quic.Connection.ConnectionState()`.
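A minimal sketch (the server address is a placeholder):
```go
quicConf := &quic.Config{EnableDatagrams: true}
conn, err := quic.DialAddr(ctx, "example.net:443", tlsConf, quicConf)
// ... error handling
if !conn.ConnectionState().SupportsDatagrams {
	// the peer didn't negotiate support for the datagram extension
}
```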
QUIC DATAGRAMs are a new QUIC frame type sent in QUIC 1-RTT packets (i.e. after completion of the handshake). Therefore, they're end-to-end encrypted and congestion-controlled. However, DATAGRAM frames that are deemed lost by QUIC's loss detection mechanism are not retransmitted.
Datagrams are sent using the `SendMessage` method on the `quic.Connection`:
```go
conn.SendMessage([]byte("foobar"))
```
And received using `ReceiveMessage`:
```go
msg, err := conn.ReceiveMessage()
```
Note that this code path is currently not optimized. It works for datagrams that are sent occasionally, but it doesn't achieve the same throughput as writing data on a stream. Please get in touch on issue #3766 if your use case relies on high datagram throughput, or if you'd like to help fix this issue. There are also some restrictions regarding the maximum message size (see #3599).
## Using HTTP/3
### As a server
See the [example server](example/main.go). Starting a QUIC server is very similar to the standard library http package in Go:
```go
http.Handle("/", http.FileServer(http.Dir(wwwDir)))
http3.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil)
```
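For more control over the address, handler, and TLS/QUIC configuration, the `http3.Server` can be used directly. A hedged sketch:
```go
server := http3.Server{
	Addr:    "localhost:4242",
	Handler: http.FileServer(http.Dir(wwwDir)),
}
err := server.ListenAndServeTLS("/path/to/cert/chain.pem", "/path/to/privkey.pem")
// ... error handling
```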
### As a client
See the [example client](example/client/main.go). Use a `http3.RoundTripper` as a `Transport` in a `http.Client`.
```go
http.Client{
	Transport: &http3.RoundTripper{},
}
```
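A short usage sketch (the URL is a placeholder):
```go
client := &http.Client{Transport: &http3.RoundTripper{}}
resp, err := client.Get("https://example.net")
// ... error handling
defer resp.Body.Close()
```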
## Projects using quic-go
| Project | Description | Stars |
|-----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|
| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server. |  |
| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support |  |
| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS |  |
| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins |  |
| [go-libp2p](https://github.com/libp2p/go-libp2p) | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others |  |
| [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications |  |
| [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. |  |
| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization |  |
| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy |  |
| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions |  |
| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System |  |
If you'd like to see your project added to this list, please send us a PR.
## Release Policy
quic-go always aims to support the latest two Go releases.
### Dependency on forked crypto/tls
Since the standard library didn't provide any QUIC APIs before the Go 1.21 release, we had to fork crypto/tls to add the required APIs ourselves: [qtls for Go 1.20](https://github.com/quic-go/qtls-go1-20).
This has led to a lot of pain in the Go ecosystem, and we're happy that we can rely on Go 1.21 going forward.
## Contributing
We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors; they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.

==> golang-github-lucas-clemente-quic-go-0.38.2/SECURITY.md <==
# Security Policy
quic-go is still in development. This means that there may be problems in our protocols,
or there may be mistakes in our implementations.
We take security vulnerabilities very seriously. If you discover a security issue,
please bring it to our attention right away!
## Reporting a Vulnerability
If you find a vulnerability that may affect live deployments -- for example, by exposing
a remote execution exploit -- please [**report privately**](https://github.com/quic-go/quic-go/security/advisories/new).
Please **DO NOT file a public issue**.
If the issue is an implementation weakness that cannot be immediately exploited or
something not yet deployed, just discuss it openly.
## Reporting a non security bug
For non-security bugs, please simply file a GitHub [issue](https://github.com/quic-go/quic-go/issues/new).

==> golang-github-lucas-clemente-quic-go-0.38.2/buffer_pool.go <==
package quic
import (
"sync"
"github.com/quic-go/quic-go/internal/protocol"
)
type packetBuffer struct {
Data []byte
// refCount counts how many packets Data is used in.
// It doesn't support concurrent use.
// It is > 1 when used for coalesced packet.
refCount int
}
// Split increases the refCount.
// It must be called when a packet buffer is used for more than one packet,
// e.g. when splitting coalesced packets.
func (b *packetBuffer) Split() {
b.refCount++
}
// Decrement decrements the reference counter.
// It doesn't put the buffer back into the pool.
func (b *packetBuffer) Decrement() {
b.refCount--
if b.refCount < 0 {
panic("negative packetBuffer refCount")
}
}
// MaybeRelease puts the packet buffer back into the pool,
// if the reference counter already reached 0.
func (b *packetBuffer) MaybeRelease() {
// only put the packetBuffer back if it's not used any more
if b.refCount == 0 {
b.putBack()
}
}
// Release puts back the packet buffer into the pool.
// It should be called when processing is definitely finished.
func (b *packetBuffer) Release() {
b.Decrement()
if b.refCount != 0 {
panic("packetBuffer refCount not zero")
}
b.putBack()
}
// Len returns the length of Data
func (b *packetBuffer) Len() protocol.ByteCount { return protocol.ByteCount(len(b.Data)) }
func (b *packetBuffer) Cap() protocol.ByteCount { return protocol.ByteCount(cap(b.Data)) }
func (b *packetBuffer) putBack() {
if cap(b.Data) == protocol.MaxPacketBufferSize {
bufferPool.Put(b)
return
}
if cap(b.Data) == protocol.MaxLargePacketBufferSize {
largeBufferPool.Put(b)
return
}
panic("putPacketBuffer called with packet of wrong size!")
}
var bufferPool, largeBufferPool sync.Pool
func getPacketBuffer() *packetBuffer {
buf := bufferPool.Get().(*packetBuffer)
buf.refCount = 1
buf.Data = buf.Data[:0]
return buf
}
func getLargePacketBuffer() *packetBuffer {
buf := largeBufferPool.Get().(*packetBuffer)
buf.refCount = 1
buf.Data = buf.Data[:0]
return buf
}
func init() {
bufferPool.New = func() any {
return &packetBuffer{Data: make([]byte, 0, protocol.MaxPacketBufferSize)}
}
largeBufferPool.New = func() any {
return &packetBuffer{Data: make([]byte, 0, protocol.MaxLargePacketBufferSize)}
}
}

==> golang-github-lucas-clemente-quic-go-0.38.2/buffer_pool_test.go <==
package quic
import (
"github.com/quic-go/quic-go/internal/protocol"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Buffer Pool", func() {
It("returns buffers of cap", func() {
buf1 := getPacketBuffer()
Expect(buf1.Data).To(HaveCap(protocol.MaxPacketBufferSize))
buf2 := getLargePacketBuffer()
Expect(buf2.Data).To(HaveCap(protocol.MaxLargePacketBufferSize))
})
It("releases buffers", func() {
buf1 := getPacketBuffer()
buf1.Release()
buf2 := getLargePacketBuffer()
buf2.Release()
})
It("gets the length", func() {
buf := getPacketBuffer()
buf.Data = append(buf.Data, []byte("foobar")...)
Expect(buf.Len()).To(BeEquivalentTo(6))
})
It("panics if wrong-sized buffers are passed", func() {
buf := getPacketBuffer()
buf.Data = make([]byte, 10)
Expect(func() { buf.Release() }).To(Panic())
})
It("panics if it is released twice", func() {
buf := getPacketBuffer()
buf.Release()
Expect(func() { buf.Release() }).To(Panic())
})
It("panics if it is decremented too many times", func() {
buf := getPacketBuffer()
buf.Decrement()
Expect(func() { buf.Decrement() }).To(Panic())
})
It("waits until all parts have been released", func() {
buf := getPacketBuffer()
buf.Split()
buf.Split()
// now we have 3 parts
buf.Decrement()
buf.Decrement()
buf.Decrement()
Expect(func() { buf.Decrement() }).To(Panic())
})
})

==> golang-github-lucas-clemente-quic-go-0.38.2/client.go <==
package quic
import (
"context"
"crypto/tls"
"errors"
"net"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/logging"
)
type client struct {
sendConn sendConn
use0RTT bool
packetHandlers packetHandlerManager
onClose func()
tlsConf *tls.Config
config *Config
connIDGenerator ConnectionIDGenerator
srcConnID protocol.ConnectionID
destConnID protocol.ConnectionID
initialPacketNumber protocol.PacketNumber
hasNegotiatedVersion bool
version protocol.VersionNumber
handshakeChan chan struct{}
conn quicConn
tracer logging.ConnectionTracer
tracingID uint64
logger utils.Logger
}
// make it possible to mock connection ID for initial generation in the tests
var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
// DialAddr establishes a new QUIC connection to a server.
// It resolves the address, and then creates a new UDP connection to dial the QUIC server.
// When the QUIC connection is closed, this UDP connection is closed.
// See Dial for more details.
func DialAddr(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (Connection, error) {
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
tr, err := setupTransport(udpConn, tlsConf, true)
if err != nil {
return nil, err
}
return tr.dial(ctx, udpAddr, addr, tlsConf, conf, false)
}
// DialAddrEarly establishes a new 0-RTT QUIC connection to a server.
// See DialAddr for more details.
func DialAddrEarly(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) {
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
tr, err := setupTransport(udpConn, tlsConf, true)
if err != nil {
return nil, err
}
conn, err := tr.dial(ctx, udpAddr, addr, tlsConf, conf, true)
if err != nil {
tr.Close()
return nil, err
}
return conn, nil
}
// DialEarly establishes a new 0-RTT QUIC connection to a server using a net.PacketConn.
// See Dial for more details.
func DialEarly(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) {
dl, err := setupTransport(c, tlsConf, false)
if err != nil {
return nil, err
}
conn, err := dl.DialEarly(ctx, addr, tlsConf, conf)
if err != nil {
dl.Close()
return nil, err
}
return conn, nil
}
// Dial establishes a new QUIC connection to a server using a net.PacketConn.
// If the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn does),
// ECN and packet info support will be enabled. In this case, ReadMsgUDP and WriteMsgUDP
// will be used instead of ReadFrom and WriteTo to read/write packets.
// The tls.Config must define an application protocol (using NextProtos).
//
// This is a convenience function. More advanced use cases should instantiate a Transport,
// which offers configuration options for a more fine-grained control of the connection establishment,
// including reusing the underlying UDP socket for multiple QUIC connections.
func Dial(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (Connection, error) {
dl, err := setupTransport(c, tlsConf, false)
if err != nil {
return nil, err
}
conn, err := dl.Dial(ctx, addr, tlsConf, conf)
if err != nil {
dl.Close()
return nil, err
}
return conn, nil
}
func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn bool) (*Transport, error) {
if tlsConf == nil {
return nil, errors.New("quic: tls.Config not set")
}
return &Transport{
Conn: c,
createdConn: createdPacketConn,
isSingleUse: true,
}, nil
}
func dial(
ctx context.Context,
conn sendConn,
connIDGenerator ConnectionIDGenerator,
packetHandlers packetHandlerManager,
tlsConf *tls.Config,
config *Config,
onClose func(),
use0RTT bool,
) (quicConn, error) {
c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT)
if err != nil {
return nil, err
}
c.packetHandlers = packetHandlers
c.tracingID = nextConnTracingID()
if c.config.Tracer != nil {
c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID)
}
if c.tracer != nil {
c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID)
}
if err := c.dial(ctx); err != nil {
return nil, err
}
return c.conn, nil
}
func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) {
srcConnID, err := connIDGenerator.GenerateConnectionID()
if err != nil {
return nil, err
}
destConnID, err := generateConnectionIDForInitial()
if err != nil {
return nil, err
}
c := &client{
connIDGenerator: connIDGenerator,
srcConnID: srcConnID,
destConnID: destConnID,
sendConn: sendConn,
use0RTT: use0RTT,
onClose: onClose,
tlsConf: tlsConf,
config: config,
version: config.Versions[0],
handshakeChan: make(chan struct{}),
logger: utils.DefaultLogger.WithPrefix("client"),
}
return c, nil
}
func (c *client) dial(ctx context.Context) error {
c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
c.conn = newClientConnection(
c.sendConn,
c.packetHandlers,
c.destConnID,
c.srcConnID,
c.connIDGenerator,
c.config,
c.tlsConf,
c.initialPacketNumber,
c.use0RTT,
c.hasNegotiatedVersion,
c.tracer,
c.tracingID,
c.logger,
c.version,
)
c.packetHandlers.Add(c.srcConnID, c.conn)
errorChan := make(chan error, 1)
recreateChan := make(chan errCloseForRecreating)
go func() {
err := c.conn.run()
var recreateErr *errCloseForRecreating
if errors.As(err, &recreateErr) {
recreateChan <- *recreateErr
return
}
if c.onClose != nil {
c.onClose()
}
errorChan <- err // returns as soon as the connection is closed
}()
// only set when we're using 0-RTT
// Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever.
var earlyConnChan <-chan struct{}
if c.use0RTT {
earlyConnChan = c.conn.earlyConnReady()
}
select {
case <-ctx.Done():
c.conn.shutdown()
return ctx.Err()
case err := <-errorChan:
return err
case recreateErr := <-recreateChan:
c.initialPacketNumber = recreateErr.nextPacketNumber
c.version = recreateErr.nextVersion
c.hasNegotiatedVersion = true
return c.dial(ctx)
case <-earlyConnChan:
// ready to send 0-RTT data
return nil
case <-c.conn.HandshakeComplete():
// handshake successfully completed
return nil
}
}

==> golang-github-lucas-clemente-quic-go-0.38.2/client_test.go <==
package quic
import (
"context"
"crypto/tls"
"errors"
"net"
"time"
mocklogging "github.com/quic-go/quic-go/internal/mocks/logging"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/logging"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
type nullMultiplexer struct{}
func (n nullMultiplexer) AddConn(indexableConn) {}
func (n nullMultiplexer) RemoveConn(indexableConn) error { return nil }
var _ = Describe("Client", func() {
var (
cl *client
packetConn *MockSendConn
connID protocol.ConnectionID
origMultiplexer multiplexer
tlsConf *tls.Config
tracer *mocklogging.MockConnectionTracer
config *Config
originalClientConnConstructor func(
conn sendConn,
runner connRunner,
destConnID protocol.ConnectionID,
srcConnID protocol.ConnectionID,
connIDGenerator ConnectionIDGenerator,
conf *Config,
tlsConf *tls.Config,
initialPacketNumber protocol.PacketNumber,
enable0RTT bool,
hasNegotiatedVersion bool,
tracer logging.ConnectionTracer,
tracingID uint64,
logger utils.Logger,
v protocol.VersionNumber,
) quicConn
)
BeforeEach(func() {
tlsConf = &tls.Config{NextProtos: []string{"proto1"}}
connID = protocol.ParseConnectionID([]byte{0, 0, 0, 0, 0, 0, 0x13, 0x37})
originalClientConnConstructor = newClientConnection
tracer = mocklogging.NewMockConnectionTracer(mockCtrl)
config = &Config{
Tracer: func(ctx context.Context, perspective logging.Perspective, id ConnectionID) logging.ConnectionTracer {
return tracer
},
Versions: []protocol.VersionNumber{protocol.Version1},
}
Eventually(areConnsRunning).Should(BeFalse())
packetConn = NewMockSendConn(mockCtrl)
packetConn.EXPECT().LocalAddr().Return(&net.UDPAddr{}).AnyTimes()
packetConn.EXPECT().RemoteAddr().Return(&net.UDPAddr{}).AnyTimes()
cl = &client{
srcConnID: connID,
destConnID: connID,
version: protocol.Version1,
sendConn: packetConn,
tracer: tracer,
logger: utils.DefaultLogger,
}
getMultiplexer() // make the sync.Once execute
// replace the clientMuxer. getMultiplexer will now return the nullMultiplexer
origMultiplexer = connMuxer
connMuxer = &nullMultiplexer{}
})
AfterEach(func() {
connMuxer = origMultiplexer
newClientConnection = originalClientConnConstructor
})
AfterEach(func() {
if s, ok := cl.conn.(*connection); ok {
s.shutdown()
}
Eventually(areConnsRunning).Should(BeFalse())
})
Context("Dialing", func() {
var origGenerateConnectionIDForInitial func() (protocol.ConnectionID, error)
BeforeEach(func() {
origGenerateConnectionIDForInitial = generateConnectionIDForInitial
generateConnectionIDForInitial = func() (protocol.ConnectionID, error) {
return connID, nil
}
})
AfterEach(func() {
generateConnectionIDForInitial = origGenerateConnectionIDForInitial
})
It("returns after the handshake is complete", func() {
manager := NewMockPacketHandlerManager(mockCtrl)
manager.EXPECT().Add(gomock.Any(), gomock.Any())
run := make(chan struct{})
newClientConnection = func(
_ sendConn,
_ connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
_ *Config,
_ *tls.Config,
_ protocol.PacketNumber,
enable0RTT bool,
_ bool,
_ logging.ConnectionTracer,
_ uint64,
_ utils.Logger,
_ protocol.VersionNumber,
) quicConn {
Expect(enable0RTT).To(BeFalse())
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run().Do(func() { close(run) })
c := make(chan struct{})
close(c)
conn.EXPECT().HandshakeComplete().Return(c)
return conn
}
cl, err := newClient(packetConn, &protocol.DefaultConnectionIDGenerator{}, populateConfig(config), tlsConf, nil, false)
Expect(err).ToNot(HaveOccurred())
cl.packetHandlers = manager
Expect(cl).ToNot(BeNil())
Expect(cl.dial(context.Background())).To(Succeed())
Eventually(run).Should(BeClosed())
})
It("returns early connections", func() {
manager := NewMockPacketHandlerManager(mockCtrl)
manager.EXPECT().Add(gomock.Any(), gomock.Any())
readyChan := make(chan struct{})
done := make(chan struct{})
newClientConnection = func(
_ sendConn,
runner connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
_ *Config,
_ *tls.Config,
_ protocol.PacketNumber,
enable0RTT bool,
_ bool,
_ logging.ConnectionTracer,
_ uint64,
_ utils.Logger,
_ protocol.VersionNumber,
) quicConn {
Expect(enable0RTT).To(BeTrue())
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run().Do(func() { close(done) })
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
conn.EXPECT().earlyConnReady().Return(readyChan)
return conn
}
cl, err := newClient(packetConn, &protocol.DefaultConnectionIDGenerator{}, populateConfig(config), tlsConf, nil, true)
Expect(err).ToNot(HaveOccurred())
cl.packetHandlers = manager
Expect(cl).ToNot(BeNil())
Expect(cl.dial(context.Background())).To(Succeed())
Eventually(done).Should(BeClosed())
})
It("returns an error that occurs while waiting for the handshake to complete", func() {
manager := NewMockPacketHandlerManager(mockCtrl)
manager.EXPECT().Add(gomock.Any(), gomock.Any())
testErr := errors.New("early handshake error")
newClientConnection = func(
_ sendConn,
_ connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
_ *Config,
_ *tls.Config,
_ protocol.PacketNumber,
_ bool,
_ bool,
_ logging.ConnectionTracer,
_ uint64,
_ utils.Logger,
_ protocol.VersionNumber,
) quicConn {
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run().Return(testErr)
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
conn.EXPECT().earlyConnReady().Return(make(chan struct{}))
return conn
}
var closed bool
cl, err := newClient(packetConn, &protocol.DefaultConnectionIDGenerator{}, populateConfig(config), tlsConf, func() { closed = true }, true)
Expect(err).ToNot(HaveOccurred())
cl.packetHandlers = manager
Expect(cl).ToNot(BeNil())
Expect(cl.dial(context.Background())).To(MatchError(testErr))
Expect(closed).To(BeTrue())
})
Context("quic.Config", func() {
It("setups with the right values", func() {
tokenStore := NewLRUTokenStore(10, 4)
config := &Config{
HandshakeIdleTimeout: 1337 * time.Minute,
MaxIdleTimeout: 42 * time.Hour,
MaxIncomingStreams: 1234,
MaxIncomingUniStreams: 4321,
TokenStore: tokenStore,
EnableDatagrams: true,
}
c := populateConfig(config)
Expect(c.HandshakeIdleTimeout).To(Equal(1337 * time.Minute))
Expect(c.MaxIdleTimeout).To(Equal(42 * time.Hour))
Expect(c.MaxIncomingStreams).To(BeEquivalentTo(1234))
Expect(c.MaxIncomingUniStreams).To(BeEquivalentTo(4321))
Expect(c.TokenStore).To(Equal(tokenStore))
Expect(c.EnableDatagrams).To(BeTrue())
})
It("disables bidirectional streams", func() {
config := &Config{
MaxIncomingStreams: -1,
MaxIncomingUniStreams: 4321,
}
c := populateConfig(config)
Expect(c.MaxIncomingStreams).To(BeZero())
Expect(c.MaxIncomingUniStreams).To(BeEquivalentTo(4321))
})
It("disables unidirectional streams", func() {
config := &Config{
MaxIncomingStreams: 1234,
MaxIncomingUniStreams: -1,
}
c := populateConfig(config)
Expect(c.MaxIncomingStreams).To(BeEquivalentTo(1234))
Expect(c.MaxIncomingUniStreams).To(BeZero())
})
It("fills in default values if options are not set in the Config", func() {
c := populateConfig(&Config{})
Expect(c.Versions).To(Equal(protocol.SupportedVersions))
Expect(c.HandshakeIdleTimeout).To(Equal(protocol.DefaultHandshakeIdleTimeout))
Expect(c.MaxIdleTimeout).To(Equal(protocol.DefaultIdleTimeout))
})
})
It("creates new connections with the right parameters", func() {
config := &Config{Versions: []protocol.VersionNumber{protocol.Version1}}
c := make(chan struct{})
var version protocol.VersionNumber
var conf *Config
done := make(chan struct{})
newClientConnection = func(
connP sendConn,
_ connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
configP *Config,
_ *tls.Config,
_ protocol.PacketNumber,
_ bool,
_ bool,
_ logging.ConnectionTracer,
_ uint64,
_ utils.Logger,
versionP protocol.VersionNumber,
) quicConn {
version = versionP
conf = configP
close(c)
// TODO: check connection IDs?
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run()
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
conn.EXPECT().destroy(gomock.Any()).MaxTimes(1)
close(done)
return conn
}
packetConn := NewMockPacketConn(mockCtrl)
packetConn.EXPECT().ReadFrom(gomock.Any()).DoAndReturn(func([]byte) (int, net.Addr, error) {
<-done
return 0, nil, errors.New("closed")
})
packetConn.EXPECT().LocalAddr()
packetConn.EXPECT().SetReadDeadline(gomock.Any()).AnyTimes()
_, err := Dial(context.Background(), packetConn, &net.UDPAddr{}, tlsConf, config)
Expect(err).ToNot(HaveOccurred())
Eventually(c).Should(BeClosed())
Expect(version).To(Equal(config.Versions[0]))
Expect(conf.Versions).To(Equal(config.Versions))
})
It("creates a new connections after version negotiation", func() {
var counter int
newClientConnection = func(
_ sendConn,
runner connRunner,
_ protocol.ConnectionID,
connID protocol.ConnectionID,
_ ConnectionIDGenerator,
configP *Config,
_ *tls.Config,
pn protocol.PacketNumber,
_ bool,
hasNegotiatedVersion bool,
_ logging.ConnectionTracer,
_ uint64,
_ utils.Logger,
versionP protocol.VersionNumber,
) quicConn {
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
if counter == 0 {
Expect(pn).To(BeZero())
Expect(hasNegotiatedVersion).To(BeFalse())
conn.EXPECT().run().DoAndReturn(func() error {
runner.Remove(connID)
return &errCloseForRecreating{
nextPacketNumber: 109,
nextVersion: 789,
}
})
} else {
Expect(pn).To(Equal(protocol.PacketNumber(109)))
Expect(hasNegotiatedVersion).To(BeTrue())
conn.EXPECT().run()
conn.EXPECT().destroy(gomock.Any())
}
counter++
return conn
}
config := &Config{Tracer: config.Tracer, Versions: []protocol.VersionNumber{protocol.Version1}}
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
_, err := DialAddr(context.Background(), "localhost:7890", tlsConf, config)
Expect(err).ToNot(HaveOccurred())
Expect(counter).To(Equal(2))
})
})
})
golang-github-lucas-clemente-quic-go-0.38.2/closed_conn.go 0000664 0000000 0000000 00000004167 14545452366 0023437 0 ustar 00root root 0000000 0000000 package quic
import (
"math/bits"
"net"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
)
// A closedLocalConn is a connection that we closed locally.
// When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame,
// with an exponential backoff.
type closedLocalConn struct {
counter uint32
perspective protocol.Perspective
logger utils.Logger
sendPacket func(net.Addr, packetInfo)
}
var _ packetHandler = &closedLocalConn{}
// newClosedLocalConn creates a new closedLocalConn.
func newClosedLocalConn(sendPacket func(net.Addr, packetInfo), pers protocol.Perspective, logger utils.Logger) packetHandler {
return &closedLocalConn{
sendPacket: sendPacket,
perspective: pers,
logger: logger,
}
}
func (c *closedLocalConn) handlePacket(p receivedPacket) {
c.counter++
// exponential backoff
// only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving
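// (the counter has exactly one bit set if and only if it is a power of two)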
if bits.OnesCount32(c.counter) != 1 {
return
}
c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", c.counter)
c.sendPacket(p.remoteAddr, p.info)
}
func (c *closedLocalConn) shutdown() {}
func (c *closedLocalConn) destroy(error) {}
func (c *closedLocalConn) getPerspective() protocol.Perspective { return c.perspective }
// A closedRemoteConn is a connection that was closed remotely.
// For such a connection, we might receive reordered packets that were sent before the CONNECTION_CLOSE.
// We can just ignore those packets.
type closedRemoteConn struct {
perspective protocol.Perspective
}
var _ packetHandler = &closedRemoteConn{}
func newClosedRemoteConn(pers protocol.Perspective) packetHandler {
return &closedRemoteConn{perspective: pers}
}
func (s *closedRemoteConn) handlePacket(receivedPacket) {}
func (s *closedRemoteConn) shutdown() {}
func (s *closedRemoteConn) destroy(error) {}
func (s *closedRemoteConn) getPerspective() protocol.Perspective { return s.perspective }
golang-github-lucas-clemente-quic-go-0.38.2/closed_conn_test.go 0000664 0000000 0000000 00000002070 14545452366 0024465 0 ustar 00root root 0000000 0000000 package quic
import (
"net"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Closed local connection", func() {
It("tells its perspective", func() {
conn := newClosedLocalConn(nil, protocol.PerspectiveClient, utils.DefaultLogger)
Expect(conn.getPerspective()).To(Equal(protocol.PerspectiveClient))
// stop the connection
conn.shutdown()
})
It("repeats the packet containing the CONNECTION_CLOSE frame", func() {
written := make(chan net.Addr, 1)
conn := newClosedLocalConn(
func(addr net.Addr, _ packetInfo) { written <- addr },
protocol.PerspectiveClient,
utils.DefaultLogger,
)
addr := &net.UDPAddr{IP: net.IPv4(127, 1, 2, 3), Port: 1337}
for i := 1; i <= 20; i++ {
conn.handlePacket(receivedPacket{remoteAddr: addr})
if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 {
Expect(written).To(Receive(Equal(addr))) // receive the CONNECTION_CLOSE
} else {
Expect(written).ToNot(Receive())
}
}
})
})
golang-github-lucas-clemente-quic-go-0.38.2/codecov.yml 0000664 0000000 0000000 00000001175 14545452366 0022763 0 ustar 00root root 0000000 0000000 coverage:
round: nearest
ignore:
- streams_map_incoming_bidi.go
- streams_map_incoming_uni.go
- streams_map_outgoing_bidi.go
- streams_map_outgoing_uni.go
- http3/gzip_reader.go
- interop/
- internal/ackhandler/packet_linkedlist.go
- internal/handshake/cipher_suite.go
- internal/utils/byteinterval_linkedlist.go
- internal/utils/newconnectionid_linkedlist.go
- internal/utils/packetinterval_linkedlist.go
- internal/utils/linkedlist/linkedlist.go
- logging/null_tracer.go
- fuzzing/
- metrics/
status:
project:
default:
threshold: 0.5
patch: false
golang-github-lucas-clemente-quic-go-0.38.2/config.go 0000664 0000000 0000000 00000011100 14545452366 0022377 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"net"
"time"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/quicvarint"
)
// Clone clones a Config
func (c *Config) Clone() *Config {
copy := *c
return &copy
}
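// handshakeTimeout determines the timeout for the entire handshake:
// twice the handshake idle timeout, but at least protocol.DefaultHandshakeTimeout.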
func (c *Config) handshakeTimeout() time.Duration {
return utils.Max(protocol.DefaultHandshakeTimeout, 2*c.HandshakeIdleTimeout)
}
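// validateConfig checks that only supported QUIC versions are configured,
// and clips stream and flow control limits that exceed what can be encoded on the wire.
// It may be called with nil.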
func validateConfig(config *Config) error {
if config == nil {
return nil
}
const maxStreams = 1 << 60
if config.MaxIncomingStreams > maxStreams {
config.MaxIncomingStreams = maxStreams
}
if config.MaxIncomingUniStreams > maxStreams {
config.MaxIncomingUniStreams = maxStreams
}
if config.MaxStreamReceiveWindow > quicvarint.Max {
config.MaxStreamReceiveWindow = quicvarint.Max
}
if config.MaxConnectionReceiveWindow > quicvarint.Max {
config.MaxConnectionReceiveWindow = quicvarint.Max
}
// check that all QUIC versions are actually supported
for _, v := range config.Versions {
if !protocol.IsValidVersion(v) {
return fmt.Errorf("invalid QUIC version: %s", v)
}
}
return nil
}
// populateServerConfig populates fields in the quic.Config with their default values, if none are set.
// It may be called with nil.
func populateServerConfig(config *Config) *Config {
config = populateConfig(config)
if config.MaxTokenAge == 0 {
config.MaxTokenAge = protocol.TokenValidity
}
if config.MaxRetryTokenAge == 0 {
config.MaxRetryTokenAge = protocol.RetryTokenValidity
}
if config.RequireAddressValidation == nil {
config.RequireAddressValidation = func(net.Addr) bool { return false }
}
return config
}
// populateConfig populates fields in the quic.Config with their default values, if none are set.
// It may be called with nil.
func populateConfig(config *Config) *Config {
if config == nil {
config = &Config{}
}
versions := config.Versions
if len(versions) == 0 {
versions = protocol.SupportedVersions
}
handshakeIdleTimeout := protocol.DefaultHandshakeIdleTimeout
if config.HandshakeIdleTimeout != 0 {
handshakeIdleTimeout = config.HandshakeIdleTimeout
}
idleTimeout := protocol.DefaultIdleTimeout
if config.MaxIdleTimeout != 0 {
idleTimeout = config.MaxIdleTimeout
}
initialStreamReceiveWindow := config.InitialStreamReceiveWindow
if initialStreamReceiveWindow == 0 {
initialStreamReceiveWindow = protocol.DefaultInitialMaxStreamData
}
maxStreamReceiveWindow := config.MaxStreamReceiveWindow
if maxStreamReceiveWindow == 0 {
maxStreamReceiveWindow = protocol.DefaultMaxReceiveStreamFlowControlWindow
}
initialConnectionReceiveWindow := config.InitialConnectionReceiveWindow
if initialConnectionReceiveWindow == 0 {
initialConnectionReceiveWindow = protocol.DefaultInitialMaxData
}
maxConnectionReceiveWindow := config.MaxConnectionReceiveWindow
if maxConnectionReceiveWindow == 0 {
maxConnectionReceiveWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow
}
maxIncomingStreams := config.MaxIncomingStreams
if maxIncomingStreams == 0 {
maxIncomingStreams = protocol.DefaultMaxIncomingStreams
} else if maxIncomingStreams < 0 {
maxIncomingStreams = 0
}
maxIncomingUniStreams := config.MaxIncomingUniStreams
if maxIncomingUniStreams == 0 {
maxIncomingUniStreams = protocol.DefaultMaxIncomingUniStreams
} else if maxIncomingUniStreams < 0 {
maxIncomingUniStreams = 0
}
return &Config{
GetConfigForClient: config.GetConfigForClient,
Versions: versions,
HandshakeIdleTimeout: handshakeIdleTimeout,
MaxIdleTimeout: idleTimeout,
MaxTokenAge: config.MaxTokenAge,
MaxRetryTokenAge: config.MaxRetryTokenAge,
RequireAddressValidation: config.RequireAddressValidation,
KeepAlivePeriod: config.KeepAlivePeriod,
InitialStreamReceiveWindow: initialStreamReceiveWindow,
MaxStreamReceiveWindow: maxStreamReceiveWindow,
InitialConnectionReceiveWindow: initialConnectionReceiveWindow,
MaxConnectionReceiveWindow: maxConnectionReceiveWindow,
AllowConnectionWindowIncrease: config.AllowConnectionWindowIncrease,
MaxIncomingStreams: maxIncomingStreams,
MaxIncomingUniStreams: maxIncomingUniStreams,
TokenStore: config.TokenStore,
EnableDatagrams: config.EnableDatagrams,
DisablePathMTUDiscovery: config.DisablePathMTUDiscovery,
DisableVersionNegotiationPackets: config.DisableVersionNegotiationPackets,
Allow0RTT: config.Allow0RTT,
Tracer: config.Tracer,
}
}
golang-github-lucas-clemente-quic-go-0.38.2/config_test.go 0000664 0000000 0000000 00000016273 14545452366 0023456 0 ustar 00root root 0000000 0000000 package quic
import (
"context"
"errors"
"fmt"
"net"
"reflect"
"time"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/logging"
"github.com/quic-go/quic-go/quicvarint"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Config", func() {
Context("validating", func() {
It("validates a nil config", func() {
Expect(validateConfig(nil)).To(Succeed())
})
It("validates a config with normal values", func() {
conf := populateServerConfig(&Config{
MaxIncomingStreams: 5,
MaxStreamReceiveWindow: 10,
})
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.MaxIncomingStreams).To(BeEquivalentTo(5))
Expect(conf.MaxStreamReceiveWindow).To(BeEquivalentTo(10))
})
It("clips too large values for the stream limits", func() {
conf := &Config{
MaxIncomingStreams: 1<<60 + 1,
MaxIncomingUniStreams: 1<<60 + 2,
}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.MaxIncomingStreams).To(BeEquivalentTo(int64(1 << 60)))
Expect(conf.MaxIncomingUniStreams).To(BeEquivalentTo(int64(1 << 60)))
})
It("clips too large values for the flow control windows", func() {
conf := &Config{
MaxStreamReceiveWindow: quicvarint.Max + 1,
MaxConnectionReceiveWindow: quicvarint.Max + 2,
}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.MaxStreamReceiveWindow).To(BeEquivalentTo(uint64(quicvarint.Max)))
Expect(conf.MaxConnectionReceiveWindow).To(BeEquivalentTo(uint64(quicvarint.Max)))
})
})
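// configWithNonZeroNonFunctionFields uses reflection to build a Config in which every exported,
// non-function field is set to a non-zero value, so that Clone and populateConfig can be
// checked for copying all fields.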
configWithNonZeroNonFunctionFields := func() *Config {
c := &Config{}
v := reflect.ValueOf(c).Elem()
typ := v.Type()
for i := 0; i < typ.NumField(); i++ {
f := v.Field(i)
if !f.CanSet() {
// unexported field; not cloned.
continue
}
switch fn := typ.Field(i).Name; fn {
case "GetConfigForClient", "RequireAddressValidation", "GetLogWriter", "AllowConnectionWindowIncrease", "Tracer":
// Can't compare functions.
case "Versions":
f.Set(reflect.ValueOf([]VersionNumber{1, 2, 3}))
case "ConnectionIDLength":
f.Set(reflect.ValueOf(8))
case "ConnectionIDGenerator":
f.Set(reflect.ValueOf(&protocol.DefaultConnectionIDGenerator{ConnLen: protocol.DefaultConnectionIDLength}))
case "HandshakeIdleTimeout":
f.Set(reflect.ValueOf(time.Second))
case "MaxIdleTimeout":
f.Set(reflect.ValueOf(time.Hour))
case "MaxTokenAge":
f.Set(reflect.ValueOf(2 * time.Hour))
case "MaxRetryTokenAge":
f.Set(reflect.ValueOf(2 * time.Minute))
case "TokenStore":
f.Set(reflect.ValueOf(NewLRUTokenStore(2, 3)))
case "InitialStreamReceiveWindow":
f.Set(reflect.ValueOf(uint64(1234)))
case "MaxStreamReceiveWindow":
f.Set(reflect.ValueOf(uint64(9)))
case "InitialConnectionReceiveWindow":
f.Set(reflect.ValueOf(uint64(4321)))
case "MaxConnectionReceiveWindow":
f.Set(reflect.ValueOf(uint64(10)))
case "MaxIncomingStreams":
f.Set(reflect.ValueOf(int64(11)))
case "MaxIncomingUniStreams":
f.Set(reflect.ValueOf(int64(12)))
case "StatelessResetKey":
f.Set(reflect.ValueOf(&StatelessResetKey{1, 2, 3, 4}))
case "KeepAlivePeriod":
f.Set(reflect.ValueOf(time.Second))
case "EnableDatagrams":
f.Set(reflect.ValueOf(true))
case "DisableVersionNegotiationPackets":
f.Set(reflect.ValueOf(true))
case "DisablePathMTUDiscovery":
f.Set(reflect.ValueOf(true))
case "Allow0RTT":
f.Set(reflect.ValueOf(true))
default:
Fail(fmt.Sprintf("all fields must be accounted for, but saw unknown field %q", fn))
}
}
return c
}
It("uses 10s handshake timeout for short handshake idle timeouts", func() {
c := &Config{HandshakeIdleTimeout: time.Second}
Expect(c.handshakeTimeout()).To(Equal(protocol.DefaultHandshakeTimeout))
})
It("uses twice the handshake idle timeouts for the handshake timeout, for long handshake idle timeouts", func() {
c := &Config{HandshakeIdleTimeout: time.Second * 11 / 2}
Expect(c.handshakeTimeout()).To(Equal(11 * time.Second))
})
Context("cloning", func() {
It("clones function fields", func() {
var calledAddrValidation, calledAllowConnectionWindowIncrease, calledTracer bool
c1 := &Config{
GetConfigForClient: func(info *ClientHelloInfo) (*Config, error) { return nil, errors.New("nope") },
AllowConnectionWindowIncrease: func(Connection, uint64) bool { calledAllowConnectionWindowIncrease = true; return true },
RequireAddressValidation: func(net.Addr) bool { calledAddrValidation = true; return true },
Tracer: func(context.Context, logging.Perspective, ConnectionID) logging.ConnectionTracer {
calledTracer = true
return nil
},
}
c2 := c1.Clone()
c2.RequireAddressValidation(&net.UDPAddr{})
Expect(calledAddrValidation).To(BeTrue())
c2.AllowConnectionWindowIncrease(nil, 1234)
Expect(calledAllowConnectionWindowIncrease).To(BeTrue())
_, err := c2.GetConfigForClient(&ClientHelloInfo{})
Expect(err).To(MatchError("nope"))
c2.Tracer(context.Background(), logging.PerspectiveClient, protocol.ConnectionID{})
Expect(calledTracer).To(BeTrue())
})
It("clones non-function fields", func() {
c := configWithNonZeroNonFunctionFields()
Expect(c.Clone()).To(Equal(c))
})
It("returns a copy", func() {
c1 := &Config{
MaxIncomingStreams: 100,
RequireAddressValidation: func(net.Addr) bool { return true },
}
c2 := c1.Clone()
c2.MaxIncomingStreams = 200
c2.RequireAddressValidation = func(net.Addr) bool { return false }
Expect(c1.MaxIncomingStreams).To(BeEquivalentTo(100))
Expect(c1.RequireAddressValidation(&net.UDPAddr{})).To(BeTrue())
})
})
Context("populating", func() {
It("populates function fields", func() {
var calledAddrValidation bool
c1 := &Config{}
c1.RequireAddressValidation = func(net.Addr) bool { calledAddrValidation = true; return true }
c2 := populateConfig(c1)
c2.RequireAddressValidation(&net.UDPAddr{})
Expect(calledAddrValidation).To(BeTrue())
})
It("copies non-function fields", func() {
c := configWithNonZeroNonFunctionFields()
Expect(populateConfig(c)).To(Equal(c))
})
It("populates empty fields with default values", func() {
c := populateConfig(&Config{})
Expect(c.Versions).To(Equal(protocol.SupportedVersions))
Expect(c.HandshakeIdleTimeout).To(Equal(protocol.DefaultHandshakeIdleTimeout))
Expect(c.InitialStreamReceiveWindow).To(BeEquivalentTo(protocol.DefaultInitialMaxStreamData))
Expect(c.MaxStreamReceiveWindow).To(BeEquivalentTo(protocol.DefaultMaxReceiveStreamFlowControlWindow))
Expect(c.InitialConnectionReceiveWindow).To(BeEquivalentTo(protocol.DefaultInitialMaxData))
Expect(c.MaxConnectionReceiveWindow).To(BeEquivalentTo(protocol.DefaultMaxReceiveConnectionFlowControlWindow))
Expect(c.MaxIncomingStreams).To(BeEquivalentTo(protocol.DefaultMaxIncomingStreams))
Expect(c.MaxIncomingUniStreams).To(BeEquivalentTo(protocol.DefaultMaxIncomingUniStreams))
Expect(c.DisableVersionNegotiationPackets).To(BeFalse())
Expect(c.DisablePathMTUDiscovery).To(BeFalse())
Expect(c.GetConfigForClient).To(BeNil())
})
It("populates empty fields with default values, for the server", func() {
c := populateServerConfig(&Config{})
Expect(c.RequireAddressValidation).ToNot(BeNil())
})
})
})
golang-github-lucas-clemente-quic-go-0.38.2/conn_id_generator.go 0000664 0000000 0000000 00000011013 14545452366 0024614 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
)
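// The connIDGenerator issues the connection IDs that we advertise to the peer (via NEW_CONNECTION_ID frames)
// and handles their retirement when the peer sends RETIRE_CONNECTION_ID frames.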
type connIDGenerator struct {
generator ConnectionIDGenerator
highestSeq uint64
activeSrcConnIDs map[uint64]protocol.ConnectionID
initialClientDestConnID *protocol.ConnectionID // nil for the client
addConnectionID func(protocol.ConnectionID)
getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken
removeConnectionID func(protocol.ConnectionID)
retireConnectionID func(protocol.ConnectionID)
replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte)
queueControlFrame func(wire.Frame)
}
func newConnIDGenerator(
initialConnectionID protocol.ConnectionID,
initialClientDestConnID *protocol.ConnectionID, // nil for the client
addConnectionID func(protocol.ConnectionID),
getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken,
removeConnectionID func(protocol.ConnectionID),
retireConnectionID func(protocol.ConnectionID),
replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte),
queueControlFrame func(wire.Frame),
generator ConnectionIDGenerator,
) *connIDGenerator {
m := &connIDGenerator{
generator: generator,
activeSrcConnIDs: make(map[uint64]protocol.ConnectionID),
addConnectionID: addConnectionID,
getStatelessResetToken: getStatelessResetToken,
removeConnectionID: removeConnectionID,
retireConnectionID: retireConnectionID,
replaceWithClosed: replaceWithClosed,
queueControlFrame: queueControlFrame,
}
m.activeSrcConnIDs[0] = initialConnectionID
m.initialClientDestConnID = initialClientDestConnID
return m
}
func (m *connIDGenerator) SetMaxActiveConnIDs(limit uint64) error {
if m.generator.ConnectionIDLen() == 0 {
return nil
}
// The active_connection_id_limit transport parameter is the number of
// connection IDs the peer will store. This limit includes the connection ID
// used during the handshake, and the one sent in the preferred_address
// transport parameter.
// We currently don't send the preferred_address transport parameter,
// so we can issue (limit - 1) connection IDs.
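// For example, with one connection ID already active and a limit of 4, this issues
// three additional connection IDs (with sequence numbers 1, 2 and 3).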
for i := uint64(len(m.activeSrcConnIDs)); i < utils.Min(limit, protocol.MaxIssuedConnectionIDs); i++ {
if err := m.issueNewConnID(); err != nil {
return err
}
}
return nil
}
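// Retire is called when the peer retires one of our connection IDs by sending a RETIRE_CONNECTION_ID frame.
// Unless the retired connection ID is the initial one, a replacement connection ID is issued.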
func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.ConnectionID) error {
if seq > m.highestSeq {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (highest issued: %d)", seq, m.highestSeq),
}
}
connID, ok := m.activeSrcConnIDs[seq]
// We might already have deleted this connection ID, if this is a duplicate frame.
if !ok {
return nil
}
if connID == sentWithDestConnID {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", seq, connID),
}
}
m.retireConnectionID(connID)
delete(m.activeSrcConnIDs, seq)
// Don't issue a replacement for the initial connection ID.
if seq == 0 {
return nil
}
return m.issueNewConnID()
}
func (m *connIDGenerator) issueNewConnID() error {
connID, err := m.generator.GenerateConnectionID()
if err != nil {
return err
}
m.activeSrcConnIDs[m.highestSeq+1] = connID
m.addConnectionID(connID)
m.queueControlFrame(&wire.NewConnectionIDFrame{
SequenceNumber: m.highestSeq + 1,
ConnectionID: connID,
StatelessResetToken: m.getStatelessResetToken(connID),
})
m.highestSeq++
return nil
}
func (m *connIDGenerator) SetHandshakeComplete() {
if m.initialClientDestConnID != nil {
m.retireConnectionID(*m.initialClientDestConnID)
m.initialClientDestConnID = nil
}
}
func (m *connIDGenerator) RemoveAll() {
if m.initialClientDestConnID != nil {
m.removeConnectionID(*m.initialClientDestConnID)
}
for _, connID := range m.activeSrcConnIDs {
m.removeConnectionID(connID)
}
}
func (m *connIDGenerator) ReplaceWithClosed(pers protocol.Perspective, connClose []byte) {
connIDs := make([]protocol.ConnectionID, 0, len(m.activeSrcConnIDs)+1)
if m.initialClientDestConnID != nil {
connIDs = append(connIDs, *m.initialClientDestConnID)
}
for _, connID := range m.activeSrcConnIDs {
connIDs = append(connIDs, connID)
}
m.replaceWithClosed(connIDs, pers, connClose)
}
golang-github-lucas-clemente-quic-go-0.38.2/conn_id_generator_test.go 0000664 0000000 0000000 00000016446 14545452366 0025672 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Connection ID Generator", func() {
var (
addedConnIDs []protocol.ConnectionID
retiredConnIDs []protocol.ConnectionID
removedConnIDs []protocol.ConnectionID
replacedWithClosed []protocol.ConnectionID
queuedFrames []wire.Frame
g *connIDGenerator
)
initialConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7})
initialClientDestConnID := protocol.ParseConnectionID([]byte{0xa, 0xb, 0xc, 0xd, 0xe})
connIDToToken := func(c protocol.ConnectionID) protocol.StatelessResetToken {
b := c.Bytes()[0]
return protocol.StatelessResetToken{b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b}
}
BeforeEach(func() {
addedConnIDs = nil
retiredConnIDs = nil
removedConnIDs = nil
queuedFrames = nil
replacedWithClosed = nil
g = newConnIDGenerator(
initialConnID,
&initialClientDestConnID,
func(c protocol.ConnectionID) { addedConnIDs = append(addedConnIDs, c) },
connIDToToken,
func(c protocol.ConnectionID) { removedConnIDs = append(removedConnIDs, c) },
func(c protocol.ConnectionID) { retiredConnIDs = append(retiredConnIDs, c) },
func(cs []protocol.ConnectionID, _ protocol.Perspective, _ []byte) {
replacedWithClosed = append(replacedWithClosed, cs...)
},
func(f wire.Frame) { queuedFrames = append(queuedFrames, f) },
&protocol.DefaultConnectionIDGenerator{ConnLen: initialConnID.Len()},
)
})
It("issues new connection IDs", func() {
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(retiredConnIDs).To(BeEmpty())
Expect(addedConnIDs).To(HaveLen(3))
for i := 0; i < len(addedConnIDs)-1; i++ {
Expect(addedConnIDs[i]).ToNot(Equal(addedConnIDs[i+1]))
}
Expect(queuedFrames).To(HaveLen(3))
for i := 0; i < 3; i++ {
f := queuedFrames[i]
Expect(f).To(BeAssignableToTypeOf(&wire.NewConnectionIDFrame{}))
nf := f.(*wire.NewConnectionIDFrame)
Expect(nf.SequenceNumber).To(BeEquivalentTo(i + 1))
Expect(nf.ConnectionID.Len()).To(Equal(7))
Expect(nf.StatelessResetToken).To(Equal(connIDToToken(nf.ConnectionID)))
}
})
It("limits the number of connection IDs that it issues", func() {
Expect(g.SetMaxActiveConnIDs(9999999)).To(Succeed())
Expect(retiredConnIDs).To(BeEmpty())
Expect(addedConnIDs).To(HaveLen(protocol.MaxIssuedConnectionIDs - 1))
Expect(queuedFrames).To(HaveLen(protocol.MaxIssuedConnectionIDs - 1))
})
// SetMaxActiveConnIDs is called twice when dialing a 0-RTT connection:
// once with the value restored from the old connection, and once when we receive the transport parameters
Context("dealing with 0-RTT", func() {
It("doesn't issue new connection IDs when SetMaxActiveConnIDs is called with the same value", func() {
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(queuedFrames).To(HaveLen(3))
queuedFrames = nil
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(queuedFrames).To(BeEmpty())
})
It("issues more connection IDs if the server allows a higher limit on the resumed connection", func() {
Expect(g.SetMaxActiveConnIDs(3)).To(Succeed())
Expect(queuedFrames).To(HaveLen(2))
queuedFrames = nil
Expect(g.SetMaxActiveConnIDs(6)).To(Succeed())
Expect(queuedFrames).To(HaveLen(3))
})
It("issues more connection IDs if the server allows a higher limit on the resumed connection, when connection IDs were retired in between", func() {
Expect(g.SetMaxActiveConnIDs(3)).To(Succeed())
Expect(queuedFrames).To(HaveLen(2))
queuedFrames = nil
g.Retire(1, protocol.ConnectionID{})
Expect(queuedFrames).To(HaveLen(1))
queuedFrames = nil
Expect(g.SetMaxActiveConnIDs(6)).To(Succeed())
Expect(queuedFrames).To(HaveLen(3))
})
})
It("errors if the peers tries to retire a connection ID that wasn't yet issued", func() {
Expect(g.Retire(1, protocol.ConnectionID{})).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "retired connection ID 1 (highest issued: 0)",
}))
})
It("errors if the peers tries to retire a connection ID in a packet with that connection ID", func() {
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(queuedFrames).ToNot(BeEmpty())
Expect(queuedFrames[0]).To(BeAssignableToTypeOf(&wire.NewConnectionIDFrame{}))
f := queuedFrames[0].(*wire.NewConnectionIDFrame)
Expect(g.Retire(f.SequenceNumber, f.ConnectionID)).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", f.SequenceNumber, f.ConnectionID),
}))
})
It("issues new connection IDs, when old ones are retired", func() {
Expect(g.SetMaxActiveConnIDs(5)).To(Succeed())
queuedFrames = nil
Expect(retiredConnIDs).To(BeEmpty())
Expect(g.Retire(3, protocol.ConnectionID{})).To(Succeed())
Expect(queuedFrames).To(HaveLen(1))
Expect(queuedFrames[0]).To(BeAssignableToTypeOf(&wire.NewConnectionIDFrame{}))
nf := queuedFrames[0].(*wire.NewConnectionIDFrame)
Expect(nf.SequenceNumber).To(BeEquivalentTo(5))
Expect(nf.ConnectionID.Len()).To(Equal(7))
})
It("retires the initial connection ID", func() {
Expect(g.Retire(0, protocol.ConnectionID{})).To(Succeed())
Expect(removedConnIDs).To(BeEmpty())
Expect(retiredConnIDs).To(HaveLen(1))
Expect(retiredConnIDs[0]).To(Equal(initialConnID))
Expect(addedConnIDs).To(BeEmpty())
})
It("handles duplicate retirements", func() {
Expect(g.SetMaxActiveConnIDs(11)).To(Succeed())
queuedFrames = nil
Expect(retiredConnIDs).To(BeEmpty())
Expect(g.Retire(5, protocol.ConnectionID{})).To(Succeed())
Expect(retiredConnIDs).To(HaveLen(1))
Expect(queuedFrames).To(HaveLen(1))
Expect(g.Retire(5, protocol.ConnectionID{})).To(Succeed())
Expect(retiredConnIDs).To(HaveLen(1))
Expect(queuedFrames).To(HaveLen(1))
})
It("retires the client's initial destination connection ID when the handshake completes", func() {
g.SetHandshakeComplete()
Expect(retiredConnIDs).To(HaveLen(1))
Expect(retiredConnIDs[0]).To(Equal(initialClientDestConnID))
})
It("removes all connection IDs", func() {
Expect(g.SetMaxActiveConnIDs(5)).To(Succeed())
Expect(queuedFrames).To(HaveLen(4))
g.RemoveAll()
Expect(removedConnIDs).To(HaveLen(6)) // initial conn ID, initial client dest conn id, and newly issued ones
Expect(removedConnIDs).To(ContainElement(initialConnID))
Expect(removedConnIDs).To(ContainElement(initialClientDestConnID))
for _, f := range queuedFrames {
nf := f.(*wire.NewConnectionIDFrame)
Expect(removedConnIDs).To(ContainElement(nf.ConnectionID))
}
})
It("replaces with a closed connection for all connection IDs", func() {
Expect(g.SetMaxActiveConnIDs(5)).To(Succeed())
Expect(queuedFrames).To(HaveLen(4))
g.ReplaceWithClosed(protocol.PerspectiveClient, []byte("foobar"))
Expect(replacedWithClosed).To(HaveLen(6)) // initial conn ID, initial client dest conn id, and newly issued ones
Expect(replacedWithClosed).To(ContainElement(initialClientDestConnID))
Expect(replacedWithClosed).To(ContainElement(initialConnID))
for _, f := range queuedFrames {
nf := f.(*wire.NewConnectionIDFrame)
Expect(replacedWithClosed).To(ContainElement(nf.ConnectionID))
}
})
})
golang-github-lucas-clemente-quic-go-0.38.2/conn_id_manager.go 0000664 0000000 0000000 00000015232 14545452366 0024247 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
list "github.com/quic-go/quic-go/internal/utils/linkedlist"
"github.com/quic-go/quic-go/internal/wire"
)
type newConnID struct {
SequenceNumber uint64
ConnectionID protocol.ConnectionID
StatelessResetToken protocol.StatelessResetToken
}
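// The connIDManager stores the connection IDs provided by the peer (via NEW_CONNECTION_ID frames),
// selects the connection ID currently used as the Destination Connection ID on outgoing packets,
// and decides when to switch to a new one.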
type connIDManager struct {
queue list.List[newConnID]
handshakeComplete bool
activeSequenceNumber uint64
highestRetired uint64
activeConnectionID protocol.ConnectionID
activeStatelessResetToken *protocol.StatelessResetToken
// We change the connection ID after sending on average
// protocol.PacketsPerConnectionID packets. The actual value is randomized
// to hide the packet loss rate from on-path observers.
rand utils.Rand
packetsSinceLastChange uint32
packetsPerConnectionID uint32
addStatelessResetToken func(protocol.StatelessResetToken)
removeStatelessResetToken func(protocol.StatelessResetToken)
queueControlFrame func(wire.Frame)
}
func newConnIDManager(
initialDestConnID protocol.ConnectionID,
addStatelessResetToken func(protocol.StatelessResetToken),
removeStatelessResetToken func(protocol.StatelessResetToken),
queueControlFrame func(wire.Frame),
) *connIDManager {
return &connIDManager{
activeConnectionID: initialDestConnID,
addStatelessResetToken: addStatelessResetToken,
removeStatelessResetToken: removeStatelessResetToken,
queueControlFrame: queueControlFrame,
}
}
func (h *connIDManager) AddFromPreferredAddress(connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error {
return h.addConnectionID(1, connID, resetToken)
}
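// Add processes a NEW_CONNECTION_ID frame received from the peer.
// It returns a CONNECTION_ID_LIMIT_ERROR if the peer provides more connection IDs than we are willing to store.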
func (h *connIDManager) Add(f *wire.NewConnectionIDFrame) error {
if err := h.add(f); err != nil {
return err
}
if h.queue.Len() >= protocol.MaxActiveConnectionIDs {
return &qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError}
}
return nil
}
func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error {
// If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than the currently active
// connection ID or if it was already retired, send the RETIRE_CONNECTION_ID frame immediately.
if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired {
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: f.SequenceNumber,
})
return nil
}
// Retire elements in the queue.
// Doesn't retire the active connection ID.
if f.RetirePriorTo > h.highestRetired {
var next *list.Element[newConnID]
for el := h.queue.Front(); el != nil; el = next {
if el.Value.SequenceNumber >= f.RetirePriorTo {
break
}
next = el.Next()
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: el.Value.SequenceNumber,
})
h.queue.Remove(el)
}
h.highestRetired = f.RetirePriorTo
}
if f.SequenceNumber == h.activeSequenceNumber {
return nil
}
if err := h.addConnectionID(f.SequenceNumber, f.ConnectionID, f.StatelessResetToken); err != nil {
return err
}
// Retire the active connection ID, if necessary.
if h.activeSequenceNumber < f.RetirePriorTo {
// The queue is guaranteed to have at least one element at this point.
h.updateConnectionID()
}
return nil
}
func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error {
// insert a new element at the end
if h.queue.Len() == 0 || h.queue.Back().Value.SequenceNumber < seq {
h.queue.PushBack(newConnID{
SequenceNumber: seq,
ConnectionID: connID,
StatelessResetToken: resetToken,
})
return nil
}
// insert a new element somewhere in the middle
for el := h.queue.Front(); el != nil; el = el.Next() {
if el.Value.SequenceNumber == seq {
if el.Value.ConnectionID != connID {
return fmt.Errorf("received conflicting connection IDs for sequence number %d", seq)
}
if el.Value.StatelessResetToken != resetToken {
return fmt.Errorf("received conflicting stateless reset tokens for sequence number %d", seq)
}
break
}
if el.Value.SequenceNumber > seq {
h.queue.InsertBefore(newConnID{
SequenceNumber: seq,
ConnectionID: connID,
StatelessResetToken: resetToken,
}, el)
break
}
}
return nil
}
func (h *connIDManager) updateConnectionID() {
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: h.activeSequenceNumber,
})
h.highestRetired = utils.Max(h.highestRetired, h.activeSequenceNumber)
if h.activeStatelessResetToken != nil {
h.removeStatelessResetToken(*h.activeStatelessResetToken)
}
front := h.queue.Remove(h.queue.Front())
h.activeSequenceNumber = front.SequenceNumber
h.activeConnectionID = front.ConnectionID
h.activeStatelessResetToken = &front.StatelessResetToken
h.packetsSinceLastChange = 0
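// Pick the number of packets to send on the new connection ID uniformly at random
// from the interval [PacketsPerConnectionID/2, 3*PacketsPerConnectionID/2).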
h.packetsPerConnectionID = protocol.PacketsPerConnectionID/2 + uint32(h.rand.Int31n(protocol.PacketsPerConnectionID))
h.addStatelessResetToken(*h.activeStatelessResetToken)
}
func (h *connIDManager) Close() {
if h.activeStatelessResetToken != nil {
h.removeStatelessResetToken(*h.activeStatelessResetToken)
}
}
// is called when the server performs a Retry
// and when the server changes the connection ID in the first Initial sent
func (h *connIDManager) ChangeInitialConnID(newConnID protocol.ConnectionID) {
if h.activeSequenceNumber != 0 {
panic("expected first connection ID to have sequence number 0")
}
h.activeConnectionID = newConnID
}
// is called when the server provides a stateless reset token in the transport parameters
func (h *connIDManager) SetStatelessResetToken(token protocol.StatelessResetToken) {
if h.activeSequenceNumber != 0 {
panic("expected first connection ID to have sequence number 0")
}
h.activeStatelessResetToken = &token
h.addStatelessResetToken(token)
}
func (h *connIDManager) SentPacket() {
h.packetsSinceLastChange++
}
func (h *connIDManager) shouldUpdateConnID() bool {
if !h.handshakeComplete {
return false
}
// initiate the first change as early as possible (after handshake completion)
if h.queue.Len() > 0 && h.activeSequenceNumber == 0 {
return true
}
// For later changes, only switch if
// 1. the queue of connection IDs is filled more than 50%, and
// 2. we have sent at least packetsPerConnectionID packets since the last change.
return 2*h.queue.Len() >= protocol.MaxActiveConnectionIDs &&
h.packetsSinceLastChange >= h.packetsPerConnectionID
}
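// Get returns the connection ID to use as the Destination Connection ID on the next packet.
// It switches to a new connection ID first if the conditions checked in shouldUpdateConnID are met.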
func (h *connIDManager) Get() protocol.ConnectionID {
if h.shouldUpdateConnID() {
h.updateConnectionID()
}
return h.activeConnectionID
}
func (h *connIDManager) SetHandshakeComplete() {
h.handshakeComplete = true
}
golang-github-lucas-clemente-quic-go-0.38.2/conn_id_manager_test.go 0000664 0000000 0000000 00000035111 14545452366 0025304 0 ustar 00root root 0000000 0000000 package quic
import (
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Connection ID Manager", func() {
var (
m *connIDManager
frameQueue []wire.Frame
tokenAdded *protocol.StatelessResetToken
removedTokens []protocol.StatelessResetToken
)
initialConnID := protocol.ParseConnectionID([]byte{0, 0, 0, 0})
BeforeEach(func() {
frameQueue = nil
tokenAdded = nil
removedTokens = nil
m = newConnIDManager(
initialConnID,
func(token protocol.StatelessResetToken) { tokenAdded = &token },
func(token protocol.StatelessResetToken) { removedTokens = append(removedTokens, token) },
func(f wire.Frame) { frameQueue = append(frameQueue, f) },
)
})
get := func() (protocol.ConnectionID, protocol.StatelessResetToken) {
if m.queue.Len() == 0 {
return protocol.ConnectionID{}, protocol.StatelessResetToken{}
}
val := m.queue.Remove(m.queue.Front())
return val.ConnectionID, val.StatelessResetToken
}
It("returns the initial connection ID", func() {
Expect(m.Get()).To(Equal(initialConnID))
})
It("changes the initial connection ID", func() {
m.ChangeInitialConnID(protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5}))
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5})))
})
It("sets the token for the first connection ID", func() {
token := protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
m.SetStatelessResetToken(token)
Expect(*m.activeStatelessResetToken).To(Equal(token))
Expect(*tokenAdded).To(Equal(token))
})
It("adds and gets connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{2, 3, 4, 5}),
StatelessResetToken: protocol.StatelessResetToken{0xe, 0xd, 0xc, 0xb, 0xa, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 4,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
})).To(Succeed())
c1, rt1 := get()
Expect(c1).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
Expect(rt1).To(Equal(protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe}))
c2, rt2 := get()
Expect(c2).To(Equal(protocol.ParseConnectionID([]byte{2, 3, 4, 5})))
Expect(rt2).To(Equal(protocol.StatelessResetToken{0xe, 0xd, 0xc, 0xb, 0xa, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}))
c3, _ := get()
Expect(c3).To(BeZero())
})
It("accepts duplicates", func() {
f1 := &wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
}
f2 := &wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
}
Expect(m.Add(f1)).To(Succeed())
Expect(m.Add(f2)).To(Succeed())
c1, rt1 := get()
Expect(c1).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
Expect(rt1).To(Equal(protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe}))
c2, _ := get()
Expect(c2).To(BeZero())
})
It("ignores duplicates for the currently used connection ID", func() {
f := &wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
}
m.SetHandshakeComplete()
Expect(m.Add(f)).To(Succeed())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
c, _ := get()
Expect(c).To(BeZero())
// Now send the same connection ID again. It should not be queued.
Expect(m.Add(f)).To(Succeed())
c, _ = get()
Expect(c).To(BeZero())
})
It("rejects duplicates with different connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{2, 3, 4, 5}),
})).To(MatchError("received conflicting connection IDs for sequence number 42"))
})
It("rejects duplicates with different connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0xe, 0xd, 0xc, 0xb, 0xa, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
})).To(MatchError("received conflicting stateless reset tokens for sequence number 42"))
})
It("retires connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 13,
ConnectionID: protocol.ParseConnectionID([]byte{2, 3, 4, 5}),
})).To(Succeed())
Expect(frameQueue).To(BeEmpty())
Expect(m.Add(&wire.NewConnectionIDFrame{
RetirePriorTo: 14,
SequenceNumber: 17,
ConnectionID: protocol.ParseConnectionID([]byte{3, 4, 5, 6}),
})).To(Succeed())
Expect(frameQueue).To(HaveLen(3))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(10))
Expect(frameQueue[1].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(13))
Expect(frameQueue[2].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{3, 4, 5, 6})))
})
It("ignores reordered connection IDs, if their sequence number was already retired", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
RetirePriorTo: 5,
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
frameQueue = nil
// If this NEW_CONNECTION_ID frame hadn't been reordered, we would have retired it before.
// Make sure it gets retired immediately now.
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 4,
ConnectionID: protocol.ParseConnectionID([]byte{4, 3, 2, 1}),
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(4))
})
It("ignores reordered connection IDs, if their sequence number was already retired or less than active", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
RetirePriorTo: 5,
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
frameQueue = nil
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})))
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 9,
ConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xca, 0xfb, 0xad}),
RetirePriorTo: 5,
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(9))
})
It("accepts retransmissions for the connection ID that is in use", func() {
connID := protocol.ParseConnectionID([]byte{1, 2, 3, 4})
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: connID,
})).To(Succeed())
m.SetHandshakeComplete()
Expect(frameQueue).To(BeEmpty())
Expect(m.Get()).To(Equal(connID))
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0]).To(BeAssignableToTypeOf(&wire.RetireConnectionIDFrame{}))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
frameQueue = nil
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: connID,
})).To(Succeed())
Expect(frameQueue).To(BeEmpty())
})
It("errors when the peer sends too connection IDs", func() {
for i := uint8(1); i < protocol.MaxActiveConnectionIDs; i++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(i),
ConnectionID: protocol.ParseConnectionID([]byte{i, i, i, i}),
StatelessResetToken: protocol.StatelessResetToken{i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i},
})).To(Succeed())
}
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(9999),
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
})).To(MatchError(&qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError}))
})
It("initiates the first connection ID update as soon as possible", func() {
Expect(m.Get()).To(Equal(initialConnID))
m.SetHandshakeComplete()
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
})).To(Succeed())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
})
It("waits until handshake completion before initiating a connection ID update", func() {
Expect(m.Get()).To(Equal(initialConnID))
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
})).To(Succeed())
Expect(m.Get()).To(Equal(initialConnID))
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
})
It("initiates subsequent updates when enough packets are sent", func() {
var s uint8
for s = uint8(1); s < protocol.MaxActiveConnectionIDs; s++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(s),
ConnectionID: protocol.ParseConnectionID([]byte{s, s, s, s}),
StatelessResetToken: protocol.StatelessResetToken{s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s},
})).To(Succeed())
}
m.SetHandshakeComplete()
lastConnID := m.Get()
Expect(lastConnID).To(Equal(protocol.ParseConnectionID([]byte{1, 1, 1, 1})))
var counter int
for i := 0; i < 50*protocol.PacketsPerConnectionID; i++ {
m.SentPacket()
connID := m.Get()
if connID != lastConnID {
counter++
lastConnID = connID
Expect(removedTokens).To(HaveLen(1))
removedTokens = nil
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(s),
ConnectionID: protocol.ParseConnectionID([]byte{s, s, s, s}),
StatelessResetToken: protocol.StatelessResetToken{s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s},
})).To(Succeed())
s++
}
}
Expect(counter).To(BeNumerically("~", 50, 10))
})
It("retires delayed connection IDs that arrive after a higher connection ID was already retired", func() {
for s := uint8(10); s <= 10+protocol.MaxActiveConnectionIDs/2; s++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(s),
ConnectionID: protocol.ParseConnectionID([]byte{s, s, s, s}),
StatelessResetToken: protocol.StatelessResetToken{s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s},
})).To(Succeed())
}
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{10, 10, 10, 10})))
for {
m.SentPacket()
if m.Get() == protocol.ParseConnectionID([]byte{11, 11, 11, 11}) {
break
}
}
// The active conn ID is now {11, 11, 11, 11}
Expect(m.queue.Front().Value.ConnectionID).To(Equal(protocol.ParseConnectionID([]byte{12, 12, 12, 12})))
// Add a delayed connection ID. It is not added to the queue, but retired immediately.
frameQueue = nil
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(5),
ConnectionID: protocol.ParseConnectionID([]byte{5, 5, 5, 5}),
StatelessResetToken: protocol.StatelessResetToken{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
})).To(Succeed())
Expect(m.queue.Front().Value.ConnectionID).To(Equal(protocol.ParseConnectionID([]byte{12, 12, 12, 12})))
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(5))
})
It("only initiates subsequent updates when enough if enough connection IDs are queued", func() {
for i := uint8(1); i <= protocol.MaxActiveConnectionIDs/2; i++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(i),
ConnectionID: protocol.ParseConnectionID([]byte{i, i, i, i}),
StatelessResetToken: protocol.StatelessResetToken{i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i},
})).To(Succeed())
}
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 1, 1, 1})))
for i := 0; i < 2*protocol.PacketsPerConnectionID; i++ {
m.SentPacket()
}
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 1, 1, 1})))
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1337,
ConnectionID: protocol.ParseConnectionID([]byte{1, 3, 3, 7}),
})).To(Succeed())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{2, 2, 2, 2})))
Expect(removedTokens).To(HaveLen(1))
Expect(removedTokens[0]).To(Equal(protocol.StatelessResetToken{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}))
})
It("removes the currently active stateless reset token when it is closed", func() {
m.Close()
Expect(removedTokens).To(BeEmpty())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
})).To(Succeed())
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
m.Close()
Expect(removedTokens).To(HaveLen(1))
Expect(removedTokens[0]).To(Equal(protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}))
})
})
golang-github-lucas-clemente-quic-go-0.38.2/connection.go 0000664 0000000 0000000 00000224560 14545452366 0023311 0 ustar 00root root 0000000 0000000 package quic
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/quic-go/quic-go/internal/ackhandler"
"github.com/quic-go/quic-go/internal/flowcontrol"
"github.com/quic-go/quic-go/internal/handshake"
"github.com/quic-go/quic-go/internal/logutils"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/logging"
)
type unpacker interface {
UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error)
UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error)
}
type streamGetter interface {
GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
}
type streamManager interface {
GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
OpenStream() (Stream, error)
OpenUniStream() (SendStream, error)
OpenStreamSync(context.Context) (Stream, error)
OpenUniStreamSync(context.Context) (SendStream, error)
AcceptStream(context.Context) (Stream, error)
AcceptUniStream(context.Context) (ReceiveStream, error)
DeleteStream(protocol.StreamID) error
UpdateLimits(*wire.TransportParameters)
HandleMaxStreamsFrame(*wire.MaxStreamsFrame)
CloseWithError(error)
ResetFor0RTT()
UseResetMaps()
}
type cryptoStreamHandler interface {
StartHandshake() error
ChangeConnectionID(protocol.ConnectionID)
SetLargest1RTTAcked(protocol.PacketNumber) error
SetHandshakeConfirmed()
GetSessionTicket() ([]byte, error)
NextEvent() handshake.Event
DiscardInitialKeys()
io.Closer
ConnectionState() handshake.ConnectionState
}
type receivedPacket struct {
buffer *packetBuffer
remoteAddr net.Addr
rcvTime time.Time
data []byte
ecn protocol.ECN
info packetInfo // only valid if the contained IP address is valid
}
func (p *receivedPacket) Size() protocol.ByteCount { return protocol.ByteCount(len(p.data)) }
func (p *receivedPacket) Clone() *receivedPacket {
return &receivedPacket{
remoteAddr: p.remoteAddr,
rcvTime: p.rcvTime,
data: p.data,
buffer: p.buffer,
ecn: p.ecn,
info: p.info,
}
}
type connRunner interface {
Add(protocol.ConnectionID, packetHandler) bool
GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken
Retire(protocol.ConnectionID)
Remove(protocol.ConnectionID)
ReplaceWithClosed([]protocol.ConnectionID, protocol.Perspective, []byte)
AddResetToken(protocol.StatelessResetToken, packetHandler)
RemoveResetToken(protocol.StatelessResetToken)
}
type closeError struct {
err error
remote bool
immediate bool
}
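// errCloseForRecreating signals the run loop that the connection should be closed and then
// recreated, e.g. with a different QUIC version after receiving a Version Negotiation packet.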
type errCloseForRecreating struct {
nextPacketNumber protocol.PacketNumber
nextVersion protocol.VersionNumber
}
func (e *errCloseForRecreating) Error() string {
return "closing connection in order to recreate it"
}
var connTracingID uint64 // to be accessed atomically
func nextConnTracingID() uint64 { return atomic.AddUint64(&connTracingID, 1) }
// A Connection is a QUIC connection
type connection struct {
// Destination connection ID used during the handshake.
// Used to check source connection ID on incoming packets.
handshakeDestConnID protocol.ConnectionID
// Set for the client. Destination connection ID used on the first Initial sent.
origDestConnID protocol.ConnectionID
retrySrcConnID *protocol.ConnectionID // only set for the client (and if a Retry was performed)
srcConnIDLen int
perspective protocol.Perspective
version protocol.VersionNumber
config *Config
conn sendConn
sendQueue sender
streamsMap streamManager
connIDManager *connIDManager
connIDGenerator *connIDGenerator
rttStats *utils.RTTStats
cryptoStreamManager *cryptoStreamManager
sentPacketHandler ackhandler.SentPacketHandler
receivedPacketHandler ackhandler.ReceivedPacketHandler
retransmissionQueue *retransmissionQueue
framer framer
windowUpdateQueue *windowUpdateQueue
connFlowController flowcontrol.ConnectionFlowController
tokenStoreKey string // only set for the client
tokenGenerator *handshake.TokenGenerator // only set for the server
unpacker unpacker
frameParser wire.FrameParser
packer packer
mtuDiscoverer mtuDiscoverer // initialized when the handshake completes
initialStream cryptoStream
handshakeStream cryptoStream
oneRTTStream cryptoStream // only set for the server
cryptoStreamHandler cryptoStreamHandler
receivedPackets chan receivedPacket
sendingScheduled chan struct{}
closeOnce sync.Once
// closeChan is used to notify the run loop that it should terminate
closeChan chan closeError
ctx context.Context
ctxCancel context.CancelCauseFunc
handshakeCtx context.Context
handshakeCtxCancel context.CancelFunc
undecryptablePackets []receivedPacket // undecryptable packets, waiting for a change in encryption level
undecryptablePacketsToProcess []receivedPacket
earlyConnReadyChan chan struct{}
sentFirstPacket bool
handshakeComplete bool
handshakeConfirmed bool
receivedRetry bool
versionNegotiated bool
receivedFirstPacket bool
// the minimum of the max_idle_timeout values advertised by both endpoints
idleTimeout time.Duration
creationTime time.Time
// The idle timeout is set based on the max of the time we received the last packet...
lastPacketReceivedTime time.Time
// ... and the time we sent a new ack-eliciting packet after receiving a packet.
firstAckElicitingPacketAfterIdleSentTime time.Time
// pacingDeadline is the time when the next packet should be sent
pacingDeadline time.Time
peerParams *wire.TransportParameters
timer connectionTimer
// keepAlivePingSent stores whether a keep alive PING is in flight.
// It is reset as soon as we receive a packet from the peer.
keepAlivePingSent bool
keepAliveInterval time.Duration
datagramQueue *datagramQueue
connStateMutex sync.Mutex
connState ConnectionState
logID string
tracer logging.ConnectionTracer
logger utils.Logger
}
var (
_ Connection = &connection{}
_ EarlyConnection = &connection{}
_ streamSender = &connection{}
)
var newConnection = func(
conn sendConn,
runner connRunner,
origDestConnID protocol.ConnectionID,
retrySrcConnID *protocol.ConnectionID,
clientDestConnID protocol.ConnectionID,
destConnID protocol.ConnectionID,
srcConnID protocol.ConnectionID,
connIDGenerator ConnectionIDGenerator,
statelessResetToken protocol.StatelessResetToken,
conf *Config,
tlsConf *tls.Config,
tokenGenerator *handshake.TokenGenerator,
clientAddressValidated bool,
tracer logging.ConnectionTracer,
tracingID uint64,
logger utils.Logger,
v protocol.VersionNumber,
) quicConn {
s := &connection{
conn: conn,
config: conf,
handshakeDestConnID: destConnID,
srcConnIDLen: srcConnID.Len(),
tokenGenerator: tokenGenerator,
oneRTTStream: newCryptoStream(true),
perspective: protocol.PerspectiveServer,
tracer: tracer,
logger: logger,
version: v,
}
if origDestConnID.Len() > 0 {
s.logID = origDestConnID.String()
} else {
s.logID = destConnID.String()
}
s.connIDManager = newConnIDManager(
destConnID,
func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) },
runner.RemoveResetToken,
s.queueControlFrame,
)
s.connIDGenerator = newConnIDGenerator(
srcConnID,
&clientDestConnID,
func(connID protocol.ConnectionID) { runner.Add(connID, s) },
runner.GetStatelessResetToken,
runner.Remove,
runner.Retire,
runner.ReplaceWithClosed,
s.queueControlFrame,
connIDGenerator,
)
s.preSetup()
s.ctx, s.ctxCancel = context.WithCancelCause(context.WithValue(context.Background(), ConnectionTracingKey, tracingID))
s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler(
0,
getMaxPacketSize(s.conn.RemoteAddr()),
s.rttStats,
clientAddressValidated,
s.perspective,
s.tracer,
s.logger,
)
s.mtuDiscoverer = newMTUDiscoverer(s.rttStats, getMaxPacketSize(s.conn.RemoteAddr()), s.sentPacketHandler.SetMaxDatagramSize)
params := &wire.TransportParameters{
InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataUni: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxData: protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
MaxIdleTimeout: s.config.MaxIdleTimeout,
MaxBidiStreamNum: protocol.StreamNum(s.config.MaxIncomingStreams),
MaxUniStreamNum: protocol.StreamNum(s.config.MaxIncomingUniStreams),
MaxAckDelay: protocol.MaxAckDelayInclGranularity,
AckDelayExponent: protocol.AckDelayExponent,
DisableActiveMigration: true,
StatelessResetToken: &statelessResetToken,
OriginalDestinationConnectionID: origDestConnID,
// For interoperability with quic-go versions before May 2023, this value must be set to a value
// different from protocol.DefaultActiveConnectionIDLimit.
// If set to the default value, it will be omitted from the transport parameters, which will make
// old quic-go versions interpret it as 0, instead of the default value of 2.
// See https://github.com/quic-go/quic-go/pull/3806.
ActiveConnectionIDLimit: protocol.MaxActiveConnectionIDs,
InitialSourceConnectionID: srcConnID,
RetrySourceConnectionID: retrySrcConnID,
}
if s.config.EnableDatagrams {
params.MaxDatagramFrameSize = protocol.MaxDatagramFrameSize
} else {
params.MaxDatagramFrameSize = protocol.InvalidByteCount
}
if s.tracer != nil {
s.tracer.SentTransportParameters(params)
}
cs := handshake.NewCryptoSetupServer(
clientDestConnID,
conn.LocalAddr(),
conn.RemoteAddr(),
params,
tlsConf,
conf.Allow0RTT,
s.rttStats,
tracer,
logger,
s.version,
)
s.cryptoStreamHandler = cs
s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
s.cryptoStreamManager = newCryptoStreamManager(cs, s.initialStream, s.handshakeStream, s.oneRTTStream)
return s
}
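// newClientConnection creates the client side of a QUIC connection.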
// It is declared as a variable, such that we can mock it in the tests.
var newClientConnection = func(
conn sendConn,
runner connRunner,
destConnID protocol.ConnectionID,
srcConnID protocol.ConnectionID,
connIDGenerator ConnectionIDGenerator,
conf *Config,
tlsConf *tls.Config,
initialPacketNumber protocol.PacketNumber,
enable0RTT bool,
hasNegotiatedVersion bool,
tracer logging.ConnectionTracer,
tracingID uint64,
logger utils.Logger,
v protocol.VersionNumber,
) quicConn {
s := &connection{
conn: conn,
config: conf,
origDestConnID: destConnID,
handshakeDestConnID: destConnID,
srcConnIDLen: srcConnID.Len(),
perspective: protocol.PerspectiveClient,
logID: destConnID.String(),
logger: logger,
tracer: tracer,
versionNegotiated: hasNegotiatedVersion,
version: v,
}
s.connIDManager = newConnIDManager(
destConnID,
func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) },
runner.RemoveResetToken,
s.queueControlFrame,
)
s.connIDGenerator = newConnIDGenerator(
srcConnID,
nil,
func(connID protocol.ConnectionID) { runner.Add(connID, s) },
runner.GetStatelessResetToken,
runner.Remove,
runner.Retire,
runner.ReplaceWithClosed,
s.queueControlFrame,
connIDGenerator,
)
s.preSetup()
s.ctx, s.ctxCancel = context.WithCancelCause(context.WithValue(context.Background(), ConnectionTracingKey, tracingID))
s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler(
initialPacketNumber,
getMaxPacketSize(s.conn.RemoteAddr()),
s.rttStats,
false, /* has no effect */
s.perspective,
s.tracer,
s.logger,
)
s.mtuDiscoverer = newMTUDiscoverer(s.rttStats, getMaxPacketSize(s.conn.RemoteAddr()), s.sentPacketHandler.SetMaxDatagramSize)
oneRTTStream := newCryptoStream(true)
params := &wire.TransportParameters{
InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataUni: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxData: protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
MaxIdleTimeout: s.config.MaxIdleTimeout,
MaxBidiStreamNum: protocol.StreamNum(s.config.MaxIncomingStreams),
MaxUniStreamNum: protocol.StreamNum(s.config.MaxIncomingUniStreams),
MaxAckDelay: protocol.MaxAckDelayInclGranularity,
AckDelayExponent: protocol.AckDelayExponent,
DisableActiveMigration: true,
// For interoperability with quic-go versions before May 2023, this value must be set to a value
// different from protocol.DefaultActiveConnectionIDLimit.
// If set to the default value, it will be omitted from the transport parameters, which will make
// old quic-go versions interpret it as 0, instead of the default value of 2.
// See https://github.com/quic-go/quic-go/pull/3806.
ActiveConnectionIDLimit: protocol.MaxActiveConnectionIDs,
InitialSourceConnectionID: srcConnID,
}
if s.config.EnableDatagrams {
params.MaxDatagramFrameSize = protocol.MaxDatagramFrameSize
} else {
params.MaxDatagramFrameSize = protocol.InvalidByteCount
}
if s.tracer != nil {
s.tracer.SentTransportParameters(params)
}
cs := handshake.NewCryptoSetupClient(
destConnID,
params,
tlsConf,
enable0RTT,
s.rttStats,
tracer,
logger,
s.version,
)
s.cryptoStreamHandler = cs
s.cryptoStreamManager = newCryptoStreamManager(cs, s.initialStream, s.handshakeStream, oneRTTStream)
s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
if len(tlsConf.ServerName) > 0 {
s.tokenStoreKey = tlsConf.ServerName
} else {
s.tokenStoreKey = conn.RemoteAddr().String()
}
if s.config.TokenStore != nil {
if token := s.config.TokenStore.Pop(s.tokenStoreKey); token != nil {
s.packer.SetToken(token.data)
}
}
return s
}
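// preSetup initializes the state shared by client and server connections:
// the crypto streams, the send and retransmission queues, the flow controllers,
// the streams map, the framer, and the channels and timestamps used by the run loop.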
func (s *connection) preSetup() {
s.initialStream = newCryptoStream(false)
s.handshakeStream = newCryptoStream(false)
s.sendQueue = newSendQueue(s.conn)
s.retransmissionQueue = newRetransmissionQueue()
s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams)
s.rttStats = &utils.RTTStats{}
s.connFlowController = flowcontrol.NewConnectionFlowController(
protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
protocol.ByteCount(s.config.MaxConnectionReceiveWindow),
s.onHasConnectionWindowUpdate,
func(size protocol.ByteCount) bool {
if s.config.AllowConnectionWindowIncrease == nil {
return true
}
return s.config.AllowConnectionWindowIncrease(s, uint64(size))
},
s.rttStats,
s.logger,
)
s.earlyConnReadyChan = make(chan struct{})
s.streamsMap = newStreamsMap(
s,
s.newFlowController,
uint64(s.config.MaxIncomingStreams),
uint64(s.config.MaxIncomingUniStreams),
s.perspective,
)
s.framer = newFramer(s.streamsMap)
s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets)
s.closeChan = make(chan closeError, 1)
s.sendingScheduled = make(chan struct{}, 1)
s.handshakeCtx, s.handshakeCtxCancel = context.WithCancel(context.Background())
now := time.Now()
s.lastPacketReceivedTime = now
s.creationTime = now
s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame)
s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
s.connState.Version = s.version
}
// run the connection main loop
func (s *connection) run() error {
var closeErr closeError
defer func() {
s.ctxCancel(closeErr.err)
}()
s.timer = *newTimer()
if err := s.cryptoStreamHandler.StartHandshake(); err != nil {
return err
}
if err := s.handleHandshakeEvents(); err != nil {
return err
}
go func() {
if err := s.sendQueue.Run(); err != nil {
s.destroyImpl(err)
}
}()
if s.perspective == protocol.PerspectiveClient {
s.scheduleSending() // so the ClientHello actually gets sent
}
var sendQueueAvailable <-chan struct{}
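// Each iteration of the run loop resets the timer, drains any queued undecryptable packets,
// and then blocks until the timer fires, sending is scheduled, the send queue becomes
// available, or a new packet arrives. It then checks the loss detection, keep-alive,
// handshake and idle timeouts, and finally triggers sending unless the send queue is still busy.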
runLoop:
for {
// Close immediately if requested
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
s.maybeResetTimer()
var processedUndecryptablePacket bool
if len(s.undecryptablePacketsToProcess) > 0 {
queue := s.undecryptablePacketsToProcess
s.undecryptablePacketsToProcess = nil
for _, p := range queue {
if processed := s.handlePacketImpl(p); processed {
processedUndecryptablePacket = true
}
// Don't set timers and send packets if the packet made us close the connection.
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
}
}
// If we processed any undecryptable packets, jump to the resetting of the timers directly.
if !processedUndecryptablePacket {
select {
case closeErr = <-s.closeChan:
break runLoop
case <-s.timer.Chan():
s.timer.SetRead()
// We do all the interesting stuff after the select statement, so
// nothing to see here.
case <-s.sendingScheduled:
// We do all the interesting stuff after the select statement, so
// nothing to see here.
case <-sendQueueAvailable:
case firstPacket := <-s.receivedPackets:
wasProcessed := s.handlePacketImpl(firstPacket)
// Don't set timers and send packets if the packet made us close the connection.
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
if s.handshakeComplete {
// Now process all packets in the receivedPackets channel.
// Limit the number of packets to the length of the receivedPackets channel,
// so we eventually get a chance to send out an ACK when receiving a lot of packets.
numPackets := len(s.receivedPackets)
receiveLoop:
for i := 0; i < numPackets; i++ {
select {
case p := <-s.receivedPackets:
if processed := s.handlePacketImpl(p); processed {
wasProcessed = true
}
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
default:
break receiveLoop
}
}
}
// Only reset the timers if this packet was actually processed.
// This avoids modifying any state when handling undecryptable packets,
// which could be injected by an attacker.
if !wasProcessed {
continue
}
}
}
now := time.Now()
if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) {
// This could cause packets to be retransmitted.
// Check it before trying to send packets.
if err := s.sentPacketHandler.OnLossDetectionTimeout(); err != nil {
s.closeLocal(err)
}
}
if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() && !now.Before(keepAliveTime) {
// send a PING frame since there is no activity in the connection
s.logger.Debugf("Sending a keep-alive PING to keep the connection alive.")
s.framer.QueueControlFrame(&wire.PingFrame{})
s.keepAlivePingSent = true
} else if !s.handshakeComplete && now.Sub(s.creationTime) >= s.config.handshakeTimeout() {
s.destroyImpl(qerr.ErrHandshakeTimeout)
continue
} else {
idleTimeoutStartTime := s.idleTimeoutStartTime()
if (!s.handshakeComplete && now.Sub(idleTimeoutStartTime) >= s.config.HandshakeIdleTimeout) ||
(s.handshakeComplete && now.After(s.nextIdleTimeoutTime())) {
s.destroyImpl(qerr.ErrIdleTimeout)
continue
}
}
if s.sendQueue.WouldBlock() {
// The send queue is still busy sending out packets.
// Wait until there's space to enqueue new packets.
sendQueueAvailable = s.sendQueue.Available()
continue
}
if err := s.triggerSending(); err != nil {
s.closeLocal(err)
}
if s.sendQueue.WouldBlock() {
sendQueueAvailable = s.sendQueue.Available()
} else {
sendQueueAvailable = nil
}
}
s.cryptoStreamHandler.Close()
s.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE
s.handleCloseError(&closeErr)
if e := (&errCloseForRecreating{}); !errors.As(closeErr.err, &e) && s.tracer != nil {
s.tracer.Close()
}
s.logger.Infof("Connection %s closed.", s.logID)
s.timer.Stop()
return closeErr.err
}
// earlyConnReady returns a channel that is closed once the early connection can be used
func (s *connection) earlyConnReady() <-chan struct{} {
return s.earlyConnReadyChan
}
func (s *connection) HandshakeComplete() <-chan struct{} {
return s.handshakeCtx.Done()
}
func (s *connection) Context() context.Context {
return s.ctx
}
func (s *connection) supportsDatagrams() bool {
return s.peerParams.MaxDatagramFrameSize > 0
}
func (s *connection) ConnectionState() ConnectionState {
s.connStateMutex.Lock()
defer s.connStateMutex.Unlock()
cs := s.cryptoStreamHandler.ConnectionState()
s.connState.TLS = cs.ConnectionState
s.connState.Used0RTT = cs.Used0RTT
return s.connState
}
// Time when the connection should time out
func (s *connection) nextIdleTimeoutTime() time.Time {
idleTimeout := utils.Max(s.idleTimeout, s.rttStats.PTO(true)*3)
return s.idleTimeoutStartTime().Add(idleTimeout)
}
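// Illustrative example (assumed values): with a negotiated max_idle_timeout of 30s and a
// PTO of 100ms, the effective idle timeout is max(30s, 3*100ms) = 30s, measured from
// idleTimeoutStartTime().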
// Time when the next keep-alive packet should be sent.
// It returns a zero time if no keep-alive should be sent.
func (s *connection) nextKeepAliveTime() time.Time {
if s.config.KeepAlivePeriod == 0 || s.keepAlivePingSent || !s.firstAckElicitingPacketAfterIdleSentTime.IsZero() {
return time.Time{}
}
keepAliveInterval := utils.Max(s.keepAliveInterval, s.rttStats.PTO(true)*3/2)
return s.lastPacketReceivedTime.Add(keepAliveInterval)
}
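// Illustrative example (assumed values): with KeepAlivePeriod = 10s and an effective idle
// timeout of 30s, keepAliveInterval is min(10s, 30s/2) = 10s (assuming MaxKeepAliveInterval
// doesn't cap it); with a PTO of 200ms, a keep-alive PING becomes due
// max(10s, 3/2*200ms) = 10s after the last received packet.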
func (s *connection) maybeResetTimer() {
var deadline time.Time
if !s.handshakeComplete {
deadline = utils.MinTime(
s.creationTime.Add(s.config.handshakeTimeout()),
s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout),
)
} else {
if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() {
deadline = keepAliveTime
} else {
deadline = s.nextIdleTimeoutTime()
}
}
s.timer.SetTimer(
deadline,
s.receivedPacketHandler.GetAlarmTimeout(),
s.sentPacketHandler.GetLossDetectionTimeout(),
s.pacingDeadline,
)
}
func (s *connection) idleTimeoutStartTime() time.Time {
return utils.MaxTime(s.lastPacketReceivedTime, s.firstAckElicitingPacketAfterIdleSentTime)
}
func (s *connection) handleHandshakeComplete() error {
defer s.handshakeCtxCancel()
// Once the handshake completes, we have derived 1-RTT keys.
// There's no point in queueing undecryptable packets for later decryption anymore.
s.undecryptablePackets = nil
s.connIDManager.SetHandshakeComplete()
s.connIDGenerator.SetHandshakeComplete()
// The server applies transport parameters right away, but the client side has to wait for handshake completion.
// During a 0-RTT connection, the client is only allowed to use the new transport parameters for 1-RTT packets.
if s.perspective == protocol.PerspectiveClient {
s.applyTransportParameters()
return nil
}
// All these only apply to the server side.
if err := s.handleHandshakeConfirmed(); err != nil {
return err
}
ticket, err := s.cryptoStreamHandler.GetSessionTicket()
if err != nil {
return err
}
if ticket != nil { // may be nil if session tickets are disabled via tls.Config.SessionTicketsDisabled
s.oneRTTStream.Write(ticket)
for s.oneRTTStream.HasData() {
s.queueControlFrame(s.oneRTTStream.PopCryptoFrame(protocol.MaxPostHandshakeCryptoFrameSize))
}
}
token, err := s.tokenGenerator.NewToken(s.conn.RemoteAddr())
if err != nil {
return err
}
s.queueControlFrame(&wire.NewTokenFrame{Token: token})
s.queueControlFrame(&wire.HandshakeDoneFrame{})
return nil
}
func (s *connection) handleHandshakeConfirmed() error {
if err := s.dropEncryptionLevel(protocol.EncryptionHandshake); err != nil {
return err
}
s.handshakeConfirmed = true
s.sentPacketHandler.SetHandshakeConfirmed()
s.cryptoStreamHandler.SetHandshakeConfirmed()
if !s.config.DisablePathMTUDiscovery && s.conn.capabilities().DF {
maxPacketSize := s.peerParams.MaxUDPPayloadSize
if maxPacketSize == 0 {
maxPacketSize = protocol.MaxByteCount
}
s.mtuDiscoverer.Start(utils.Min(maxPacketSize, protocol.MaxPacketBufferSize))
}
return nil
}
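// handlePacketImpl processes a received datagram, which may contain multiple coalesced
// QUIC packets. It returns true if at least one of the contained packets was successfully processed.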
func (s *connection) handlePacketImpl(rp receivedPacket) bool {
s.sentPacketHandler.ReceivedBytes(rp.Size())
if wire.IsVersionNegotiationPacket(rp.data) {
s.handleVersionNegotiationPacket(rp)
return false
}
var counter uint8
var lastConnID protocol.ConnectionID
var processed bool
data := rp.data
p := rp
for len(data) > 0 {
var destConnID protocol.ConnectionID
if counter > 0 {
p = *(p.Clone())
p.data = data
var err error
destConnID, err = wire.ParseConnectionID(p.data, s.srcConnIDLen)
if err != nil {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", err)
break
}
if destConnID != lastConnID {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID)
}
s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", destConnID, lastConnID)
break
}
}
if wire.IsLongHeaderPacket(p.data[0]) {
hdr, packetData, rest, err := wire.ParsePacket(p.data)
if err != nil {
if s.tracer != nil {
dropReason := logging.PacketDropHeaderParseError
if err == wire.ErrUnsupportedVersion {
dropReason = logging.PacketDropUnsupportedVersion
}
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), dropReason)
}
s.logger.Debugf("error parsing packet: %s", err)
break
}
lastConnID = hdr.DestConnectionID
if hdr.Version != s.version {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion)
}
s.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, s.version)
break
}
if counter > 0 {
p.buffer.Split()
}
counter++
// only log if this is actually a coalesced packet
if s.logger.Debug() && (counter > 1 || len(rest) > 0) {
s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest))
}
p.data = packetData
if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed {
processed = true
}
data = rest
} else {
if counter > 0 {
p.buffer.Split()
}
processed = s.handleShortHeaderPacket(p, destConnID)
break
}
}
p.buffer.MaybeRelease()
return processed
}
func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protocol.ConnectionID) bool {
var wasQueued bool
defer func() {
// Put back the packet buffer if the packet wasn't queued for later decryption.
if !wasQueued {
p.buffer.Decrement()
}
}()
pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data)
if err != nil {
wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT)
return false
}
if s.logger.Debug() {
s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID)
wire.LogShortHeader(s.logger, destConnID, pn, pnLen, keyPhase)
}
if s.receivedPacketHandler.IsPotentiallyDuplicate(pn, protocol.Encryption1RTT) {
s.logger.Debugf("Dropping (potentially) duplicate packet.")
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketType1RTT, p.Size(), logging.PacketDropDuplicate)
}
return false
}
var log func([]logging.Frame)
if s.tracer != nil {
log = func(frames []logging.Frame) {
s.tracer.ReceivedShortHeaderPacket(
&logging.ShortHeader{
DestConnectionID: destConnID,
PacketNumber: pn,
PacketNumberLen: pnLen,
KeyPhase: keyPhase,
},
p.Size(),
frames,
)
}
}
if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil {
s.closeLocal(err)
return false
}
return true
}
func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ {
var wasQueued bool
defer func() {
// Put back the packet buffer if the packet wasn't queued for later decryption.
if !wasQueued {
p.buffer.Decrement()
}
}()
if hdr.Type == protocol.PacketTypeRetry {
return s.handleRetryPacket(hdr, p.data)
}
// The server can change the source connection ID with the first Handshake packet.
// After this, all packets with a different source connection ID have to be ignored.
if s.receivedFirstPacket && hdr.Type == protocol.PacketTypeInitial && hdr.SrcConnectionID != s.handshakeDestConnID {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeInitial, p.Size(), logging.PacketDropUnknownConnectionID)
}
s.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", p.Size(), hdr.SrcConnectionID, s.handshakeDestConnID)
return false
}
// drop 0-RTT packets, if we are a client
if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketType0RTT, p.Size(), logging.PacketDropKeyUnavailable)
}
return false
}
packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version)
if err != nil {
wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr))
return false
}
if s.logger.Debug() {
s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.hdr.PacketNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel)
packet.hdr.Log(s.logger)
}
if s.receivedPacketHandler.IsPotentiallyDuplicate(packet.hdr.PacketNumber, packet.encryptionLevel) {
s.logger.Debugf("Dropping (potentially) duplicate packet.")
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropDuplicate)
}
return false
}
if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
s.closeLocal(err)
return false
}
return true
}
func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool) {
switch err {
case handshake.ErrKeysDropped:
if s.tracer != nil {
s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropKeyUnavailable)
}
s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size())
case handshake.ErrKeysNotYetAvailable:
// Sealer for this encryption level not yet available.
// Try again later.
s.tryQueueingUndecryptablePacket(p, pt)
return true
case wire.ErrInvalidReservedBits:
s.closeLocal(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: err.Error(),
})
case handshake.ErrDecryptionFailed:
// This might be a packet injected by an attacker. Drop it.
if s.tracer != nil {
s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropPayloadDecryptError)
}
s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err)
default:
var headerErr *headerParseError
if errors.As(err, &headerErr) {
// This might be a packet injected by an attacker. Drop it.
if s.tracer != nil {
s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err)
} else {
// This is an error returned by the AEAD (other than ErrDecryptionFailed).
// For example, a PROTOCOL_VIOLATION due to key updates.
s.closeLocal(err)
}
}
return false
}
func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte) bool /* was this a valid Retry */ {
if s.perspective == protocol.PerspectiveServer {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
s.logger.Debugf("Ignoring Retry.")
return false
}
if s.receivedFirstPacket {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
s.logger.Debugf("Ignoring Retry, since we already received a packet.")
return false
}
destConnID := s.connIDManager.Get()
if hdr.SrcConnectionID == destConnID {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
s.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.")
return false
}
// If we already received a Retry from the server (and set its token),
// ignore this Retry packet.
if s.receivedRetry {
s.logger.Debugf("Ignoring Retry, since a Retry was already received.")
return false
}
tag := handshake.GetRetryIntegrityTag(data[:len(data)-16], destConnID, hdr.Version)
if !bytes.Equal(data[len(data)-16:], tag[:]) {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.ByteCount(len(data)), logging.PacketDropPayloadDecryptError)
}
s.logger.Debugf("Ignoring spoofed Retry. Integrity Tag doesn't match.")
return false
}
if s.logger.Debug() {
s.logger.Debugf("<- Received Retry:")
(&wire.ExtendedHeader{Header: *hdr}).Log(s.logger)
s.logger.Debugf("Switching destination connection ID to: %s", hdr.SrcConnectionID)
}
if s.tracer != nil {
s.tracer.ReceivedRetry(hdr)
}
newDestConnID := hdr.SrcConnectionID
s.receivedRetry = true
if err := s.sentPacketHandler.ResetForRetry(); err != nil {
s.closeLocal(err)
return false
}
s.handshakeDestConnID = newDestConnID
s.retrySrcConnID = &newDestConnID
s.cryptoStreamHandler.ChangeConnectionID(newDestConnID)
s.packer.SetToken(hdr.Token)
s.connIDManager.ChangeInitialConnID(newDestConnID)
s.scheduleSending()
return true
}
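// handleVersionNegotiationPacket is only relevant for the client. If the packet lists a
// mutually supported version, the connection is destroyed with an errCloseForRecreating,
// so that it can be re-established using the newly chosen version.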
func (s *connection) handleVersionNegotiationPacket(p receivedPacket) {
if s.perspective == protocol.PerspectiveServer || // servers never receive version negotiation packets
s.receivedFirstPacket || s.versionNegotiated { // ignore delayed / duplicated version negotiation packets
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropUnexpectedPacket)
}
return
}
src, dest, supportedVersions, err := wire.ParseVersionNegotiationPacket(p.data)
if err != nil {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("Error parsing Version Negotiation packet: %s", err)
return
}
for _, v := range supportedVersions {
if v == s.version {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropUnexpectedVersion)
}
// The Version Negotiation packet contains the version that we offered.
// This might be a packet sent by an attacker, or it was corrupted.
return
}
}
s.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", supportedVersions)
if s.tracer != nil {
s.tracer.ReceivedVersionNegotiationPacket(dest, src, supportedVersions)
}
newVersion, ok := protocol.ChooseSupportedVersion(s.config.Versions, supportedVersions)
if !ok {
s.destroyImpl(&VersionNegotiationError{
Ours: s.config.Versions,
Theirs: supportedVersions,
})
s.logger.Infof("No compatible QUIC version found.")
return
}
if s.tracer != nil {
s.tracer.NegotiatedVersion(newVersion, s.config.Versions, supportedVersions)
}
s.logger.Infof("Switching to QUIC version %s.", newVersion)
nextPN, _ := s.sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial)
s.destroyImpl(&errCloseForRecreating{
nextPacketNumber: nextPN,
nextVersion: newVersion,
})
}
func (s *connection) handleUnpackedLongHeaderPacket(
packet *unpackedPacket,
ecn protocol.ECN,
rcvTime time.Time,
packetSize protocol.ByteCount, // only for logging
) error {
if !s.receivedFirstPacket {
s.receivedFirstPacket = true
if !s.versionNegotiated && s.tracer != nil {
var clientVersions, serverVersions []protocol.VersionNumber
switch s.perspective {
case protocol.PerspectiveClient:
clientVersions = s.config.Versions
case protocol.PerspectiveServer:
serverVersions = s.config.Versions
}
s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions)
}
// The server can change the source connection ID with the first Handshake packet.
if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID {
cid := packet.hdr.SrcConnectionID
s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid)
s.handshakeDestConnID = cid
s.connIDManager.ChangeInitialConnID(cid)
}
// We create the connection as soon as we receive the first packet from the client.
// We do that before authenticating the packet.
// That means that if the source connection ID was corrupted,
// we might have created a connection with an incorrect source connection ID.
// Once we authenticate the first packet, we need to update it.
if s.perspective == protocol.PerspectiveServer {
if packet.hdr.SrcConnectionID != s.handshakeDestConnID {
s.handshakeDestConnID = packet.hdr.SrcConnectionID
s.connIDManager.ChangeInitialConnID(packet.hdr.SrcConnectionID)
}
if s.tracer != nil {
s.tracer.StartedConnection(
s.conn.LocalAddr(),
s.conn.RemoteAddr(),
packet.hdr.SrcConnectionID,
packet.hdr.DestConnectionID,
)
}
}
}
if s.perspective == protocol.PerspectiveServer && packet.encryptionLevel == protocol.EncryptionHandshake {
// On the server side, Initial keys are dropped as soon as the first Handshake packet is received.
// See Section 4.9.1 of RFC 9001.
if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
return err
}
}
s.lastPacketReceivedTime = rcvTime
s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
s.keepAlivePingSent = false
var log func([]logging.Frame)
if s.tracer != nil {
log = func(frames []logging.Frame) {
s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, frames)
}
}
isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log)
if err != nil {
return err
}
return s.receivedPacketHandler.ReceivedPacket(packet.hdr.PacketNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting)
}
func (s *connection) handleUnpackedShortHeaderPacket(
destConnID protocol.ConnectionID,
pn protocol.PacketNumber,
data []byte,
ecn protocol.ECN,
rcvTime time.Time,
log func([]logging.Frame),
) error {
s.lastPacketReceivedTime = rcvTime
s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
s.keepAlivePingSent = false
isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log)
if err != nil {
return err
}
return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting)
}
func (s *connection) handleFrames(
data []byte,
destConnID protocol.ConnectionID,
encLevel protocol.EncryptionLevel,
log func([]logging.Frame),
) (isAckEliciting bool, _ error) {
// Only used for tracing.
// If we're not tracing, this slice will always remain empty.
var frames []logging.Frame
if log != nil {
frames = make([]logging.Frame, 0, 4)
}
handshakeWasComplete := s.handshakeComplete
var handleErr error
for len(data) > 0 {
l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version)
if err != nil {
return false, err
}
data = data[l:]
if frame == nil {
break
}
if ackhandler.IsFrameAckEliciting(frame) {
isAckEliciting = true
}
if log != nil {
frames = append(frames, logutils.ConvertFrame(frame))
}
// An error occurred handling a previous frame.
// Don't handle the current frame.
if handleErr != nil {
continue
}
if err := s.handleFrame(frame, encLevel, destConnID); err != nil {
if log == nil {
return false, err
}
// If we're logging, we need to keep parsing (but not handling) all frames.
handleErr = err
}
}
if log != nil {
log(frames)
if handleErr != nil {
return false, handleErr
}
}
// Handle completion of the handshake after processing all the frames.
// This ensures that we correctly handle the following case on the server side:
// We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake,
// and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame.
if !handshakeWasComplete && s.handshakeComplete {
if err := s.handleHandshakeComplete(); err != nil {
return false, err
}
}
return
}
func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error {
var err error
wire.LogFrame(s.logger, f, false)
switch frame := f.(type) {
case *wire.CryptoFrame:
err = s.handleCryptoFrame(frame, encLevel)
case *wire.StreamFrame:
err = s.handleStreamFrame(frame)
case *wire.AckFrame:
err = s.handleAckFrame(frame, encLevel)
case *wire.ConnectionCloseFrame:
s.handleConnectionCloseFrame(frame)
case *wire.ResetStreamFrame:
err = s.handleResetStreamFrame(frame)
case *wire.MaxDataFrame:
s.handleMaxDataFrame(frame)
case *wire.MaxStreamDataFrame:
err = s.handleMaxStreamDataFrame(frame)
case *wire.MaxStreamsFrame:
s.handleMaxStreamsFrame(frame)
case *wire.DataBlockedFrame:
case *wire.StreamDataBlockedFrame:
case *wire.StreamsBlockedFrame:
case *wire.StopSendingFrame:
err = s.handleStopSendingFrame(frame)
case *wire.PingFrame:
case *wire.PathChallengeFrame:
s.handlePathChallengeFrame(frame)
case *wire.PathResponseFrame:
// since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs
err = errors.New("unexpected PATH_RESPONSE frame")
case *wire.NewTokenFrame:
err = s.handleNewTokenFrame(frame)
case *wire.NewConnectionIDFrame:
err = s.handleNewConnectionIDFrame(frame)
case *wire.RetireConnectionIDFrame:
err = s.handleRetireConnectionIDFrame(frame, destConnID)
case *wire.HandshakeDoneFrame:
err = s.handleHandshakeDoneFrame()
case *wire.DatagramFrame:
err = s.handleDatagramFrame(frame)
default:
err = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&frame).Elem().Type().Name())
}
return err
}
// handlePacket is called by the server with a new packet
func (s *connection) handlePacket(p receivedPacket) {
// Discard packets once the number of queued packets is larger than
// the channel size, protocol.MaxConnUnprocessedPackets
select {
case s.receivedPackets <- p:
default:
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropDOSPrevention)
}
}
}
func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) {
if frame.IsApplicationError {
s.closeRemote(&qerr.ApplicationError{
Remote: true,
ErrorCode: qerr.ApplicationErrorCode(frame.ErrorCode),
ErrorMessage: frame.ReasonPhrase,
})
return
}
s.closeRemote(&qerr.TransportError{
Remote: true,
ErrorCode: qerr.TransportErrorCode(frame.ErrorCode),
FrameType: frame.FrameType,
ErrorMessage: frame.ReasonPhrase,
})
}
func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error {
if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil {
return err
}
return s.handleHandshakeEvents()
}
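// handleHandshakeEvents drains the crypto stream handler's event queue until it reports
// EventNoEvent, applying received transport parameters, queueing undecryptable packets for
// reprocessing when new read keys become available, and writing outgoing Initial and Handshake data.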
func (s *connection) handleHandshakeEvents() error {
for {
ev := s.cryptoStreamHandler.NextEvent()
var err error
switch ev.Kind {
case handshake.EventNoEvent:
return nil
case handshake.EventHandshakeComplete:
// Don't call handleHandshakeComplete yet.
// It's advantageous to process ACK frames that might be serialized after the CRYPTO frame first.
s.handshakeComplete = true
case handshake.EventReceivedTransportParameters:
err = s.handleTransportParameters(ev.TransportParameters)
case handshake.EventRestoredTransportParameters:
s.restoreTransportParameters(ev.TransportParameters)
close(s.earlyConnReadyChan)
case handshake.EventReceivedReadKeys:
// Queue all packets for decryption that have been undecryptable so far.
s.undecryptablePacketsToProcess = s.undecryptablePackets
s.undecryptablePackets = nil
case handshake.EventDiscard0RTTKeys:
err = s.dropEncryptionLevel(protocol.Encryption0RTT)
case handshake.EventWriteInitialData:
_, err = s.initialStream.Write(ev.Data)
case handshake.EventWriteHandshakeData:
_, err = s.handshakeStream.Write(ev.Data)
}
if err != nil {
return err
}
}
}
func (s *connection) handleStreamFrame(frame *wire.StreamFrame) error {
str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// Stream is closed and already garbage collected
// ignore this StreamFrame
return nil
}
return str.handleStreamFrame(frame)
}
func (s *connection) handleMaxDataFrame(frame *wire.MaxDataFrame) {
s.connFlowController.UpdateSendWindow(frame.MaximumData)
}
func (s *connection) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) error {
str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// stream is closed and already garbage collected
return nil
}
str.updateSendWindow(frame.MaximumStreamData)
return nil
}
func (s *connection) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) {
s.streamsMap.HandleMaxStreamsFrame(frame)
}
func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error {
str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// stream is closed and already garbage collected
return nil
}
return str.handleResetStreamFrame(frame)
}
func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error {
str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// stream is closed and already garbage collected
return nil
}
str.handleStopSendingFrame(frame)
return nil
}
func (s *connection) handlePathChallengeFrame(frame *wire.PathChallengeFrame) {
s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data})
}
func (s *connection) handleNewTokenFrame(frame *wire.NewTokenFrame) error {
if s.perspective == protocol.PerspectiveServer {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received NEW_TOKEN frame from the client",
}
}
if s.config.TokenStore != nil {
s.config.TokenStore.Put(s.tokenStoreKey, &ClientToken{data: frame.Token})
}
return nil
}
func (s *connection) handleNewConnectionIDFrame(f *wire.NewConnectionIDFrame) error {
return s.connIDManager.Add(f)
}
func (s *connection) handleRetireConnectionIDFrame(f *wire.RetireConnectionIDFrame, destConnID protocol.ConnectionID) error {
return s.connIDGenerator.Retire(f.SequenceNumber, destConnID)
}
func (s *connection) handleHandshakeDoneFrame() error {
if s.perspective == protocol.PerspectiveServer {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received a HANDSHAKE_DONE frame",
}
}
if !s.handshakeConfirmed {
return s.handleHandshakeConfirmed()
}
return nil
}
func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error {
acked1RTTPacket, err := s.sentPacketHandler.ReceivedAck(frame, encLevel, s.lastPacketReceivedTime)
if err != nil {
return err
}
if !acked1RTTPacket {
return nil
}
// On the client side: If the packet acknowledged a 1-RTT packet, this confirms the handshake.
// This is only possible if the ACK was sent in a 1-RTT packet.
// This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001.
if s.perspective == protocol.PerspectiveClient && !s.handshakeConfirmed {
if err := s.handleHandshakeConfirmed(); err != nil {
return err
}
}
return s.cryptoStreamHandler.SetLargest1RTTAcked(frame.LargestAcked())
}
func (s *connection) handleDatagramFrame(f *wire.DatagramFrame) error {
if f.Length(s.version) > protocol.MaxDatagramFrameSize {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "DATAGRAM frame too large",
}
}
s.datagramQueue.HandleDatagramFrame(f)
return nil
}
// closeLocal closes the connection and sends a CONNECTION_CLOSE containing the error
func (s *connection) closeLocal(e error) {
s.closeOnce.Do(func() {
if e == nil {
s.logger.Infof("Closing connection.")
} else {
s.logger.Errorf("Closing connection with error: %s", e)
}
s.closeChan <- closeError{err: e, immediate: false, remote: false}
})
}
// destroy closes the connection without sending the error on the wire
func (s *connection) destroy(e error) {
s.destroyImpl(e)
<-s.ctx.Done()
}
func (s *connection) destroyImpl(e error) {
s.closeOnce.Do(func() {
if nerr, ok := e.(net.Error); ok && nerr.Timeout() {
s.logger.Errorf("Destroying connection: %s", e)
} else {
s.logger.Errorf("Destroying connection with error: %s", e)
}
s.closeChan <- closeError{err: e, immediate: true, remote: false}
})
}
func (s *connection) closeRemote(e error) {
s.closeOnce.Do(func() {
s.logger.Errorf("Peer closed connection with error: %s", e)
s.closeChan <- closeError{err: e, immediate: true, remote: true}
})
}
// shutdown closes the connection. It sends a NO_ERROR application error.
// It waits until the run loop has stopped before returning.
func (s *connection) shutdown() {
s.closeLocal(nil)
<-s.ctx.Done()
}
func (s *connection) CloseWithError(code ApplicationErrorCode, desc string) error {
s.closeLocal(&qerr.ApplicationError{
ErrorCode: code,
ErrorMessage: desc,
})
<-s.ctx.Done()
return nil
}
func (s *connection) handleCloseError(closeErr *closeError) {
e := closeErr.err
if e == nil {
e = &qerr.ApplicationError{}
} else {
defer func() {
closeErr.err = e
}()
}
var (
statelessResetErr *StatelessResetError
versionNegotiationErr *VersionNegotiationError
recreateErr *errCloseForRecreating
applicationErr *ApplicationError
transportErr *TransportError
)
switch {
case errors.Is(e, qerr.ErrIdleTimeout),
errors.Is(e, qerr.ErrHandshakeTimeout),
errors.As(e, &statelessResetErr),
errors.As(e, &versionNegotiationErr),
errors.As(e, &recreateErr),
errors.As(e, &applicationErr),
errors.As(e, &transportErr):
default:
e = &qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: e.Error(),
}
}
s.streamsMap.CloseWithError(e)
s.connIDManager.Close()
if s.datagramQueue != nil {
s.datagramQueue.CloseWithError(e)
}
if s.tracer != nil && !errors.As(e, &recreateErr) {
s.tracer.ClosedConnection(e)
}
// If this is a remote close we're done here
if closeErr.remote {
s.connIDGenerator.ReplaceWithClosed(s.perspective, nil)
return
}
if closeErr.immediate {
s.connIDGenerator.RemoveAll()
return
}
// Don't send out any CONNECTION_CLOSE if this is an error that occurred
// before we even sent out the first packet.
if s.perspective == protocol.PerspectiveClient && !s.sentFirstPacket {
s.connIDGenerator.RemoveAll()
return
}
connClosePacket, err := s.sendConnectionClose(e)
if err != nil {
s.logger.Debugf("Error sending CONNECTION_CLOSE: %s", err)
}
s.connIDGenerator.ReplaceWithClosed(s.perspective, connClosePacket)
}
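// dropEncryptionLevel drops the packet history for the given encryption level in the ack
// handlers, discards the Initial keys (for Initial) or resets the 0-RTT stream and flow
// control state (for 0-RTT rejection), and drops the associated crypto stream.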
func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) error {
if s.tracer != nil {
s.tracer.DroppedEncryptionLevel(encLevel)
}
s.sentPacketHandler.DropPackets(encLevel)
s.receivedPacketHandler.DropPackets(encLevel)
//nolint:exhaustive // only Initial and 0-RTT need special treatment
switch encLevel {
case protocol.EncryptionInitial:
s.cryptoStreamHandler.DiscardInitialKeys()
case protocol.Encryption0RTT:
s.streamsMap.ResetFor0RTT()
if err := s.connFlowController.Reset(); err != nil {
return err
}
return s.framer.Handle0RTTRejection()
}
return s.cryptoStreamManager.Drop(encLevel)
}
// restoreTransportParameters is called on the client when restoring transport parameters saved for 0-RTT
func (s *connection) restoreTransportParameters(params *wire.TransportParameters) {
if s.logger.Debug() {
s.logger.Debugf("Restoring Transport Parameters: %s", params)
}
s.peerParams = params
s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
s.connFlowController.UpdateSendWindow(params.InitialMaxData)
s.streamsMap.UpdateLimits(params)
s.connStateMutex.Lock()
s.connState.SupportsDatagrams = s.supportsDatagrams()
s.connStateMutex.Unlock()
}
func (s *connection) handleTransportParameters(params *wire.TransportParameters) error {
if s.tracer != nil {
s.tracer.ReceivedTransportParameters(params)
}
if err := s.checkTransportParameters(params); err != nil {
return &qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: err.Error(),
}
}
if s.perspective == protocol.PerspectiveClient && s.peerParams != nil && s.ConnectionState().Used0RTT && !params.ValidForUpdate(s.peerParams) {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "server sent reduced limits after accepting 0-RTT data",
}
}
s.peerParams = params
// On the client side we have to wait for handshake completion.
// During a 0-RTT connection, we are only allowed to use the new transport parameters for 1-RTT packets.
if s.perspective == protocol.PerspectiveServer {
s.applyTransportParameters()
// On the server side, the early connection is ready as soon as we processed
// the client's transport parameters.
close(s.earlyConnReadyChan)
}
s.connStateMutex.Lock()
s.connState.SupportsDatagrams = s.supportsDatagrams()
s.connStateMutex.Unlock()
return nil
}
func (s *connection) checkTransportParameters(params *wire.TransportParameters) error {
if s.logger.Debug() {
s.logger.Debugf("Processed Transport Parameters: %s", params)
}
// check the initial_source_connection_id
if params.InitialSourceConnectionID != s.handshakeDestConnID {
return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", s.handshakeDestConnID, params.InitialSourceConnectionID)
}
if s.perspective == protocol.PerspectiveServer {
return nil
}
// check the original_destination_connection_id
if params.OriginalDestinationConnectionID != s.origDestConnID {
return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", s.origDestConnID, params.OriginalDestinationConnectionID)
}
if s.retrySrcConnID != nil { // a Retry was performed
if params.RetrySourceConnectionID == nil {
return errors.New("missing retry_source_connection_id")
}
if *params.RetrySourceConnectionID != *s.retrySrcConnID {
return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", s.retrySrcConnID, *params.RetrySourceConnectionID)
}
} else if params.RetrySourceConnectionID != nil {
return errors.New("received retry_source_connection_id, although no Retry was performed")
}
return nil
}
func (s *connection) applyTransportParameters() {
params := s.peerParams
// Our local idle timeout will always be > 0.
s.idleTimeout = utils.MinNonZeroDuration(s.config.MaxIdleTimeout, params.MaxIdleTimeout)
s.keepAliveInterval = utils.Min(s.config.KeepAlivePeriod, utils.Min(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
s.streamsMap.UpdateLimits(params)
s.frameParser.SetAckDelayExponent(params.AckDelayExponent)
s.connFlowController.UpdateSendWindow(params.InitialMaxData)
s.rttStats.SetMaxAckDelay(params.MaxAckDelay)
s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
if params.StatelessResetToken != nil {
s.connIDManager.SetStatelessResetToken(*params.StatelessResetToken)
}
// We don't support connection migration yet, so we don't have any use for the preferred_address.
if params.PreferredAddress != nil {
// Retire the connection ID.
s.connIDManager.AddFromPreferredAddress(params.PreferredAddress.ConnectionID, params.PreferredAddress.StatelessResetToken)
}
}
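// triggerSending packs and sends packets according to the send mode reported by the
// sentPacketHandler: regular packets, a single ACK-only packet when congestion or pacing
// limited, or PTO probe packets for the respective encryption level.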
func (s *connection) triggerSending() error {
s.pacingDeadline = time.Time{}
now := time.Now()
sendMode := s.sentPacketHandler.SendMode(now)
//nolint:exhaustive // No need to handle pacing limited here.
switch sendMode {
case ackhandler.SendAny:
return s.sendPackets(now)
case ackhandler.SendNone:
return nil
case ackhandler.SendPacingLimited:
deadline := s.sentPacketHandler.TimeUntilSend()
if deadline.IsZero() {
deadline = deadlineSendImmediately
}
s.pacingDeadline = deadline
// Allow sending of an ACK if we're pacing limited.
// This makes sure that a peer that is mostly receiving data (and thus has an inaccurate cwnd estimate)
// sends enough ACKs to allow its peer to utilize the bandwidth.
fallthrough
case ackhandler.SendAck:
// We can at most send a single ACK-only packet.
// There will only be a new ACK after receiving new packets.
// SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
return s.maybeSendAckOnlyPacket(now)
case ackhandler.SendPTOInitial:
if err := s.sendProbePacket(protocol.EncryptionInitial, now); err != nil {
return err
}
if s.sendQueue.WouldBlock() {
s.scheduleSending()
return nil
}
return s.triggerSending()
case ackhandler.SendPTOHandshake:
if err := s.sendProbePacket(protocol.EncryptionHandshake, now); err != nil {
return err
}
if s.sendQueue.WouldBlock() {
s.scheduleSending()
return nil
}
return s.triggerSending()
case ackhandler.SendPTOAppData:
if err := s.sendProbePacket(protocol.Encryption1RTT, now); err != nil {
return err
}
if s.sendQueue.WouldBlock() {
s.scheduleSending()
return nil
}
return s.triggerSending()
default:
return fmt.Errorf("BUG: invalid send mode %d", sendMode)
}
}
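// sendPackets sends a Path MTU probe if one is due, queues flow control frames and
// post-handshake CRYPTO data, and then either packs a coalesced packet (before handshake
// confirmation) or appends 1-RTT packets, using GSO if the connection supports it.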
func (s *connection) sendPackets(now time.Time) error {
// Path MTU Discovery
// Can't use GSO, since we need to send a single packet that's larger than our current maximum size.
// Performance-wise, this doesn't matter, since we only send a very small (<10) number of
// MTU probe packets per connection.
if s.handshakeConfirmed && s.mtuDiscoverer != nil && s.mtuDiscoverer.ShouldSendProbe(now) {
ping, size := s.mtuDiscoverer.GetPing()
p, buf, err := s.packer.PackMTUProbePacket(ping, size, s.version)
if err != nil {
return err
}
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buf.Len(), false)
s.registerPackedShortHeaderPacket(p, now)
s.sendQueue.Send(buf, buf.Len())
// This is kind of a hack. We need to trigger sending again somehow.
s.pacingDeadline = deadlineSendImmediately
return nil
}
if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked {
s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset})
}
s.windowUpdateQueue.QueueAll()
if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil {
s.queueControlFrame(cf)
}
if !s.handshakeConfirmed {
packet, err := s.packer.PackCoalescedPacket(false, s.mtuDiscoverer.CurrentSize(), s.version)
if err != nil || packet == nil {
return err
}
s.sentFirstPacket = true
if err := s.sendPackedCoalescedPacket(packet, now); err != nil {
return err
}
sendMode := s.sentPacketHandler.SendMode(now)
if sendMode == ackhandler.SendPacingLimited {
s.resetPacingDeadline()
} else if sendMode == ackhandler.SendAny {
s.pacingDeadline = deadlineSendImmediately
}
return nil
}
if s.conn.capabilities().GSO {
return s.sendPacketsWithGSO(now)
}
return s.sendPacketsWithoutGSO(now)
}
func (s *connection) sendPacketsWithoutGSO(now time.Time) error {
for {
buf := getPacketBuffer()
if _, err := s.appendPacket(buf, s.mtuDiscoverer.CurrentSize(), now); err != nil {
if err == errNothingToPack {
buf.Release()
return nil
}
return err
}
s.sendQueue.Send(buf, buf.Len())
if s.sendQueue.WouldBlock() {
return nil
}
sendMode := s.sentPacketHandler.SendMode(now)
if sendMode == ackhandler.SendPacingLimited {
s.resetPacingDeadline()
return nil
}
if sendMode != ackhandler.SendAny {
return nil
}
// Prioritize receiving of packets over sending out more packets.
if len(s.receivedPackets) > 0 {
s.pacingDeadline = deadlineSendImmediately
return nil
}
}
}
func (s *connection) sendPacketsWithGSO(now time.Time) error {
buf := getLargePacketBuffer()
maxSize := s.mtuDiscoverer.CurrentSize()
for {
var dontSendMore bool
size, err := s.appendPacket(buf, maxSize, now)
if err != nil {
if err != errNothingToPack {
return err
}
if buf.Len() == 0 {
buf.Release()
return nil
}
dontSendMore = true
}
if !dontSendMore {
sendMode := s.sentPacketHandler.SendMode(now)
if sendMode == ackhandler.SendPacingLimited {
s.resetPacingDeadline()
}
if sendMode != ackhandler.SendAny {
dontSendMore = true
}
}
// Append another packet if
// 1. The congestion controller and pacer allow sending more
// 2. The last packet appended was a full-size packet
// 3. We still have enough space for another full-size packet in the buffer
if !dontSendMore && size == maxSize && buf.Len()+maxSize <= buf.Cap() {
continue
}
s.sendQueue.Send(buf, maxSize)
if dontSendMore {
return nil
}
if s.sendQueue.WouldBlock() {
return nil
}
// Prioritize receiving of packets over sending out more packets.
if len(s.receivedPackets) > 0 {
s.pacingDeadline = deadlineSendImmediately
return nil
}
buf = getLargePacketBuffer()
}
}
func (s *connection) resetPacingDeadline() {
deadline := s.sentPacketHandler.TimeUntilSend()
if deadline.IsZero() {
deadline = deadlineSendImmediately
}
s.pacingDeadline = deadline
}
func (s *connection) maybeSendAckOnlyPacket(now time.Time) error {
if !s.handshakeConfirmed {
packet, err := s.packer.PackCoalescedPacket(true, s.mtuDiscoverer.CurrentSize(), s.version)
if err != nil {
return err
}
if packet == nil {
return nil
}
return s.sendPackedCoalescedPacket(packet, time.Now())
}
p, buf, err := s.packer.PackAckOnlyPacket(s.mtuDiscoverer.CurrentSize(), s.version)
if err != nil {
if err == errNothingToPack {
return nil
}
return err
}
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buf.Len(), false)
s.registerPackedShortHeaderPacket(p, now)
s.sendQueue.Send(buf, buf.Len())
return nil
}
func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time.Time) error {
// Queue probe packets until we actually send out a packet,
// or until there are no more packets to queue.
var packet *coalescedPacket
for {
if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued {
break
}
var err error
packet, err = s.packer.MaybePackProbePacket(encLevel, s.mtuDiscoverer.CurrentSize(), s.version)
if err != nil {
return err
}
if packet != nil {
break
}
}
if packet == nil {
s.retransmissionQueue.AddPing(encLevel)
var err error
packet, err = s.packer.MaybePackProbePacket(encLevel, s.mtuDiscoverer.CurrentSize(), s.version)
if err != nil {
return err
}
}
if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) {
return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel)
}
return s.sendPackedCoalescedPacket(packet, now)
}
// appendPacket appends a new packet to the given packetBuffer.
// If there was nothing to pack, the returned size is 0.
func (s *connection) appendPacket(buf *packetBuffer, maxSize protocol.ByteCount, now time.Time) (protocol.ByteCount, error) {
startLen := buf.Len()
p, err := s.packer.AppendPacket(buf, maxSize, s.version)
if err != nil {
return 0, err
}
size := buf.Len() - startLen
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, size, false)
s.registerPackedShortHeaderPacket(p, now)
return size, nil
}
func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, now time.Time) {
if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(p.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(p.Frames)) {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
largestAcked := protocol.InvalidPacketNumber
if p.Ack != nil {
largestAcked = p.Ack.LargestAcked()
}
s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, p.Length, p.IsPathMTUProbePacket)
s.connIDManager.SentPacket()
}
func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, now time.Time) error {
s.logCoalescedPacket(packet)
for _, p := range packet.longHdrPackets {
if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
largestAcked := protocol.InvalidPacketNumber
if p.ack != nil {
largestAcked = p.ack.LargestAcked()
}
s.sentPacketHandler.SentPacket(now, p.header.PacketNumber, largestAcked, p.streamFrames, p.frames, p.EncryptionLevel(), p.length, false)
if s.perspective == protocol.PerspectiveClient && p.EncryptionLevel() == protocol.EncryptionHandshake {
// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
// See Section 4.9.1 of RFC 9001.
if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
return err
}
}
}
if p := packet.shortHdrPacket; p != nil {
if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
largestAcked := protocol.InvalidPacketNumber
if p.Ack != nil {
largestAcked = p.Ack.LargestAcked()
}
s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, p.Length, p.IsPathMTUProbePacket)
}
s.connIDManager.SentPacket()
s.sendQueue.Send(packet.buffer, packet.buffer.Len())
return nil
}
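// sendConnectionClose packs and sends a CONNECTION_CLOSE for the given transport or
// application error. It returns the raw packet, so that handleCloseError can hand it to
// connIDGenerator.ReplaceWithClosed.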
func (s *connection) sendConnectionClose(e error) ([]byte, error) {
var packet *coalescedPacket
var err error
var transportErr *qerr.TransportError
var applicationErr *qerr.ApplicationError
if errors.As(e, &transportErr) {
packet, err = s.packer.PackConnectionClose(transportErr, s.mtuDiscoverer.CurrentSize(), s.version)
} else if errors.As(e, &applicationErr) {
packet, err = s.packer.PackApplicationClose(applicationErr, s.mtuDiscoverer.CurrentSize(), s.version)
} else {
packet, err = s.packer.PackConnectionClose(&qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()),
}, s.mtuDiscoverer.CurrentSize(), s.version)
}
if err != nil {
return nil, err
}
s.logCoalescedPacket(packet)
return packet.buffer.Data, s.conn.Write(packet.buffer.Data, packet.buffer.Len())
}
func (s *connection) logLongHeaderPacket(p *longHeaderPacket) {
// quic-go logging
if s.logger.Debug() {
p.header.Log(s.logger)
if p.ack != nil {
wire.LogFrame(s.logger, p.ack, true)
}
for _, frame := range p.frames {
wire.LogFrame(s.logger, frame.Frame, true)
}
for _, frame := range p.streamFrames {
wire.LogFrame(s.logger, frame.Frame, true)
}
}
// tracing
if s.tracer != nil {
frames := make([]logging.Frame, 0, len(p.frames))
for _, f := range p.frames {
frames = append(frames, logutils.ConvertFrame(f.Frame))
}
for _, f := range p.streamFrames {
frames = append(frames, logutils.ConvertFrame(f.Frame))
}
var ack *logging.AckFrame
if p.ack != nil {
ack = logutils.ConvertAckFrame(p.ack)
}
s.tracer.SentLongHeaderPacket(p.header, p.length, ack, frames)
}
}
func (s *connection) logShortHeaderPacket(
destConnID protocol.ConnectionID,
ackFrame *wire.AckFrame,
frames []ackhandler.Frame,
streamFrames []ackhandler.StreamFrame,
pn protocol.PacketNumber,
pnLen protocol.PacketNumberLen,
kp protocol.KeyPhaseBit,
size protocol.ByteCount,
isCoalesced bool,
) {
if s.logger.Debug() && !isCoalesced {
s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT", pn, size, s.logID)
}
// quic-go logging
if s.logger.Debug() {
wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp)
if ackFrame != nil {
wire.LogFrame(s.logger, ackFrame, true)
}
for _, f := range frames {
wire.LogFrame(s.logger, f.Frame, true)
}
for _, f := range streamFrames {
wire.LogFrame(s.logger, f.Frame, true)
}
}
// tracing
if s.tracer != nil {
fs := make([]logging.Frame, 0, len(frames)+len(streamFrames))
for _, f := range frames {
fs = append(fs, logutils.ConvertFrame(f.Frame))
}
for _, f := range streamFrames {
fs = append(fs, logutils.ConvertFrame(f.Frame))
}
var ack *logging.AckFrame
if ackFrame != nil {
ack = logutils.ConvertAckFrame(ackFrame)
}
s.tracer.SentShortHeaderPacket(
&logging.ShortHeader{
DestConnectionID: destConnID,
PacketNumber: pn,
PacketNumberLen: pnLen,
KeyPhase: kp,
},
size,
ack,
fs,
)
}
}
func (s *connection) logCoalescedPacket(packet *coalescedPacket) {
if s.logger.Debug() {
// There's a short period between dropping both Initial and Handshake keys and completion of the handshake,
// during which we might call PackCoalescedPacket but just pack a short header packet.
if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil {
s.logShortHeaderPacket(
packet.shortHdrPacket.DestConnID,
packet.shortHdrPacket.Ack,
packet.shortHdrPacket.Frames,
packet.shortHdrPacket.StreamFrames,
packet.shortHdrPacket.PacketNumber,
packet.shortHdrPacket.PacketNumberLen,
packet.shortHdrPacket.KeyPhase,
packet.shortHdrPacket.Length,
false,
)
return
}
if len(packet.longHdrPackets) > 1 {
s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID)
} else {
s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel())
}
}
for _, p := range packet.longHdrPackets {
s.logLongHeaderPacket(p)
}
if p := packet.shortHdrPacket; p != nil {
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, p.Length, true)
}
}
// AcceptStream returns the next stream opened by the peer
func (s *connection) AcceptStream(ctx context.Context) (Stream, error) {
return s.streamsMap.AcceptStream(ctx)
}
func (s *connection) AcceptUniStream(ctx context.Context) (ReceiveStream, error) {
return s.streamsMap.AcceptUniStream(ctx)
}
// OpenStream opens a stream
func (s *connection) OpenStream() (Stream, error) {
return s.streamsMap.OpenStream()
}
func (s *connection) OpenStreamSync(ctx context.Context) (Stream, error) {
return s.streamsMap.OpenStreamSync(ctx)
}
func (s *connection) OpenUniStream() (SendStream, error) {
return s.streamsMap.OpenUniStream()
}
func (s *connection) OpenUniStreamSync(ctx context.Context) (SendStream, error) {
return s.streamsMap.OpenUniStreamSync(ctx)
}
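// Illustrative usage sketch (not part of the original source; variable names are hypothetical):
// opening and accepting streams on a Connection from application code.
//
//	str, err := conn.OpenStreamSync(ctx) // blocks until a new bidirectional stream can be opened
//	if err == nil {
//		_, _ = str.Write([]byte("hello"))
//		_ = str.Close()
//	}
//	incoming, err := conn.AcceptStream(ctx) // blocks until the peer opens the next stream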
func (s *connection) newFlowController(id protocol.StreamID) flowcontrol.StreamFlowController {
initialSendWindow := s.peerParams.InitialMaxStreamDataUni
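// The peer's transport parameters are defined from the peer's point of view (RFC 9000, Section 18.2):
// initial_max_stream_data_bidi_remote applies to bidirectional streams initiated by us,
// initial_max_stream_data_bidi_local to bidirectional streams initiated by the peer.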
if id.Type() == protocol.StreamTypeBidi {
if id.InitiatedBy() == s.perspective {
initialSendWindow = s.peerParams.InitialMaxStreamDataBidiRemote
} else {
initialSendWindow = s.peerParams.InitialMaxStreamDataBidiLocal
}
}
return flowcontrol.NewStreamFlowController(
id,
s.connFlowController,
protocol.ByteCount(s.config.InitialStreamReceiveWindow),
protocol.ByteCount(s.config.MaxStreamReceiveWindow),
initialSendWindow,
s.onHasStreamWindowUpdate,
s.rttStats,
s.logger,
)
}
// scheduleSending signals that we have data for sending
func (s *connection) scheduleSending() {
select {
case s.sendingScheduled <- struct{}{}:
default:
}
}
// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
// The logging.PacketType is only used for logging purposes.
func (s *connection) tryQueueingUndecryptablePacket(p receivedPacket, pt logging.PacketType) {
if s.handshakeComplete {
panic("shouldn't queue undecryptable packets after handshake completion")
}
if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
if s.tracer != nil {
s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropDOSPrevention)
}
s.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", p.Size())
return
}
s.logger.Infof("Queueing packet (%d bytes) for later decryption", p.Size())
if s.tracer != nil {
s.tracer.BufferedPacket(pt, p.Size())
}
s.undecryptablePackets = append(s.undecryptablePackets, p)
}
func (s *connection) queueControlFrame(f wire.Frame) {
s.framer.QueueControlFrame(f)
s.scheduleSending()
}
func (s *connection) onHasStreamWindowUpdate(id protocol.StreamID) {
s.windowUpdateQueue.AddStream(id)
s.scheduleSending()
}
func (s *connection) onHasConnectionWindowUpdate() {
s.windowUpdateQueue.AddConnection()
s.scheduleSending()
}
func (s *connection) onHasStreamData(id protocol.StreamID) {
s.framer.AddActiveStream(id)
s.scheduleSending()
}
func (s *connection) onStreamCompleted(id protocol.StreamID) {
if err := s.streamsMap.DeleteStream(id); err != nil {
s.closeLocal(err)
}
}
func (s *connection) SendMessage(p []byte) error {
if !s.supportsDatagrams() {
return errors.New("datagram support disabled")
}
f := &wire.DatagramFrame{DataLenPresent: true}
if protocol.ByteCount(len(p)) > f.MaxDataLen(s.peerParams.MaxDatagramFrameSize, s.version) {
return errors.New("message too large")
}
f.Data = make([]byte, len(p))
copy(f.Data, p)
return s.datagramQueue.AddAndWait(f)
}
func (s *connection) ReceiveMessage(ctx context.Context) ([]byte, error) {
if !s.config.EnableDatagrams {
return nil, errors.New("datagram support disabled")
}
return s.datagramQueue.Receive(ctx)
}
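// Illustrative usage sketch (not part of the original source): exchanging QUIC datagrams,
// assuming datagram support was enabled in the Config on both endpoints.
//
//	if err := conn.SendMessage([]byte("ping")); err != nil {
//		// the peer may not support datagrams, or the payload may exceed the maximum datagram size
//	}
//	msg, err := conn.ReceiveMessage(ctx) // blocks until a datagram is received or ctx is done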
func (s *connection) LocalAddr() net.Addr {
return s.conn.LocalAddr()
}
func (s *connection) RemoteAddr() net.Addr {
return s.conn.RemoteAddr()
}
func (s *connection) getPerspective() protocol.Perspective {
return s.perspective
}
func (s *connection) GetVersion() protocol.VersionNumber {
return s.version
}
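// NextConnection blocks until the handshake has completed and returns the connection
// with freshly reset stream maps; this is presumably used to transition from an early
// (0-RTT) connection to the fully established one.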
func (s *connection) NextConnection() Connection {
<-s.HandshakeComplete()
s.streamsMap.UseResetMaps()
return s
}
golang-github-lucas-clemente-quic-go-0.38.2/connection_test.go 0000664 0000000 0000000 00000407616 14545452366 0024355 0 ustar 00root root 0000000 0000000 package quic
import (
"bytes"
"context"
"crypto/rand"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"runtime/pprof"
"strings"
"time"
"github.com/quic-go/quic-go/internal/ackhandler"
"github.com/quic-go/quic-go/internal/handshake"
"github.com/quic-go/quic-go/internal/mocks"
mockackhandler "github.com/quic-go/quic-go/internal/mocks/ackhandler"
mocklogging "github.com/quic-go/quic-go/internal/mocks/logging"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/testutils"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/logging"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
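// areConnsRunning reports whether any connection.run goroutine appears in the goroutine
// profile, i.e. whether a connection's run loop is still active.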
func areConnsRunning() bool {
var b bytes.Buffer
pprof.Lookup("goroutine").WriteTo(&b, 1)
return strings.Contains(b.String(), "quic-go.(*connection).run")
}
var _ = Describe("Connection", func() {
var (
conn *connection
connRunner *MockConnRunner
mconn *MockSendConn
streamManager *MockStreamManager
packer *MockPacker
cryptoSetup *mocks.MockCryptoSetup
tracer *mocklogging.MockConnectionTracer
capabilities connCapabilities
)
remoteAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337}
localAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 7331}
srcConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8})
destConnID := protocol.ParseConnectionID([]byte{8, 7, 6, 5, 4, 3, 2, 1})
clientDestConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
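// getCoalescedPacket builds a minimal coalesced packet with the given packet number,
// containing either a single long header packet or a single short header packet and a 6-byte payload.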
getCoalescedPacket := func(pn protocol.PacketNumber, isLongHeader bool) *coalescedPacket {
buffer := getPacketBuffer()
buffer.Data = append(buffer.Data, []byte("foobar")...)
packet := &coalescedPacket{buffer: buffer}
if isLongHeader {
packet.longHdrPackets = []*longHeaderPacket{{
header: &wire.ExtendedHeader{
Header: wire.Header{},
PacketNumber: pn,
},
length: 6, // foobar
}}
} else {
packet.shortHdrPacket = &shortHeaderPacket{
PacketNumber: pn,
Length: 6,
}
}
return packet
}
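// expectReplaceWithClosed expects the connection to be replaced by a closed connection
// for srcConnID and, if more than one connection ID is passed, also for clientDestConnID.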
expectReplaceWithClosed := func() {
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(connIDs []protocol.ConnectionID, _ protocol.Perspective, _ []byte) {
Expect(connIDs).To(ContainElement(srcConnID))
if len(connIDs) > 1 {
Expect(connIDs).To(ContainElement(clientDestConnID))
}
})
}
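// expectAppendPacket sets up the packer mock to append the given bytes to the packet buffer
// and return the provided shortHeaderPacket metadata.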
expectAppendPacket := func(packer *MockPacker, p shortHeaderPacket, b []byte) *gomock.Call {
return packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), Version1).DoAndReturn(func(buf *packetBuffer, _ protocol.ByteCount, _ protocol.VersionNumber) (shortHeaderPacket, error) {
buf.Data = append(buf.Data, b...)
return p, nil
})
}
enableGSO := func() { capabilities = connCapabilities{GSO: true} }
BeforeEach(func() {
Eventually(areConnsRunning).Should(BeFalse())
connRunner = NewMockConnRunner(mockCtrl)
mconn = NewMockSendConn(mockCtrl)
mconn.EXPECT().capabilities().DoAndReturn(func() connCapabilities { return capabilities }).AnyTimes()
mconn.EXPECT().RemoteAddr().Return(remoteAddr).AnyTimes()
mconn.EXPECT().LocalAddr().Return(localAddr).AnyTimes()
tokenGenerator, err := handshake.NewTokenGenerator(rand.Reader)
Expect(err).ToNot(HaveOccurred())
tracer = mocklogging.NewMockConnectionTracer(mockCtrl)
tracer.EXPECT().NegotiatedVersion(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
tracer.EXPECT().SentTransportParameters(gomock.Any())
tracer.EXPECT().UpdatedKeyFromTLS(gomock.Any(), gomock.Any()).AnyTimes()
tracer.EXPECT().UpdatedCongestionState(gomock.Any())
conn = newConnection(
mconn,
connRunner,
protocol.ConnectionID{},
nil,
clientDestConnID,
destConnID,
srcConnID,
&protocol.DefaultConnectionIDGenerator{},
protocol.StatelessResetToken{},
populateServerConfig(&Config{DisablePathMTUDiscovery: true}),
&tls.Config{},
tokenGenerator,
false,
tracer,
1234,
utils.DefaultLogger,
protocol.Version1,
).(*connection)
streamManager = NewMockStreamManager(mockCtrl)
conn.streamsMap = streamManager
packer = NewMockPacker(mockCtrl)
conn.packer = packer
cryptoSetup = mocks.NewMockCryptoSetup(mockCtrl)
conn.cryptoStreamHandler = cryptoSetup
conn.handshakeComplete = true
conn.idleTimeout = time.Hour
})
AfterEach(func() {
Eventually(areConnsRunning).Should(BeFalse())
capabilities = connCapabilities{}
})
Context("frame handling", func() {
Context("handling STREAM frames", func() {
It("passes STREAM frames to the stream", func() {
f := &wire.StreamFrame{
StreamID: 5,
Data: []byte{0xde, 0xca, 0xfb, 0xad},
}
str := NewMockReceiveStreamI(mockCtrl)
str.EXPECT().handleStreamFrame(f)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(5)).Return(str, nil)
Expect(conn.handleStreamFrame(f)).To(Succeed())
})
It("returns errors", func() {
testErr := errors.New("test err")
f := &wire.StreamFrame{
StreamID: 5,
Data: []byte{0xde, 0xca, 0xfb, 0xad},
}
str := NewMockReceiveStreamI(mockCtrl)
str.EXPECT().handleStreamFrame(f).Return(testErr)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(5)).Return(str, nil)
Expect(conn.handleStreamFrame(f)).To(MatchError(testErr))
})
It("ignores STREAM frames for closed streams", func() {
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(5)).Return(nil, nil) // for closed streams, the streamManager returns nil
Expect(conn.handleStreamFrame(&wire.StreamFrame{
StreamID: 5,
Data: []byte("foobar"),
})).To(Succeed())
})
})
Context("handling ACK frames", func() {
It("informs the SentPacketHandler about ACKs", func() {
f := &wire.AckFrame{AckRanges: []wire.AckRange{{Smallest: 2, Largest: 3}}}
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().ReceivedAck(f, protocol.EncryptionHandshake, gomock.Any())
conn.sentPacketHandler = sph
err := conn.handleAckFrame(f, protocol.EncryptionHandshake)
Expect(err).ToNot(HaveOccurred())
})
})
Context("handling RESET_STREAM frames", func() {
It("closes the streams for writing", func() {
f := &wire.ResetStreamFrame{
StreamID: 555,
ErrorCode: 42,
FinalSize: 0x1337,
}
str := NewMockReceiveStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(555)).Return(str, nil)
str.EXPECT().handleResetStreamFrame(f)
err := conn.handleResetStreamFrame(f)
Expect(err).ToNot(HaveOccurred())
})
It("returns errors", func() {
f := &wire.ResetStreamFrame{
StreamID: 7,
FinalSize: 0x1337,
}
testErr := errors.New("flow control violation")
str := NewMockReceiveStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(7)).Return(str, nil)
str.EXPECT().handleResetStreamFrame(f).Return(testErr)
err := conn.handleResetStreamFrame(f)
Expect(err).To(MatchError(testErr))
})
It("ignores RESET_STREAM frames for closed streams", func() {
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(3)).Return(nil, nil)
Expect(conn.handleFrame(&wire.ResetStreamFrame{
StreamID: 3,
ErrorCode: 42,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
})
})
Context("handling MAX_DATA and MAX_STREAM_DATA frames", func() {
var connFC *mocks.MockConnectionFlowController
BeforeEach(func() {
connFC = mocks.NewMockConnectionFlowController(mockCtrl)
conn.connFlowController = connFC
})
It("updates the flow control window of a stream", func() {
f := &wire.MaxStreamDataFrame{
StreamID: 12345,
MaximumStreamData: 0x1337,
}
str := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(12345)).Return(str, nil)
str.EXPECT().updateSendWindow(protocol.ByteCount(0x1337))
Expect(conn.handleMaxStreamDataFrame(f)).To(Succeed())
})
It("updates the flow control window of the connection", func() {
offset := protocol.ByteCount(0x800000)
connFC.EXPECT().UpdateSendWindow(offset)
conn.handleMaxDataFrame(&wire.MaxDataFrame{MaximumData: offset})
})
It("ignores MAX_STREAM_DATA frames for a closed stream", func() {
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(10)).Return(nil, nil)
Expect(conn.handleFrame(&wire.MaxStreamDataFrame{
StreamID: 10,
MaximumStreamData: 1337,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
})
})
Context("handling MAX_STREAM_ID frames", func() {
It("passes the frame to the streamsMap", func() {
f := &wire.MaxStreamsFrame{
Type: protocol.StreamTypeUni,
MaxStreamNum: 10,
}
streamManager.EXPECT().HandleMaxStreamsFrame(f)
conn.handleMaxStreamsFrame(f)
})
})
Context("handling STOP_SENDING frames", func() {
It("passes the frame to the stream", func() {
f := &wire.StopSendingFrame{
StreamID: 5,
ErrorCode: 10,
}
str := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(5)).Return(str, nil)
str.EXPECT().handleStopSendingFrame(f)
err := conn.handleStopSendingFrame(f)
Expect(err).ToNot(HaveOccurred())
})
It("ignores STOP_SENDING frames for a closed stream", func() {
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(3)).Return(nil, nil)
Expect(conn.handleFrame(&wire.StopSendingFrame{
StreamID: 3,
ErrorCode: 1337,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
})
})
It("handles NEW_CONNECTION_ID frames", func() {
connID := protocol.ParseConnectionID([]byte{1, 2, 3, 4})
Expect(conn.handleFrame(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: connID,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
Expect(conn.connIDManager.queue.Back().Value.ConnectionID).To(Equal(connID))
})
It("handles PING frames", func() {
err := conn.handleFrame(&wire.PingFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("rejects PATH_RESPONSE frames", func() {
err := conn.handleFrame(&wire.PathResponseFrame{Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8}}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).To(MatchError("unexpected PATH_RESPONSE frame"))
})
It("handles PATH_CHALLENGE frames", func() {
data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
err := conn.handleFrame(&wire.PathChallengeFrame{Data: data}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).ToNot(HaveOccurred())
frames, _ := conn.framer.AppendControlFrames(nil, 1000, protocol.Version1)
Expect(frames).To(Equal([]ackhandler.Frame{{Frame: &wire.PathResponseFrame{Data: data}}}))
})
It("rejects NEW_TOKEN frames", func() {
err := conn.handleNewTokenFrame(&wire.NewTokenFrame{})
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&qerr.TransportError{}))
Expect(err.(*qerr.TransportError).ErrorCode).To(Equal(qerr.ProtocolViolation))
})
It("handles BLOCKED frames", func() {
err := conn.handleFrame(&wire.DataBlockedFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("handles STREAM_BLOCKED frames", func() {
err := conn.handleFrame(&wire.StreamDataBlockedFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("handles STREAMS_BLOCKED frames", func() {
err := conn.handleFrame(&wire.StreamsBlockedFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("handles CONNECTION_CLOSE frames, with a transport error code", func() {
expectedErr := &qerr.TransportError{
Remote: true,
ErrorCode: qerr.StreamLimitError,
ErrorMessage: "foobar",
}
streamManager.EXPECT().CloseWithError(expectedErr)
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(connIDs []protocol.ConnectionID, _ protocol.Perspective, _ []byte) {
Expect(connIDs).To(ConsistOf(clientDestConnID, srcConnID))
})
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(expectedErr),
tracer.EXPECT().Close(),
)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
Expect(conn.run()).To(MatchError(expectedErr))
}()
Expect(conn.handleFrame(&wire.ConnectionCloseFrame{
ErrorCode: uint64(qerr.StreamLimitError),
ReasonPhrase: "foobar",
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("handles CONNECTION_CLOSE frames, with an application error code", func() {
testErr := &qerr.ApplicationError{
Remote: true,
ErrorCode: 0x1337,
ErrorMessage: "foobar",
}
streamManager.EXPECT().CloseWithError(testErr)
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(connIDs []protocol.ConnectionID, _ protocol.Perspective, _ []byte) {
Expect(connIDs).To(ConsistOf(clientDestConnID, srcConnID))
})
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(testErr),
tracer.EXPECT().Close(),
)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
Expect(conn.run()).To(MatchError(testErr))
}()
ccf := &wire.ConnectionCloseFrame{
ErrorCode: 0x1337,
ReasonPhrase: "foobar",
IsApplicationError: true,
}
Expect(conn.handleFrame(ccf, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
Eventually(conn.Context().Done()).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(testErr))
})
It("errors on HANDSHAKE_DONE frames", func() {
Expect(conn.handleHandshakeDoneFrame()).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received a HANDSHAKE_DONE frame",
}))
})
})
It("tells its versions", func() {
conn.version = 4242
Expect(conn.GetVersion()).To(Equal(protocol.VersionNumber(4242)))
})
Context("closing", func() {
var (
runErr chan error
expectedRunErr error
)
BeforeEach(func() {
runErr = make(chan error, 1)
expectedRunErr = nil
})
AfterEach(func() {
if expectedRunErr != nil {
Eventually(runErr).Should(Receive(MatchError(expectedRunErr)))
} else {
Eventually(runErr).Should(Receive())
}
})
runConn := func() {
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
runErr <- conn.run()
}()
Eventually(areConnsRunning).Should(BeTrue())
}
It("shuts down without error", func() {
conn.handshakeComplete = true
runConn()
streamManager.EXPECT().CloseWithError(&qerr.ApplicationError{})
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
buffer := getPacketBuffer()
buffer.Data = append(buffer.Data, []byte("connection close")...)
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(e *qerr.ApplicationError, _ protocol.ByteCount, _ protocol.VersionNumber) (*coalescedPacket, error) {
Expect(e.ErrorCode).To(BeEquivalentTo(qerr.NoError))
Expect(e.ErrorMessage).To(BeEmpty())
return &coalescedPacket{buffer: buffer}, nil
})
mconn.EXPECT().Write([]byte("connection close"), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var appErr *ApplicationError
Expect(errors.As(e, &appErr)).To(BeTrue())
Expect(appErr.Remote).To(BeFalse())
Expect(appErr.ErrorCode).To(BeZero())
}),
tracer.EXPECT().Close(),
)
conn.shutdown()
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
})
It("only closes once", func() {
runConn()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
conn.shutdown()
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
})
It("closes with an error", func() {
runConn()
expectedErr := &qerr.ApplicationError{
ErrorCode: 0x1337,
ErrorMessage: "test error",
}
streamManager.EXPECT().CloseWithError(expectedErr)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackApplicationClose(expectedErr, gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(expectedErr),
tracer.EXPECT().Close(),
)
conn.CloseWithError(0x1337, "test error")
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(expectedErr))
})
It("includes the frame type in transport-level close frames", func() {
runConn()
expectedErr := &qerr.TransportError{
ErrorCode: 0x1337,
FrameType: 0x42,
ErrorMessage: "test error",
}
streamManager.EXPECT().CloseWithError(expectedErr)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(expectedErr, gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(expectedErr),
tracer.EXPECT().Close(),
)
conn.closeLocal(expectedErr)
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
})
It("destroys the connection", func() {
runConn()
testErr := errors.New("close")
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
// don't EXPECT any calls to mconn.Write()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var transportErr *TransportError
Expect(errors.As(e, &transportErr)).To(BeTrue())
Expect(transportErr.Remote).To(BeFalse())
Expect(transportErr.ErrorCode).To(Equal(qerr.InternalError))
}),
tracer.EXPECT().Close(),
)
conn.destroy(testErr)
Eventually(areConnsRunning).Should(BeFalse())
expectedRunErr = &qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: testErr.Error(),
}
})
It("cancels the context when the run loop exists", func() {
runConn()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
returned := make(chan struct{})
go func() {
defer GinkgoRecover()
ctx := conn.Context()
<-ctx.Done()
Expect(ctx.Err()).To(MatchError(context.Canceled))
close(returned)
}()
Consistently(returned).ShouldNot(BeClosed())
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(returned).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(context.Canceled))
})
It("doesn't send any more packets after receiving a CONNECTION_CLOSE", func() {
unpacker := NewMockUnpacker(mockCtrl)
conn.handshakeConfirmed = true
conn.unpacker = unpacker
runConn()
cryptoSetup.EXPECT().Close()
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
b, err := wire.AppendShortHeader(nil, srcConnID, 42, protocol.PacketNumberLen2, protocol.KeyPhaseOne)
Expect(err).ToNot(HaveOccurred())
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(time.Time, []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
b, err := (&wire.ConnectionCloseFrame{ErrorCode: uint64(qerr.StreamLimitError)}).Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return 3, protocol.PacketNumberLen2, protocol.KeyPhaseOne, b, nil
})
gomock.InOrder(
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any()),
tracer.EXPECT().ClosedConnection(gomock.Any()),
tracer.EXPECT().Close(),
)
// don't EXPECT any calls to packer.PackPacket()
conn.handlePacket(receivedPacket{
rcvTime: time.Now(),
remoteAddr: &net.UDPAddr{},
buffer: getPacketBuffer(),
data: b,
})
// Consistently(pack).ShouldNot(Receive())
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes when the sendQueue encounters an error", func() {
conn.handshakeConfirmed = true
sconn := NewMockSendConn(mockCtrl)
sconn.EXPECT().capabilities().AnyTimes()
sconn.EXPECT().Write(gomock.Any(), gomock.Any()).Return(io.ErrClosedPipe).AnyTimes()
conn.sendQueue = newSendQueue(sconn)
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().Return(time.Now().Add(time.Hour)).AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
// only expect a single SentPacket() call
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
conn.sentPacketHandler = sph
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1}, []byte("foobar"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
runConn()
conn.queueControlFrame(&wire.PingFrame{})
conn.scheduleSending()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes due to a stateless reset", func() {
token := protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
runConn()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var srErr *StatelessResetError
Expect(errors.As(e, &srErr)).To(BeTrue())
Expect(srErr.Token).To(Equal(token))
}),
tracer.EXPECT().Close(),
)
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
conn.destroy(&StatelessResetError{Token: token})
})
})
Context("receiving packets", func() {
var unpacker *MockUnpacker
BeforeEach(func() {
unpacker = NewMockUnpacker(mockCtrl)
conn.unpacker = unpacker
})
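// getShortHeaderPacket and getLongHeaderPacket build raw received packets by serializing
// the respective header, appending the payload and attaching a fresh packet buffer.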
getShortHeaderPacket := func(connID protocol.ConnectionID, pn protocol.PacketNumber, data []byte) receivedPacket {
b, err := wire.AppendShortHeader(nil, connID, pn, protocol.PacketNumberLen2, protocol.KeyPhaseOne)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
data: append(b, data...),
buffer: getPacketBuffer(),
rcvTime: time.Now(),
}
}
getLongHeaderPacket := func(extHdr *wire.ExtendedHeader, data []byte) receivedPacket {
b, err := extHdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
data: append(b, data...),
buffer: getPacketBuffer(),
rcvTime: time.Now(),
}
}
It("drops Retry packets", func() {
p := getLongHeaderPacket(&wire.ExtendedHeader{Header: wire.Header{
Type: protocol.PacketTypeRetry,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Version: conn.version,
Token: []byte("foobar"),
}}, make([]byte, 16) /* Retry integrity tag */)
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, p.Size(), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("drops Version Negotiation packets", func() {
b := wire.ComposeVersionNegotiation(
protocol.ArbitraryLenConnectionID(srcConnID.Bytes()),
protocol.ArbitraryLenConnectionID(destConnID.Bytes()),
conn.config.Versions,
)
tracer.EXPECT().DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.ByteCount(len(b)), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(receivedPacket{
data: b,
buffer: getPacketBuffer(),
})).To(BeFalse())
})
It("drops packets for which header decryption fails", func() {
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, nil)
p.data[0] ^= 0x40 // unset the QUIC bit
tracer.EXPECT().DroppedPacket(logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("drops packets for which the version is unsupported", func() {
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
Version: conn.version + 1,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, nil)
tracer.EXPECT().DroppedPacket(logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnsupportedVersion)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("drops packets with an unsupported version", func() {
origSupportedVersions := make([]protocol.VersionNumber, len(protocol.SupportedVersions))
copy(origSupportedVersions, protocol.SupportedVersions)
defer func() {
protocol.SupportedVersions = origSupportedVersions
}()
protocol.SupportedVersions = append(protocol.SupportedVersions, conn.version+1)
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Version: conn.version + 1,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, nil)
tracer.EXPECT().DroppedPacket(logging.PacketTypeHandshake, p.Size(), logging.PacketDropUnexpectedVersion)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("informs the ReceivedPacketHandler about non-ack-eliciting packets", func() {
hdr := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: srcConnID,
Version: protocol.Version1,
Length: 1,
},
PacketNumber: 0x37,
PacketNumberLen: protocol.PacketNumberLen1,
}
unpackedHdr := *hdr
unpackedHdr.PacketNumber = 0x1337
packet := getLongHeaderPacket(hdr, nil)
packet.ecn = protocol.ECNCE
rcvTime := time.Now().Add(-10 * time.Second)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), rcvTime, gomock.Any(), conn.version).Return(&unpackedPacket{
encryptionLevel: protocol.EncryptionInitial,
hdr: &unpackedHdr,
data: []byte{0}, // one PADDING frame
}, nil)
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
gomock.InOrder(
rph.EXPECT().IsPotentiallyDuplicate(protocol.PacketNumber(0x1337), protocol.EncryptionInitial),
rph.EXPECT().ReceivedPacket(protocol.PacketNumber(0x1337), protocol.ECNCE, protocol.EncryptionInitial, rcvTime, false),
)
conn.receivedPacketHandler = rph
packet.rcvTime = rcvTime
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), []logging.Frame{})
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
It("informs the ReceivedPacketHandler about ack-eliciting packets", func() {
rcvTime := time.Now().Add(-10 * time.Second)
b, err := (&wire.PingFrame{}).Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
packet := getShortHeaderPacket(srcConnID, 0x37, nil)
packet.ecn = protocol.ECT1
unpacker.EXPECT().UnpackShortHeader(rcvTime, gomock.Any()).Return(protocol.PacketNumber(0x1337), protocol.PacketNumberLen2, protocol.KeyPhaseZero, b, nil)
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
gomock.InOrder(
rph.EXPECT().IsPotentiallyDuplicate(protocol.PacketNumber(0x1337), protocol.Encryption1RTT),
rph.EXPECT().ReceivedPacket(protocol.PacketNumber(0x1337), protocol.ECT1, protocol.Encryption1RTT, rcvTime, true),
)
conn.receivedPacketHandler = rph
packet.rcvTime = rcvTime
tracer.EXPECT().ReceivedShortHeaderPacket(&logging.ShortHeader{PacketNumber: 0x1337, PacketNumberLen: 2, KeyPhase: protocol.KeyPhaseZero}, protocol.ByteCount(len(packet.data)), []logging.Frame{&logging.PingFrame{}})
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
It("drops duplicate packets", func() {
packet := getShortHeaderPacket(srcConnID, 0x37, nil)
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0x1337), protocol.PacketNumberLen2, protocol.KeyPhaseOne, []byte("foobar"), nil)
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
rph.EXPECT().IsPotentiallyDuplicate(protocol.PacketNumber(0x1337), protocol.Encryption1RTT).Return(true)
conn.receivedPacketHandler = rph
tracer.EXPECT().DroppedPacket(logging.PacketType1RTT, protocol.ByteCount(len(packet.data)), logging.PacketDropDuplicate)
Expect(conn.handlePacketImpl(packet)).To(BeFalse())
})
It("drops a packet when unpacking fails", func() {
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).Return(nil, handshake.ErrDecryptionFailed)
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
expectReplaceWithClosed()
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: srcConnID,
Version: conn.version,
Length: 2 + 6,
},
PacketNumber: 0x1337,
PacketNumberLen: protocol.PacketNumberLen2,
}, []byte("foobar"))
tracer.EXPECT().DroppedPacket(logging.PacketTypeHandshake, p.Size(), logging.PacketDropPayloadDecryptError)
conn.handlePacket(p)
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the go routine return
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("close"))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("processes multiple received packets before sending one", func() {
conn.creationTime = time.Now()
var pn protocol.PacketNumber
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
pn++
return pn, protocol.PacketNumberLen2, protocol.KeyPhaseZero, []byte{0} /* PADDING frame */, nil
}).Times(3)
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(hdr *logging.ShortHeader, _ protocol.ByteCount, _ []logging.Frame) {
}).Times(3)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version) // only expect a single call
for i := 0; i < 3; i++ {
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x1337+protocol.PacketNumber(i), []byte("foobar")))
}
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the go routine return
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("close"))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("doesn't processes multiple received packets before sending one before handshake completion", func() {
conn.handshakeComplete = false
conn.creationTime = time.Now()
var pn protocol.PacketNumber
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
pn++
return pn, protocol.PacketNumberLen4, protocol.KeyPhaseZero, []byte{0} /* PADDING frame */, nil
}).Times(3)
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(hdr *logging.ShortHeader, _ protocol.ByteCount, _ []logging.Frame) {
}).Times(3)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Times(3)
for i := 0; i < 3; i++ {
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x1337+protocol.PacketNumber(i), []byte("foobar")))
}
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the go routine return
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("close"))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes the connection when unpacking fails because the reserved bits were incorrect", func() {
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0), protocol.PacketNumberLen(0), protocol.KeyPhaseBit(0), nil, wire.ErrInvalidReservedBits)
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&qerr.TransportError{}))
Expect(err.(*qerr.TransportError).ErrorCode).To(Equal(qerr.ProtocolViolation))
close(done)
}()
expectReplaceWithClosed()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
packet := getShortHeaderPacket(srcConnID, 0x42, nil)
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.handlePacket(packet)
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("ignores packets when unpacking the header fails", func() {
testErr := &headerParseError{errors.New("test error")}
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0), protocol.PacketNumberLen(0), protocol.KeyPhaseBit(0), nil, testErr)
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
runErr := make(chan error)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
runErr <- conn.run()
}()
expectReplaceWithClosed()
tracer.EXPECT().DroppedPacket(logging.PacketType1RTT, gomock.Any(), logging.PacketDropHeaderParseError)
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x42, nil))
Consistently(runErr).ShouldNot(Receive())
// make the go routine return
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes the connection when unpacking fails because of an error other than a decryption error", func() {
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0), protocol.PacketNumberLen(0), protocol.KeyPhaseBit(0), nil, &qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError})
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&qerr.TransportError{}))
Expect(err.(*qerr.TransportError).ErrorCode).To(Equal(qerr.ConnectionIDLimitError))
close(done)
}()
expectReplaceWithClosed()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x42, nil))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("ignores packets with a different source connection ID", func() {
hdr1 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 1,
}
hdr2 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 2,
}
Expect(srcConnID).ToNot(Equal(hdr2.SrcConnectionID))
// Send one packet, which might change the connection ID.
// only EXPECT one call to the unpacker
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).Return(&unpackedPacket{
encryptionLevel: protocol.Encryption1RTT,
hdr: hdr1,
data: []byte{0}, // one PADDING frame
}, nil)
p1 := getLongHeaderPacket(hdr1, nil)
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(p1.data)), gomock.Any())
Expect(conn.handlePacketImpl(p1)).To(BeTrue())
// The next packet has to be ignored, since the source connection ID doesn't match.
p2 := getLongHeaderPacket(hdr2, nil)
tracer.EXPECT().DroppedPacket(logging.PacketTypeInitial, protocol.ByteCount(len(p2.data)), logging.PacketDropUnknownConnectionID)
Expect(conn.handlePacketImpl(p2)).To(BeFalse())
})
It("queues undecryptable packets", func() {
conn.handshakeComplete = false
hdr := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 1,
}
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).Return(nil, handshake.ErrKeysNotYetAvailable)
packet := getLongHeaderPacket(hdr, nil)
tracer.EXPECT().BufferedPacket(logging.PacketTypeHandshake, packet.Size())
Expect(conn.handlePacketImpl(packet)).To(BeFalse())
Expect(conn.undecryptablePackets).To(Equal([]receivedPacket{packet}))
})
Context("updating the remote address", func() {
It("doesn't support connection migration", func() {
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(10), protocol.PacketNumberLen2, protocol.KeyPhaseZero, []byte{0} /* one PADDING frame */, nil)
packet := getShortHeaderPacket(srcConnID, 0x42, nil)
packet.remoteAddr = &net.IPAddr{IP: net.IPv4(192, 168, 0, 100)}
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet.data)), gomock.Any())
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
})
Context("coalesced packets", func() {
BeforeEach(func() {
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
})
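// getPacketWithLength builds a Handshake packet whose Length field is set to the given value
// and returns the header length, so tests can verify that coalesced packets are cut to the
// length announced in the header.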
getPacketWithLength := func(connID protocol.ConnectionID, length protocol.ByteCount) (int /* header length */, receivedPacket) {
hdr := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: connID,
SrcConnectionID: destConnID,
Version: protocol.Version1,
Length: length,
},
PacketNumberLen: protocol.PacketNumberLen3,
}
hdrLen := hdr.GetLength(conn.version)
b := make([]byte, 1)
rand.Read(b)
packet := getLongHeaderPacket(hdr, bytes.Repeat(b, int(length)-3))
return int(hdrLen), packet
}
It("cuts packets to the right length", func() {
hdrLen, packet := getPacketWithLength(srcConnID, 456)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *wire.Header, _ time.Time, data []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen + 456 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{Header: wire.Header{}},
}, nil
})
cryptoSetup.EXPECT().DiscardInitialKeys()
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial)
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet.data)), gomock.Any())
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
It("handles coalesced packets", func() {
hdrLen1, packet1 := getPacketWithLength(srcConnID, 456)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *wire.Header, _ time.Time, data []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen1 + 456 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{
PacketNumber: 1,
Header: wire.Header{SrcConnectionID: destConnID},
},
}, nil
})
hdrLen2, packet2 := getPacketWithLength(srcConnID, 123)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *wire.Header, _ time.Time, data []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen2 + 123 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{
PacketNumber: 2,
Header: wire.Header{SrcConnectionID: destConnID},
},
}, nil
})
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial).AnyTimes()
cryptoSetup.EXPECT().DiscardInitialKeys().AnyTimes()
gomock.InOrder(
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet1.data)), gomock.Any()),
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet2.data)), gomock.Any()),
)
packet1.data = append(packet1.data, packet2.data...)
Expect(conn.handlePacketImpl(packet1)).To(BeTrue())
})
It("works with undecryptable packets", func() {
conn.handshakeComplete = false
hdrLen1, packet1 := getPacketWithLength(srcConnID, 456)
hdrLen2, packet2 := getPacketWithLength(srcConnID, 123)
gomock.InOrder(
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).Return(nil, handshake.ErrKeysNotYetAvailable),
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *wire.Header, _ time.Time, data []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen2 + 123 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{Header: wire.Header{}},
}, nil
}),
)
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial).AnyTimes()
cryptoSetup.EXPECT().DiscardInitialKeys().AnyTimes()
gomock.InOrder(
tracer.EXPECT().BufferedPacket(gomock.Any(), protocol.ByteCount(len(packet1.data))),
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet2.data)), gomock.Any()),
)
packet1.data = append(packet1.data, packet2.data...)
Expect(conn.handlePacketImpl(packet1)).To(BeTrue())
Expect(conn.undecryptablePackets).To(HaveLen(1))
Expect(conn.undecryptablePackets[0].data).To(HaveLen(hdrLen1 + 456 - 3))
})
It("ignores coalesced packet parts if the destination connection IDs don't match", func() {
wrongConnID := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
Expect(srcConnID).ToNot(Equal(wrongConnID))
hdrLen1, packet1 := getPacketWithLength(srcConnID, 456)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *wire.Header, _ time.Time, data []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen1 + 456 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{Header: wire.Header{}},
}, nil
})
_, packet2 := getPacketWithLength(wrongConnID, 123)
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial).AnyTimes()
cryptoSetup.EXPECT().DiscardInitialKeys().AnyTimes()
// don't EXPECT any more calls to unpacker.UnpackLongHeader()
gomock.InOrder(
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet1.data)), gomock.Any()),
tracer.EXPECT().DroppedPacket(gomock.Any(), protocol.ByteCount(len(packet2.data)), logging.PacketDropUnknownConnectionID),
)
packet1.data = append(packet1.data, packet2.data...)
Expect(conn.handlePacketImpl(packet1)).To(BeTrue())
})
})
})
Context("sending packets", func() {
var (
connDone chan struct{}
sender *MockSender
)
BeforeEach(func() {
sender = NewMockSender(mockCtrl)
sender.EXPECT().Run()
sender.EXPECT().WouldBlock().AnyTimes()
conn.sendQueue = sender
connDone = make(chan struct{})
})
AfterEach(func() {
streamManager.EXPECT().CloseWithError(gomock.Any())
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
sender.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
Eventually(connDone).Should(BeClosed())
})
runConn := func() {
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
close(connDone)
}()
}
It("sends packets", func() {
conn.handshakeConfirmed = true
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
runConn()
p := shortHeaderPacket{
DestConnID: protocol.ParseConnectionID([]byte{1, 2, 3}),
PacketNumber: 1337,
PacketNumberLen: protocol.PacketNumberLen3,
KeyPhase: protocol.KeyPhaseOne,
}
expectAppendPacket(packer, p, []byte("foobar"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
sent := make(chan struct{})
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(*packetBuffer, protocol.ByteCount) { close(sent) })
tracer.EXPECT().SentShortHeaderPacket(&logging.ShortHeader{
DestConnectionID: p.DestConnID,
PacketNumber: p.PacketNumber,
PacketNumberLen: p.PacketNumberLen,
KeyPhase: p.KeyPhase,
}, gomock.Any(), nil, []logging.Frame{})
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
})
It("doesn't send packets if there's nothing to send", func() {
conn.handshakeConfirmed = true
runConn()
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
conn.receivedPacketHandler.ReceivedPacket(0x035e, protocol.ECNNon, protocol.Encryption1RTT, time.Now(), true)
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure there are no calls to mconn.Write()
})
It("sends ACK only packets", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAck)
done := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(true, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.VersionNumber) { close(done) })
conn.sentPacketHandler = sph
runConn()
conn.scheduleSending()
Eventually(done).Should(BeClosed())
})
It("adds a BLOCKED frame when it is connection-level flow control blocked", func() {
conn.handshakeConfirmed = true
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
fc := mocks.NewMockConnectionFlowController(mockCtrl)
fc.EXPECT().IsNewlyBlocked().Return(true, protocol.ByteCount(1337))
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 13}, []byte("foobar"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
conn.connFlowController = fc
runConn()
sent := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(*packetBuffer, protocol.ByteCount) { close(sent) })
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), nil, []logging.Frame{})
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
frames, _ := conn.framer.AppendControlFrames(nil, 1000, protocol.Version1)
Expect(frames).To(Equal([]ackhandler.Frame{{Frame: &logging.DataBlockedFrame{MaximumData: 1337}}}))
})
It("doesn't send when the SentPacketHandler doesn't allow it", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone).AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
conn.sentPacketHandler = sph
runConn()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond)
})
for _, enc := range []protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption1RTT} {
encLevel := enc
Context(fmt.Sprintf("sending %s probe packets", encLevel), func() {
var sendMode ackhandler.SendMode
var getFrame func(protocol.ByteCount, protocol.VersionNumber) wire.Frame
BeforeEach(func() {
//nolint:exhaustive
switch encLevel {
case protocol.EncryptionInitial:
sendMode = ackhandler.SendPTOInitial
getFrame = conn.retransmissionQueue.GetInitialFrame
case protocol.EncryptionHandshake:
sendMode = ackhandler.SendPTOHandshake
getFrame = conn.retransmissionQueue.GetHandshakeFrame
case protocol.Encryption1RTT:
sendMode = ackhandler.SendPTOAppData
getFrame = conn.retransmissionQueue.GetAppDataFrame
}
})
It("sends a probe packet", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
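// the first SendMode call triggers sending of the probe packet, the second one ends the send loop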
sph.EXPECT().SendMode(gomock.Any()).Return(sendMode)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
sph.EXPECT().QueueProbePacket(encLevel)
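// Initial and Handshake probes are coalesced long header packets, 1-RTT probes use a short header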
p := getCoalescedPacket(123, enc != protocol.Encryption1RTT)
packer.EXPECT().MaybePackProbePacket(encLevel, gomock.Any(), conn.version).Return(p, nil)
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_ time.Time, pn, _ protocol.PacketNumber, _ []ackhandler.StreamFrame, _ []ackhandler.Frame, _ protocol.EncryptionLevel, _ protocol.ByteCount, _ bool) {
Expect(pn).To(Equal(protocol.PacketNumber(123)))
})
conn.sentPacketHandler = sph
runConn()
sent := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(*packetBuffer, protocol.ByteCount) { close(sent) })
if enc == protocol.Encryption1RTT {
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), p.shortHdrPacket.Length, gomock.Any(), gomock.Any())
} else {
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), p.longHdrPackets[0].length, gomock.Any(), gomock.Any())
}
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
})
It("sends a PING as a probe packet", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(sendMode)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
sph.EXPECT().QueueProbePacket(encLevel).Return(false)
p := getCoalescedPacket(123, enc != protocol.Encryption1RTT)
packer.EXPECT().MaybePackProbePacket(encLevel, gomock.Any(), conn.version).Return(p, nil)
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_ time.Time, pn, _ protocol.PacketNumber, _ []ackhandler.StreamFrame, _ []ackhandler.Frame, _ protocol.EncryptionLevel, _ protocol.ByteCount, _ bool) {
Expect(pn).To(Equal(protocol.PacketNumber(123)))
})
conn.sentPacketHandler = sph
runConn()
sent := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(*packetBuffer, protocol.ByteCount) { close(sent) })
if enc == protocol.Encryption1RTT {
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), p.shortHdrPacket.Length, gomock.Any(), gomock.Any())
} else {
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), p.longHdrPackets[0].length, gomock.Any(), gomock.Any())
}
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
// We're using a mock packet packer in this test.
// We therefore need to test separately that the PING was actually queued.
Expect(getFrame(1000, protocol.Version1)).To(BeAssignableToTypeOf(&wire.PingFrame{}))
})
})
}
})
Context("packet pacing", func() {
var (
sph *mockackhandler.MockSentPacketHandler
sender *MockSender
)
BeforeEach(func() {
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
sph = mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
conn.handshakeConfirmed = true
conn.handshakeComplete = true
conn.sentPacketHandler = sph
sender = NewMockSender(mockCtrl)
sender.EXPECT().Run()
conn.sendQueue = sender
streamManager.EXPECT().CloseWithError(gomock.Any())
})
AfterEach(func() {
// make the goroutine return
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
sender.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends multiple packets one by one immediately", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(2)
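// after two packets the handler reports pacing limited, with the next send time far in the future, so the send loop stops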
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited)
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour))
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, []byte("packet10"))
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, []byte("packet11"))
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(b *packetBuffer, _ protocol.ByteCount) {
Expect(b.Data).To(Equal([]byte("packet10")))
})
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(b *packetBuffer, _ protocol.ByteCount) {
Expect(b.Data).To(Equal([]byte("packet11")))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 2 packets are sent
})
It("sends multiple packets one by one immediately, with GSO", func() {
enableGSO()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(3)
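// both packets fill the full MTU, so with GSO they are flushed in a single call to Send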
payload1 := make([]byte, conn.mtuDiscoverer.CurrentSize())
rand.Read(payload1)
payload2 := make([]byte, conn.mtuDiscoverer.CurrentSize())
rand.Read(payload2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, payload1)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, payload2)
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), gomock.Any()).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), conn.mtuDiscoverer.CurrentSize()).Do(func(b *packetBuffer, l protocol.ByteCount) {
Expect(b.Data).To(Equal(append(payload1, payload2...)))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 2 packets are sent
})
It("stops appending packets when a smaller packet is packed, with GSO", func() {
enableGSO()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(2)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
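// the first packet fills the full MTU, the second one is one byte shorter, which ends the GSO batch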
payload1 := make([]byte, conn.mtuDiscoverer.CurrentSize())
rand.Read(payload1)
payload2 := make([]byte, conn.mtuDiscoverer.CurrentSize()-1)
rand.Read(payload2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, payload1)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, payload2)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), conn.mtuDiscoverer.CurrentSize()).Do(func(b *packetBuffer, l protocol.ByteCount) {
Expect(b.Data).To(Equal(append(payload1, payload2...)))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 2 packets are sent
})
It("sends multiple packets, when the pacer allows immediate sending", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, []byte("packet10"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 1 packet is sent
})
It("allows an ACK to be sent when pacing limited", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour))
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited)
packer.EXPECT().PackAckOnlyPacket(gomock.Any(), conn.version).Return(shortHeaderPacket{PacketNumber: 123}, getPacketBuffer(), nil)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 1 packet is sent
})
// when the connection becomes congestion limited, the SendMode will at some point change from SendAny to SendAck
// the ACK shouldn't be sent in the same run of the send loop
It("doesn't send an ACK right after becoming congestion limited", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAck)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 100}, []byte("packet100"))
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 1 packet is sent
})
It("paces packets", func() {
pacingDelay := scaleDuration(100 * time.Millisecond)
gomock.InOrder(
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny),
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 100}, []byte("packet100")),
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited),
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(pacingDelay)),
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny),
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 101}, []byte("packet101")),
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited),
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour)),
)
written := make(chan struct{}, 2)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { written <- struct{}{} }).Times(2)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(written).Should(HaveLen(1))
Consistently(written, pacingDelay/2).Should(HaveLen(1))
Eventually(written, 2*pacingDelay).Should(HaveLen(2))
})
It("sends multiple packets at once", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited)
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour))
for pn := protocol.PacketNumber(1000); pn < 1003; pn++ {
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: pn}, []byte("packet"))
}
written := make(chan struct{}, 3)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { written <- struct{}{} }).Times(3)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(written).Should(HaveLen(3))
})
for _, withGSO := range []bool{false, true} {
withGSO := withGSO
It(fmt.Sprintf("doesn't try to send if the send queue is full: %t", withGSO), func() {
if withGSO {
enableGSO()
}
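// the send queue reports that it would block, so the connection waits for the Available channel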
available := make(chan struct{}, 1)
sender.EXPECT().WouldBlock().Return(true)
sender.EXPECT().Available().Return(available)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(scaleDuration(50 * time.Millisecond))
written := make(chan struct{})
sender.EXPECT().WouldBlock().AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1000}, []byte("packet1000"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { close(written) })
available <- struct{}{}
Eventually(written).Should(BeClosed())
})
}
It("stops sending when there are new packets to receive", func() {
sender.EXPECT().WouldBlock().AnyTimes()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
written := make(chan struct{})
sender.EXPECT().WouldBlock().AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(time.Time, protocol.PacketNumber, protocol.PacketNumber, []ackhandler.StreamFrame, []ackhandler.Frame, protocol.EncryptionLevel, protocol.ByteCount, bool) {
sph.EXPECT().ReceivedBytes(gomock.Any())
conn.handlePacket(receivedPacket{buffer: getPacketBuffer()})
})
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, []byte("packet10"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { close(written) })
conn.scheduleSending()
time.Sleep(scaleDuration(50 * time.Millisecond))
Eventually(written).Should(BeClosed())
})
It("stops sending when the send queue is full", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1000}, []byte("packet1000"))
written := make(chan struct{}, 1)
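// the first packet can be sent, then the send queue reports itself as full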
sender.EXPECT().WouldBlock()
sender.EXPECT().WouldBlock().Return(true).Times(2)
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { written <- struct{}{} })
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
available := make(chan struct{}, 1)
sender.EXPECT().Available().Return(available)
conn.scheduleSending()
Eventually(written).Should(Receive())
time.Sleep(scaleDuration(50 * time.Millisecond))
// now make room in the send queue
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sender.EXPECT().WouldBlock().AnyTimes()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1001}, []byte("packet1001"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { written <- struct{}{} })
available <- struct{}{}
Eventually(written).Should(Receive())
// The send queue is not full any more. Sending on the available channel should have no effect.
available <- struct{}{}
time.Sleep(scaleDuration(50 * time.Millisecond))
})
It("doesn't set a pacing timer when there is no data to send", func() {
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sender.EXPECT().WouldBlock().AnyTimes()
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
// don't EXPECT any calls to mconn.Write()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending() // no packet will get sent
time.Sleep(50 * time.Millisecond)
})
It("sends a Path MTU probe packet", func() {
mtuDiscoverer := NewMockMTUDiscoverer(mockCtrl)
conn.mtuDiscoverer = mtuDiscoverer
conn.config.DisablePathMTUDiscovery = false
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
written := make(chan struct{}, 1)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, protocol.ByteCount) { written <- struct{}{} })
mtuDiscoverer.EXPECT().ShouldSendProbe(gomock.Any()).Return(true)
ping := ackhandler.Frame{Frame: &wire.PingFrame{}}
mtuDiscoverer.EXPECT().GetPing().Return(ping, protocol.ByteCount(1234))
packer.EXPECT().PackMTUProbePacket(ping, protocol.ByteCount(1234), conn.version).Return(shortHeaderPacket{PacketNumber: 1}, getPacketBuffer(), nil)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(written).Should(Receive())
mtuDiscoverer.EXPECT().CurrentSize().Return(protocol.ByteCount(1234))
})
})
Context("scheduling sending", func() {
var sender *MockSender
BeforeEach(func() {
sender = NewMockSender(mockCtrl)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Run()
conn.sendQueue = sender
conn.handshakeConfirmed = true
})
AfterEach(func() {
// make the goroutine return
expectReplaceWithClosed()
streamManager.EXPECT().CloseWithError(gomock.Any())
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
sender.EXPECT().Close()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends when scheduleSending is called", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1}, []byte("packet1"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
// don't EXPECT any calls to mconn.Write()
time.Sleep(50 * time.Millisecond)
// only EXPECT calls after scheduleSending is called
written := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(*packetBuffer, protocol.ByteCount) { close(written) })
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
conn.scheduleSending()
Eventually(written).Should(BeClosed())
})
It("sets the timer to the ack timer", func() {
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1234}, []byte("packet1234"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_ time.Time, pn, _ protocol.PacketNumber, _ []ackhandler.StreamFrame, _ []ackhandler.Frame, _ protocol.EncryptionLevel, _ protocol.ByteCount, _ bool) {
Expect(pn).To(Equal(protocol.PacketNumber(1234)))
})
conn.sentPacketHandler = sph
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
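// the ACK alarm fires 10 ms from now; when it does, the run loop triggers sending of packet 1234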
rph.EXPECT().GetAlarmTimeout().Return(time.Now().Add(10 * time.Millisecond))
// make the run loop wait
rph.EXPECT().GetAlarmTimeout().Return(time.Now().Add(time.Hour)).MaxTimes(1)
conn.receivedPacketHandler = rph
written := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any()).Do(func(*packetBuffer, protocol.ByteCount) { close(written) })
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Eventually(written).Should(BeClosed())
})
})
It("sends coalesced packets before the handshake is confirmed", func() {
conn.handshakeComplete = false
conn.handshakeConfirmed = false
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
buffer := getPacketBuffer()
buffer.Data = append(buffer.Data, []byte("foobar")...)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Return(&coalescedPacket{
buffer: buffer,
longHdrPackets: []*longHeaderPacket{
{
header: &wire.ExtendedHeader{
Header: wire.Header{Type: protocol.PacketTypeInitial},
PacketNumber: 13,
},
length: 123,
},
{
header: &wire.ExtendedHeader{
Header: wire.Header{Type: protocol.PacketTypeHandshake},
PacketNumber: 37,
},
length: 1234,
},
},
}, nil)
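// any further packing attempts return nothing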
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().TimeUntilSend().Return(time.Now()).AnyTimes()
gomock.InOrder(
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_ time.Time, pn, _ protocol.PacketNumber, _ []ackhandler.StreamFrame, _ []ackhandler.Frame, encLevel protocol.EncryptionLevel, size protocol.ByteCount, _ bool) {
Expect(encLevel).To(Equal(protocol.EncryptionInitial))
Expect(pn).To(Equal(protocol.PacketNumber(13)))
Expect(size).To(BeEquivalentTo(123))
}),
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_ time.Time, pn, _ protocol.PacketNumber, _ []ackhandler.StreamFrame, _ []ackhandler.Frame, encLevel protocol.EncryptionLevel, size protocol.ByteCount, _ bool) {
Expect(encLevel).To(Equal(protocol.EncryptionHandshake))
Expect(pn).To(Equal(protocol.PacketNumber(37)))
Expect(size).To(BeEquivalentTo(1234))
}),
)
gomock.InOrder(
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(hdr *wire.ExtendedHeader, _ protocol.ByteCount, _ *wire.AckFrame, _ []logging.Frame) {
Expect(hdr.Type).To(Equal(protocol.PacketTypeInitial))
}),
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(hdr *wire.ExtendedHeader, _ protocol.ByteCount, _ *wire.AckFrame, _ []logging.Frame) {
Expect(hdr.Type).To(Equal(protocol.PacketTypeHandshake))
}),
)
sent := make(chan struct{})
mconn.EXPECT().Write([]byte("foobar"), protocol.ByteCount(6)).Do(func([]byte, protocol.ByteCount) { close(sent) })
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
// make sure the goroutine returns
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("cancels the HandshakeComplete context when the handshake completes", func() {
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).AnyTimes()
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
sph.EXPECT().SetHandshakeConfirmed()
connRunner.EXPECT().Retire(clientDestConnID)
cryptoSetup.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().GetSessionTicket()
handshakeCtx := conn.HandshakeComplete()
Consistently(handshakeCtx).ShouldNot(BeClosed())
Expect(conn.handleHandshakeComplete()).To(Succeed())
Eventually(handshakeCtx).Should(BeClosed())
})
It("sends a session ticket when the handshake completes", func() {
const size = protocol.MaxPostHandshakeCryptoFrameSize * 3 / 2
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
connRunner.EXPECT().Retire(clientDestConnID)
conn.sentPacketHandler.DropPackets(protocol.EncryptionInitial)
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
cryptoSetup.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().GetSessionTicket().Return(make([]byte, size), nil)
handshakeCtx := conn.HandshakeComplete()
Consistently(handshakeCtx).ShouldNot(BeClosed())
Expect(conn.handleHandshakeComplete()).To(Succeed())
var frames []ackhandler.Frame
Eventually(func() []ackhandler.Frame {
frames, _ = conn.framer.AppendControlFrames(nil, protocol.MaxByteCount, protocol.Version1)
return frames
}).ShouldNot(BeEmpty())
var count int
var s int
for _, f := range frames {
if cf, ok := f.Frame.(*wire.CryptoFrame); ok {
count++
s += len(cf.Data)
Expect(f.Frame.Length(conn.version)).To(BeNumerically("<=", protocol.MaxPostHandshakeCryptoFrameSize))
}
}
Expect(size).To(BeEquivalentTo(s))
})
It("doesn't cancel the HandshakeComplete context when the handshake fails", func() {
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake()
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
handshakeCtx := conn.HandshakeComplete()
Consistently(handshakeCtx).ShouldNot(BeClosed())
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("handshake error"))
Consistently(handshakeCtx).ShouldNot(BeClosed())
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends a HANDSHAKE_DONE frame when the handshake completes", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SetHandshakeConfirmed()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
done := make(chan struct{})
connRunner.EXPECT().Retire(clientDestConnID)
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *packetBuffer, _ protocol.ByteCount, v protocol.VersionNumber) (shortHeaderPacket, error) {
frames, _ := conn.framer.AppendControlFrames(nil, protocol.MaxByteCount, v)
Expect(frames).ToNot(BeEmpty())
Expect(frames[0].Frame).To(BeEquivalentTo(&wire.HandshakeDoneFrame{}))
defer close(done)
return shortHeaderPacket{}, nil
})
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake()
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventHandshakeComplete})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().GetSessionTicket()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
Expect(conn.handleHandshakeComplete()).To(Succeed())
conn.run()
}()
Eventually(done).Should(BeClosed())
// make sure the goroutine returns
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("doesn't return a run error when closing", func() {
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
Expect(conn.run()).To(Succeed())
close(done)
}()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(done).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(context.Canceled))
})
It("passes errors to the connection runner", func() {
testErr := errors.New("handshake error")
expectedErr := &qerr.ApplicationError{
ErrorCode: 0x1337,
ErrorMessage: testErr.Error(),
}
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
Expect(err).To(MatchError(expectedErr))
close(done)
}()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
Expect(conn.CloseWithError(0x1337, testErr.Error())).To(Succeed())
Eventually(done).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(expectedErr))
})
Context("transport parameters", func() {
It("processes transport parameters received from the client", func() {
params := &wire.TransportParameters{
MaxIdleTimeout: 90 * time.Second,
InitialMaxStreamDataBidiLocal: 0x5000,
InitialMaxData: 0x5000,
ActiveConnectionIDLimit: 3,
// marshaling always sets it to this value
MaxUDPPayloadSize: protocol.MaxPacketBufferSize,
InitialSourceConnectionID: destConnID,
}
streamManager.EXPECT().UpdateLimits(params)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).MaxTimes(3)
Expect(conn.earlyConnReady()).ToNot(BeClosed())
tracer.EXPECT().ReceivedTransportParameters(params)
conn.handleTransportParameters(params)
Expect(conn.earlyConnReady()).To(BeClosed())
})
})
Context("keep-alives", func() {
setRemoteIdleTimeout := func(t time.Duration) {
streamManager.EXPECT().UpdateLimits(gomock.Any())
tracer.EXPECT().ReceivedTransportParameters(gomock.Any())
conn.handleTransportParameters(&wire.TransportParameters{
MaxIdleTimeout: t,
InitialSourceConnectionID: destConnID,
})
}
runConn := func() {
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
}
BeforeEach(func() {
conn.config.MaxIdleTimeout = 30 * time.Second
conn.config.KeepAlivePeriod = 15 * time.Second
conn.receivedPacketHandler.ReceivedPacket(0, protocol.ECNNon, protocol.EncryptionHandshake, time.Now(), true)
})
AfterEach(func() {
// make the goroutine return
expectReplaceWithClosed()
streamManager.EXPECT().CloseWithError(gomock.Any())
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends a PING as a keep-alive after half the idle timeout", func() {
setRemoteIdleTimeout(5 * time.Second)
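// half the remote idle timeout (2.5s) has already passed since the last packet was received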
conn.lastPacketReceivedTime = time.Now().Add(-5 * time.Second / 2)
sent := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.VersionNumber) (*coalescedPacket, error) {
close(sent)
return nil, nil
})
runConn()
Eventually(sent).Should(BeClosed())
})
It("sends a PING after a maximum of protocol.MaxKeepAliveInterval", func() {
conn.config.MaxIdleTimeout = time.Hour
setRemoteIdleTimeout(time.Hour)
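// pretend that slightly more than protocol.MaxKeepAliveInterval has passed since the last packet was received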
conn.lastPacketReceivedTime = time.Now().Add(-protocol.MaxKeepAliveInterval).Add(-time.Millisecond)
sent := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.VersionNumber) (*coalescedPacket, error) {
close(sent)
return nil, nil
})
runConn()
Eventually(sent).Should(BeClosed())
})
It("doesn't send a PING packet if keep-alive is disabled", func() {
setRemoteIdleTimeout(5 * time.Second)
conn.config.KeepAlivePeriod = 0
conn.lastPacketReceivedTime = time.Now().Add(-time.Second * 5 / 2)
runConn()
// don't EXPECT() any calls to mconn.Write()
time.Sleep(50 * time.Millisecond)
})
It("doesn't send a PING if the handshake isn't completed yet", func() {
conn.config.HandshakeIdleTimeout = time.Hour
conn.handshakeComplete = false
// Needs to be shorter than our idle timeout.
// Otherwise we'll try to send a CONNECTION_CLOSE.
conn.lastPacketReceivedTime = time.Now().Add(-20 * time.Second)
runConn()
// don't EXPECT() any calls to mconn.Write()
time.Sleep(50 * time.Millisecond)
})
It("send PING as keep-alive earliest after 1.5 times the PTO", func() {
conn.config.KeepAlivePeriod = time.Microsecond
pto := conn.rttStats.PTO(true)
conn.lastPacketReceivedTime = time.Now()
sentPingTimeChan := make(chan time.Time)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.VersionNumber) (*coalescedPacket, error) {
sentPingTimeChan <- time.Now()
return nil, nil
})
runConn()
sentPingTime := <-sentPingTimeChan
Expect(sentPingTime.Sub(conn.lastPacketReceivedTime)).To(BeNumerically(">", pto*3/2))
})
})
Context("timeouts", func() {
BeforeEach(func() {
streamManager.EXPECT().CloseWithError(gomock.Any())
})
It("times out due to no network activity", func() {
connRunner.EXPECT().Remove(gomock.Any()).Times(2)
conn.lastPacketReceivedTime = time.Now().Add(-time.Hour)
done := make(chan struct{})
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&qerr.IdleTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrIdleTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("times out due to non-completed handshake", func() {
conn.handshakeComplete = false
conn.creationTime = time.Now().Add(-protocol.DefaultHandshakeTimeout).Add(-time.Second)
connRunner.EXPECT().Remove(gomock.Any()).Times(2)
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&HandshakeTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrHandshakeTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("does not use the idle timeout before the handshake complete", func() {
conn.handshakeComplete = false
conn.config.HandshakeIdleTimeout = 9999 * time.Second
conn.config.MaxIdleTimeout = 9999 * time.Second
conn.lastPacketReceivedTime = time.Now().Add(-time.Minute)
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(e *qerr.ApplicationError, _ protocol.ByteCount, _ protocol.VersionNumber) (*coalescedPacket, error) {
Expect(e.ErrorCode).To(BeZero())
return &coalescedPacket{buffer: getPacketBuffer()}, nil
})
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
idleTimeout := &IdleTimeoutError{}
handshakeTimeout := &HandshakeTimeoutError{}
Expect(errors.As(e, &idleTimeout)).To(BeFalse())
Expect(errors.As(e, &handshakeTimeout)).To(BeFalse())
}),
tracer.EXPECT().Close(),
)
// the handshake timeout is irrelevant here, since it depends on the time the connection was created,
// and not on the last network activity
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the goroutine return
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes the connection due to the idle timeout before handshake", func() {
conn.config.HandshakeIdleTimeout = 0
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&IdleTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
done := make(chan struct{})
conn.handshakeComplete = false
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().GetSessionTicket().MaxTimes(1)
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrIdleTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("closes the connection due to the idle timeout after handshake", func() {
conn.sentPacketHandler.DropPackets(protocol.EncryptionInitial)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
gomock.InOrder(
connRunner.EXPECT().Retire(clientDestConnID),
connRunner.EXPECT().Remove(gomock.Any()),
)
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake),
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&IdleTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
conn.idleTimeout = 0
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventHandshakeComplete})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().GetSessionTicket().MaxTimes(1)
cryptoSetup.EXPECT().SetHandshakeConfirmed().MaxTimes(1)
Expect(conn.handleHandshakeComplete()).To(Succeed())
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrIdleTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("doesn't time out when it just sent a packet", func() {
conn.lastPacketReceivedTime = time.Now().Add(-time.Hour)
conn.firstAckElicitingPacketAfterIdleSentTime = time.Now().Add(-time.Second)
conn.idleTimeout = 30 * time.Second
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the goroutine return
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("times out earliest after 3 times the PTO", func() {
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
connRunner.EXPECT().Retire(gomock.Any()).AnyTimes()
connRunner.EXPECT().Remove(gomock.Any()).Times(2)
cryptoSetup.EXPECT().Close()
closeTimeChan := make(chan time.Time)
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&IdleTimeoutError{}))
closeTimeChan <- time.Now()
})
tracer.EXPECT().Close()
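// even with a tiny idle timeout, the connection must stay alive for at least 3 PTOs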
conn.idleTimeout = time.Millisecond
done := make(chan struct{})
pto := conn.rttStats.PTO(true)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().GetSessionTicket().MaxTimes(1)
cryptoSetup.EXPECT().SetHandshakeConfirmed().MaxTimes(1)
conn.run()
close(done)
}()
closeTime := <-closeTimeChan
Expect(closeTime.Sub(conn.lastPacketReceivedTime)).To(BeNumerically(">", pto*3))
Eventually(done).Should(BeClosed())
})
})
It("stores up to MaxConnUnprocessedPackets packets", func() {
done := make(chan struct{})
tracer.EXPECT().DroppedPacket(logging.PacketTypeNotDetermined, logging.ByteCount(6), logging.PacketDropDOSPrevention).Do(func(logging.PacketType, logging.ByteCount, logging.PacketDropReason) {
close(done)
})
// Nothing here should block
for i := protocol.PacketNumber(0); i < protocol.MaxConnUnprocessedPackets+1; i++ {
conn.handlePacket(receivedPacket{data: []byte("foobar")})
}
Eventually(done).Should(BeClosed())
})
Context("getting streams", func() {
It("opens streams", func() {
mstr := NewMockStreamI(mockCtrl)
streamManager.EXPECT().OpenStream().Return(mstr, nil)
str, err := conn.OpenStream()
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("opens streams synchronously", func() {
mstr := NewMockStreamI(mockCtrl)
streamManager.EXPECT().OpenStreamSync(context.Background()).Return(mstr, nil)
str, err := conn.OpenStreamSync(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("opens unidirectional streams", func() {
mstr := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().OpenUniStream().Return(mstr, nil)
str, err := conn.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("opens unidirectional streams synchronously", func() {
mstr := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().OpenUniStreamSync(context.Background()).Return(mstr, nil)
str, err := conn.OpenUniStreamSync(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("accepts streams", func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
mstr := NewMockStreamI(mockCtrl)
streamManager.EXPECT().AcceptStream(ctx).Return(mstr, nil)
str, err := conn.AcceptStream(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("accepts unidirectional streams", func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
mstr := NewMockReceiveStreamI(mockCtrl)
streamManager.EXPECT().AcceptUniStream(ctx).Return(mstr, nil)
str, err := conn.AcceptUniStream(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
})
It("returns the local address", func() {
Expect(conn.LocalAddr()).To(Equal(localAddr))
})
It("returns the remote address", func() {
Expect(conn.RemoteAddr()).To(Equal(remoteAddr))
})
})
var _ = Describe("Client Connection", func() {
var (
conn *connection
connRunner *MockConnRunner
packer *MockPacker
mconn *MockSendConn
cryptoSetup *mocks.MockCryptoSetup
tracer *mocklogging.MockConnectionTracer
tlsConf *tls.Config
quicConf *Config
)
srcConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8})
destConnID := protocol.ParseConnectionID([]byte{8, 7, 6, 5, 4, 3, 2, 1})
getPacket := func(hdr *wire.ExtendedHeader, data []byte) receivedPacket {
b, err := hdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
rcvTime: time.Now(),
data: append(b, data...),
buffer: getPacketBuffer(),
}
}
BeforeEach(func() {
quicConf = populateConfig(&Config{})
tlsConf = nil
})
JustBeforeEach(func() {
Eventually(areConnsRunning).Should(BeFalse())
mconn = NewMockSendConn(mockCtrl)
mconn.EXPECT().capabilities().AnyTimes()
mconn.EXPECT().RemoteAddr().Return(&net.UDPAddr{}).AnyTimes()
mconn.EXPECT().LocalAddr().Return(&net.UDPAddr{}).AnyTimes()
if tlsConf == nil {
tlsConf = &tls.Config{}
}
connRunner = NewMockConnRunner(mockCtrl)
tracer = mocklogging.NewMockConnectionTracer(mockCtrl)
tracer.EXPECT().NegotiatedVersion(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
tracer.EXPECT().SentTransportParameters(gomock.Any())
tracer.EXPECT().UpdatedKeyFromTLS(gomock.Any(), gomock.Any()).AnyTimes()
tracer.EXPECT().UpdatedCongestionState(gomock.Any())
conn = newClientConnection(
mconn,
connRunner,
destConnID,
protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8}),
&protocol.DefaultConnectionIDGenerator{},
quicConf,
tlsConf,
42, // initial packet number
false,
false,
tracer,
1234,
utils.DefaultLogger,
protocol.Version1,
).(*connection)
packer = NewMockPacker(mockCtrl)
conn.packer = packer
cryptoSetup = mocks.NewMockCryptoSetup(mockCtrl)
conn.cryptoStreamHandler = cryptoSetup
conn.sentFirstPacket = true
})
It("changes the connection ID when receiving the first packet from the server", func() {
unpacker := NewMockUnpacker(mockCtrl)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(hdr *wire.Header, _ time.Time, data []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
return &unpackedPacket{
encryptionLevel: protocol.Encryption1RTT,
hdr: &wire.ExtendedHeader{Header: *hdr},
data: []byte{0}, // one PADDING frame
}, nil
})
conn.unpacker = unpacker
done := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.VersionNumber) { close(done) })
newConnID := protocol.ParseConnectionID([]byte{1, 3, 3, 7, 1, 3, 3, 7})
p := getPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
SrcConnectionID: newConnID,
DestConnectionID: srcConnID,
Length: 2 + 6,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, []byte("foobar"))
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), p.Size(), []logging.Frame{})
Expect(conn.handlePacketImpl(p)).To(BeTrue())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Eventually(done).Should(BeClosed())
// make sure the goroutine returns
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
connRunner.EXPECT().ReplaceWithClosed([]protocol.ConnectionID{srcConnID}, gomock.Any(), gomock.Any())
mconn.EXPECT().Write(gomock.Any(), gomock.Any()).MaxTimes(1)
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
time.Sleep(200 * time.Millisecond)
})
It("continues accepting Long Header packets after using a new connection ID", func() {
unpacker := NewMockUnpacker(mockCtrl)
conn.unpacker = unpacker
connRunner.EXPECT().AddResetToken(gomock.Any(), gomock.Any())
conn.connIDManager.SetHandshakeComplete()
conn.handleNewConnectionIDFrame(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5}),
})
Expect(conn.connIDManager.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5})))
// now receive a packet with the original source connection ID
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(hdr *wire.Header, _ time.Time, _ []byte, _ protocol.VersionNumber) (*unpackedPacket, error) {
return &unpackedPacket{
hdr: &wire.ExtendedHeader{Header: *hdr},
data: []byte{0},
encryptionLevel: protocol.EncryptionHandshake,
}, nil
})
hdr := &wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: srcConnID,
SrcConnectionID: destConnID,
}
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handleLongHeaderPacket(receivedPacket{buffer: getPacketBuffer()}, hdr)).To(BeTrue())
})
It("handles HANDSHAKE_DONE frames", func() {
conn.peerParams = &wire.TransportParameters{}
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
sph.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().SetHandshakeConfirmed()
Expect(conn.handleHandshakeDoneFrame()).To(Succeed())
})
It("interprets an ACK for 1-RTT packets as confirmation of the handshake", func() {
conn.peerParams = &wire.TransportParameters{}
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
ack := &wire.AckFrame{AckRanges: []wire.AckRange{{Smallest: 1, Largest: 3}}}
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().ReceivedAck(ack, protocol.Encryption1RTT, gomock.Any()).Return(true, nil)
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
sph.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().SetLargest1RTTAcked(protocol.PacketNumber(3))
cryptoSetup.EXPECT().SetHandshakeConfirmed()
Expect(conn.handleAckFrame(ack, protocol.Encryption1RTT)).To(Succeed())
})
It("doesn't send a CONNECTION_CLOSE when no packet was sent", func() {
conn.sentFirstPacket = false
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
running := make(chan struct{})
cryptoSetup.EXPECT().StartHandshake().Do(func() {
close(running)
conn.closeLocal(errors.New("early error"))
})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().Close()
connRunner.EXPECT().Remove(gomock.Any())
go func() {
defer GinkgoRecover()
conn.run()
}()
Eventually(running).Should(BeClosed())
Eventually(areConnsRunning).Should(BeFalse())
})
Context("handling tokens", func() {
var mockTokenStore *MockTokenStore
BeforeEach(func() {
mockTokenStore = NewMockTokenStore(mockCtrl)
tlsConf = &tls.Config{ServerName: "server"}
quicConf.TokenStore = mockTokenStore
mockTokenStore.EXPECT().Pop(gomock.Any())
})
It("handles NEW_TOKEN frames", func() {
mockTokenStore.EXPECT().Put("server", &ClientToken{data: []byte("foobar")})
Expect(conn.handleNewTokenFrame(&wire.NewTokenFrame{Token: []byte("foobar")})).To(Succeed())
})
})
Context("handling Version Negotiation", func() {
getVNP := func(versions ...protocol.VersionNumber) receivedPacket {
b := wire.ComposeVersionNegotiation(
protocol.ArbitraryLenConnectionID(srcConnID.Bytes()),
protocol.ArbitraryLenConnectionID(destConnID.Bytes()),
versions,
)
return receivedPacket{
rcvTime: time.Now(),
data: b,
buffer: getPacketBuffer(),
}
}
It("closes and returns the right error", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
sph.EXPECT().ReceivedBytes(gomock.Any())
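// the packet number to be used by the next connection attempt after version negotiation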
sph.EXPECT().PeekPacketNumber(protocol.EncryptionInitial).Return(protocol.PacketNumber(128), protocol.PacketNumberLen4)
conn.config.Versions = []protocol.VersionNumber{1234, 4321}
errChan := make(chan error, 1)
start := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().DoAndReturn(func() handshake.Event {
<-start
return handshake.Event{Kind: handshake.EventNoEvent}
})
errChan <- conn.run()
}()
connRunner.EXPECT().Remove(srcConnID)
tracer.EXPECT().ReceivedVersionNegotiationPacket(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_, _ protocol.ArbitraryLenConnectionID, versions []logging.VersionNumber) {
Expect(versions).To(And(
ContainElement(protocol.VersionNumber(4321)),
ContainElement(protocol.VersionNumber(1337)),
))
})
cryptoSetup.EXPECT().Close()
Expect(conn.handlePacketImpl(getVNP(4321, 1337))).To(BeFalse())
close(start)
var err error
Eventually(errChan).Should(Receive(&err))
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&errCloseForRecreating{}))
recreateErr := err.(*errCloseForRecreating)
Expect(recreateErr.nextVersion).To(Equal(protocol.VersionNumber(4321)))
Expect(recreateErr.nextPacketNumber).To(Equal(protocol.PacketNumber(128)))
})
It("it closes when no matching version is found", func() {
errChan := make(chan error, 1)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
errChan <- conn.run()
}()
connRunner.EXPECT().Remove(srcConnID).MaxTimes(1)
packer.EXPECT().PackCoalescedPacket(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
gomock.InOrder(
tracer.EXPECT().ReceivedVersionNegotiationPacket(gomock.Any(), gomock.Any(), gomock.Any()),
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var vnErr *VersionNegotiationError
Expect(errors.As(e, &vnErr)).To(BeTrue())
Expect(vnErr.Theirs).To(ContainElement(logging.VersionNumber(12345678)))
}),
tracer.EXPECT().Close(),
)
cryptoSetup.EXPECT().Close()
Expect(conn.handlePacketImpl(getVNP(12345678))).To(BeFalse())
var err error
Eventually(errChan).Should(Receive(&err))
Expect(err).To(HaveOccurred())
Expect(err).ToNot(BeAssignableToTypeOf(errCloseForRecreating{}))
Expect(err.Error()).To(ContainSubstring("no compatible QUIC version found"))
})
It("ignores Version Negotiation packets that offer the current version", func() {
p := getVNP(conn.version)
tracer.EXPECT().DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropUnexpectedVersion)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("ignores unparseable Version Negotiation packets", func() {
p := getVNP(conn.version)
p.data = p.data[:len(p.data)-2]
tracer.EXPECT().DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropHeaderParseError)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
})
Context("handling Retry", func() {
origDestConnID := protocol.ParseConnectionID([]byte{8, 7, 6, 5, 4, 3, 2, 1})
var retryHdr *wire.ExtendedHeader
JustBeforeEach(func() {
retryHdr = &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeRetry,
SrcConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
DestConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8}),
Token: []byte("foobar"),
Version: conn.version,
},
}
})
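// getRetryTag computes the Retry integrity tag (RFC 9001, Section 5.8) for the given header,
// using the original destination connection ID.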
getRetryTag := func(hdr *wire.ExtendedHeader) []byte {
b, err := hdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return handshake.GetRetryIntegrityTag(b, origDestConnID, hdr.Version)[:]
}
It("handles Retry packets", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
sph.EXPECT().ResetForRetry()
sph.EXPECT().ReceivedBytes(gomock.Any())
cryptoSetup.EXPECT().ChangeConnectionID(protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}))
packer.EXPECT().SetToken([]byte("foobar"))
tracer.EXPECT().ReceivedRetry(gomock.Any()).Do(func(hdr *wire.Header) {
Expect(hdr.DestConnectionID).To(Equal(retryHdr.DestConnectionID))
Expect(hdr.SrcConnectionID).To(Equal(retryHdr.SrcConnectionID))
Expect(hdr.Token).To(Equal(retryHdr.Token))
})
Expect(conn.handlePacketImpl(getPacket(retryHdr, getRetryTag(retryHdr)))).To(BeTrue())
})
It("ignores Retry packets after receiving a regular packet", func() {
conn.receivedFirstPacket = true
p := getPacket(retryHdr, getRetryTag(retryHdr))
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, p.Size(), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("ignores Retry packets if the server didn't change the connection ID", func() {
retryHdr.SrcConnectionID = destConnID
p := getPacket(retryHdr, getRetryTag(retryHdr))
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, p.Size(), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("ignores Retry packets with the a wrong Integrity tag", func() {
tag := getRetryTag(retryHdr)
tag[0]++
p := getPacket(retryHdr, tag)
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, p.Size(), logging.PacketDropPayloadDecryptError)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
})
Context("transport parameters", func() {
var (
closed bool
errChan chan error
paramsChan chan *wire.TransportParameters
)
JustBeforeEach(func() {
errChan = make(chan error, 1)
paramsChan = make(chan *wire.TransportParameters, 1)
closed = false
packer.EXPECT().PackCoalescedPacket(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake().MaxTimes(1)
// This is not 100% what would happen in reality.
// The run loop calls NextEvent once when it starts up (to send out the ClientHello),
// and then again every time a CRYPTO frame is handled.
// Injecting a CRYPTO frame is not straightforward though,
// so we inject the transport parameters on the first call to NextEvent.
params := <-paramsChan
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{
Kind: handshake.EventReceivedTransportParameters,
TransportParameters: params,
})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventHandshakeComplete}).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent}).MaxTimes(1).Do(func() {
defer GinkgoRecover()
Expect(conn.handleHandshakeComplete()).To(Succeed())
})
errChan <- conn.run()
close(errChan)
}()
})
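// expectClose sets up the mock expectations for closing the connection,
// either with an application-level or a transport-level CONNECTION_CLOSE.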
expectClose := func(applicationClose, errored bool) {
if !closed && !errored {
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any(), gomock.Any())
if applicationClose {
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil).MaxTimes(1)
} else {
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil).MaxTimes(1)
}
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()),
tracer.EXPECT().Close(),
)
}
closed = true
}
AfterEach(func() {
conn.shutdown()
Eventually(conn.Context().Done()).Should(BeClosed())
Eventually(errChan).Should(BeClosed())
})
It("uses the preferred_address connection ID", func() {
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
PreferredAddress: &wire.PreferredAddress{
IPv4: net.IPv4(127, 0, 0, 1),
IPv6: net.IP{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
},
}
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).MaxTimes(1)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
// make sure the connection ID is not retired
cf, _ := conn.framer.AppendControlFrames(nil, protocol.MaxByteCount, protocol.Version1)
Expect(cf).To(BeEmpty())
connRunner.EXPECT().AddResetToken(protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}, conn)
Expect(conn.connIDManager.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
// shut down
connRunner.EXPECT().RemoveResetToken(protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
expectClose(true, false)
})
It("uses the minimum of the peers' idle timeouts", func() {
conn.config.MaxIdleTimeout = 19 * time.Second
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
MaxIdleTimeout: 18 * time.Second,
}
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
// close first
expectClose(true, false)
conn.shutdown()
// then check. This avoids a race condition when accessing idleTimeout.
Expect(conn.idleTimeout).To(Equal(18 * time.Second))
})
It("errors if the transport parameters contain a wrong initial_source_connection_id", func() {
conn.handshakeDestConnID = protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xca, 0xfb, 0xad}),
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "expected initial_source_connection_id to equal deadbeef, is decafbad",
})))
})
It("errors if the transport parameters don't contain the retry_source_connection_id, if a Retry was performed", func() {
rcid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
conn.retrySrcConnID = &rcid
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "missing retry_source_connection_id",
})))
})
It("errors if the transport parameters contain the wrong retry_source_connection_id, if a Retry was performed", func() {
rcid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
rcid2 := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xc0, 0xde})
conn.retrySrcConnID = &rcid
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
RetrySourceConnectionID: &rcid2,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "expected retry_source_connection_id to equal deadbeef, is deadc0de",
})))
})
It("errors if the transport parameters contain the retry_source_connection_id, if no Retry was performed", func() {
rcid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xc0, 0xde})
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
RetrySourceConnectionID: &rcid,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "received retry_source_connection_id, although no Retry was performed",
})))
})
It("errors if the transport parameters contain a wrong original_destination_connection_id", func() {
conn.origDestConnID = protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
params := &wire.TransportParameters{
OriginalDestinationConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xca, 0xfb, 0xad}),
InitialSourceConnectionID: conn.handshakeDestConnID,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "expected original_destination_connection_id to equal deadbeef, is decafbad",
})))
})
It("errors if the transport parameters contain reduced limits after knowing 0-RTT data is accepted by the server", func() {
conn.perspective = protocol.PerspectiveClient
conn.peerParams = &wire.TransportParameters{
ActiveConnectionIDLimit: 3,
InitialMaxData: 0x5000,
InitialMaxStreamDataBidiLocal: 0x5000,
InitialMaxStreamDataBidiRemote: 1000,
InitialMaxStreamDataUni: 1000,
MaxBidiStreamNum: 500,
MaxUniStreamNum: 500,
}
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
ActiveConnectionIDLimit: 3,
InitialMaxData: 0x5000,
InitialMaxStreamDataBidiLocal: 0x5000,
InitialMaxStreamDataBidiRemote: 1000,
InitialMaxStreamDataUni: 1000,
MaxBidiStreamNum: 300,
MaxUniStreamNum: 300,
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
cryptoSetup.EXPECT().ConnectionState().Return(handshake.ConnectionState{Used0RTT: true})
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "server sent reduced limits after accepting 0-RTT data",
})))
})
})
Context("handling potentially injected packets", func() {
var unpacker *MockUnpacker
getPacket := func(extHdr *wire.ExtendedHeader, data []byte) receivedPacket {
b, err := extHdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
data: append(b, data...),
buffer: getPacketBuffer(),
}
}
// Convert an already packed raw packet into a receivedPacket
wrapPacket := func(packet []byte) receivedPacket {
return receivedPacket{
data: packet,
buffer: getPacketBuffer(),
}
}
// Illustrates that an attacker may inject an Initial packet with a different
// source connection ID, causing the endpoint to ignore subsequent real Initial packets.
It("ignores Initial packets with a different source connection ID", func() {
// Modified from test "ignores packets with a different source connection ID"
unpacker = NewMockUnpacker(mockCtrl)
conn.unpacker = unpacker
hdr1 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 1,
}
hdr2 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 2,
}
Expect(hdr2.SrcConnectionID).ToNot(Equal(srcConnID))
// Send one packet, which might change the connection ID.
// only EXPECT one call to the unpacker
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any(), gomock.Any(), conn.version).Return(&unpackedPacket{
encryptionLevel: protocol.EncryptionInitial,
hdr: hdr1,
data: []byte{0}, // one PADDING frame
}, nil)
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(getPacket(hdr1, nil))).To(BeTrue())
// The next packet has to be ignored, since the source connection ID doesn't match.
tracer.EXPECT().DroppedPacket(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(getPacket(hdr2, nil))).To(BeFalse())
})
It("ignores 0-RTT packets", func() {
p := getPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketType0RTT,
DestConnectionID: srcConnID,
Length: 2 + 6,
Version: conn.version,
},
PacketNumber: 0x42,
PacketNumberLen: protocol.PacketNumberLen2,
}, []byte("foobar"))
tracer.EXPECT().DroppedPacket(logging.PacketType0RTT, p.Size(), gomock.Any())
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
// Illustrates that an injected Initial with an ACK frame for an unsent packet causes
// the connection to immediately break down
It("fails on Initial-level ACK for unsent packet", func() {
ack := &wire.AckFrame{AckRanges: []wire.AckRange{{Smallest: 2, Largest: 2}}}
initialPacket := testutils.ComposeInitialPacket(destConnID, srcConnID, conn.version, destConnID, []wire.Frame{ack})
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(wrapPacket(initialPacket))).To(BeFalse())
})
// Illustrates that an injected Initial with a CONNECTION_CLOSE frame causes
// the connection to immediately break down
It("fails on Initial-level CONNECTION_CLOSE frame", func() {
connCloseFrame := &wire.ConnectionCloseFrame{
IsApplicationError: true,
ReasonPhrase: "mitm attacker",
}
initialPacket := testutils.ComposeInitialPacket(destConnID, srcConnID, conn.version, destConnID, []wire.Frame{connCloseFrame})
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(wrapPacket(initialPacket))).To(BeTrue())
})
// Illustrates that an attacker who injects a Retry packet and changes the connection ID
// can cause subsequent real Initial packets to be ignored.
It("ignores Initial packets which use the original source connection ID, after accepting a Retry", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
sph.EXPECT().ReceivedBytes(gomock.Any()).Times(2)
sph.EXPECT().ResetForRetry()
newSrcConnID := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
cryptoSetup.EXPECT().ChangeConnectionID(newSrcConnID)
packer.EXPECT().SetToken([]byte("foobar"))
tracer.EXPECT().ReceivedRetry(gomock.Any())
conn.handlePacketImpl(wrapPacket(testutils.ComposeRetryPacket(newSrcConnID, destConnID, destConnID, []byte("foobar"), conn.version)))
initialPacket := testutils.ComposeInitialPacket(conn.connIDManager.Get(), srcConnID, conn.version, conn.connIDManager.Get(), nil)
tracer.EXPECT().DroppedPacket(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(wrapPacket(initialPacket))).To(BeFalse())
})
})
})
golang-github-lucas-clemente-quic-go-0.38.2/connection_timer.go 0000664 0000000 0000000 00000002565 14545452366 0024510 0 ustar 00root root 0000000 0000000 package quic
import (
"time"
"github.com/quic-go/quic-go/internal/utils"
)
var deadlineSendImmediately = time.Time{}.Add(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine
type connectionTimer struct {
timer *utils.Timer
last time.Time
}
func newTimer() *connectionTimer {
return &connectionTimer{timer: utils.NewTimer()}
}
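// SetRead is called after the timer expired and its value was read.
// It records the deadline that just fired (unless it was the pacing deadline),
// so that SetTimer can enforce strictly increasing deadlines.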
func (t *connectionTimer) SetRead() {
if deadline := t.timer.Deadline(); deadline != deadlineSendImmediately {
t.last = deadline
}
t.timer.SetRead()
}
func (t *connectionTimer) Chan() <-chan time.Time {
return t.timer.Chan()
}
// SetTimer resets the timer.
// It makes sure that the deadline is strictly increasing.
// This prevents busy-looping in cases where the timer fires, but we can't actually send out a packet.
// This doesn't apply to the pacing deadline, which can be set multiple times to deadlineSendImmediately.
func (t *connectionTimer) SetTimer(idleTimeoutOrKeepAlive, ackAlarm, lossTime, pacing time.Time) {
deadline := idleTimeoutOrKeepAlive
if !ackAlarm.IsZero() && ackAlarm.Before(deadline) && ackAlarm.After(t.last) {
deadline = ackAlarm
}
if !lossTime.IsZero() && lossTime.Before(deadline) && lossTime.After(t.last) {
deadline = lossTime
}
if !pacing.IsZero() && pacing.Before(deadline) {
deadline = pacing
}
t.timer.Reset(deadline)
}
func (t *connectionTimer) Stop() {
t.timer.Stop()
}
golang-github-lucas-clemente-quic-go-0.38.2/connection_timer_test.go 0000664 0000000 0000000 00000003534 14545452366 0025544 0 ustar 00root root 0000000 0000000 package quic
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func (t *connectionTimer) Deadline() time.Time { return t.timer.Deadline() }
var _ = Describe("Timer", func() {
It("sets an idle timeout", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), time.Time{}, time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Hour)))
})
It("sets an ACK timer", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Minute)))
})
It("sets a loss timer", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), now.Add(time.Second), time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Second)))
})
It("sets a pacing timer", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), now.Add(time.Second), now.Add(time.Millisecond))
Expect(t.Deadline()).To(Equal(now.Add(time.Millisecond)))
})
It("doesn't reset to an earlier time", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Minute)))
t.SetRead()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Hour)))
})
It("allows the pacing timer to be set to send immediately", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Minute)))
t.SetRead()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, deadlineSendImmediately)
Expect(t.Deadline()).To(Equal(deadlineSendImmediately))
})
})
golang-github-lucas-clemente-quic-go-0.38.2/crypto_stream.go 0000664 0000000 0000000 00000006473 14545452366 0024046 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"io"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
)
type cryptoStream interface {
// for receiving data
HandleCryptoFrame(*wire.CryptoFrame) error
GetCryptoData() []byte
Finish() error
// for sending data
io.Writer
HasData() bool
PopCryptoFrame(protocol.ByteCount) *wire.CryptoFrame
}
type cryptoStreamImpl struct {
queue *frameSorter
msgBuf []byte
highestOffset protocol.ByteCount
finished bool
writeOffset protocol.ByteCount
writeBuf []byte
// Reassemble TLS handshake messages before returning them from GetCryptoData.
// This is only needed because crypto/tls doesn't correctly handle post-handshake messages.
onlyCompleteMsg bool
}
func newCryptoStream(onlyCompleteMsg bool) cryptoStream {
return &cryptoStreamImpl{
queue: newFrameSorter(),
onlyCompleteMsg: onlyCompleteMsg,
}
}
func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error {
highestOffset := f.Offset + protocol.ByteCount(len(f.Data))
if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset {
return &qerr.TransportError{
ErrorCode: qerr.CryptoBufferExceeded,
ErrorMessage: fmt.Sprintf("received invalid offset %d on crypto stream, maximum allowed %d", maxOffset, protocol.MaxCryptoStreamOffset),
}
}
if s.finished {
if highestOffset > s.highestOffset {
// reject crypto data received after this stream was already finished
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received crypto data after change of encryption level",
}
}
// ignore data with a smaller offset than the highest received
// could e.g. be a retransmission
return nil
}
s.highestOffset = utils.Max(s.highestOffset, highestOffset)
if err := s.queue.Push(f.Data, f.Offset, nil); err != nil {
return err
}
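// Drain all data that is now contiguously available from the frame sorter into msgBuf.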
for {
_, data, _ := s.queue.Pop()
if data == nil {
return nil
}
s.msgBuf = append(s.msgBuf, data...)
}
}
// GetCryptoData retrieves data that was received in CRYPTO frames
func (s *cryptoStreamImpl) GetCryptoData() []byte {
if s.onlyCompleteMsg {
if len(s.msgBuf) < 4 {
return nil
}
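// A TLS handshake message starts with a 1-byte message type followed by a 3-byte
// (big-endian) length field. msgLen is the total length, including this 4-byte header.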
msgLen := 4 + int(s.msgBuf[1])<<16 + int(s.msgBuf[2])<<8 + int(s.msgBuf[3])
if len(s.msgBuf) < msgLen {
return nil
}
msg := make([]byte, msgLen)
copy(msg, s.msgBuf[:msgLen])
s.msgBuf = s.msgBuf[msgLen:]
return msg
}
b := s.msgBuf
s.msgBuf = nil
return b
}
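// Finish is called when the encryption level associated with this crypto stream is dropped.
// It errors if the stream still holds crypto data that hasn't been read.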
func (s *cryptoStreamImpl) Finish() error {
if s.queue.HasMoreData() {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "encryption level changed, but crypto stream has more data to read",
}
}
s.finished = true
return nil
}
// Write writes data that should be sent out in CRYPTO frames.
func (s *cryptoStreamImpl) Write(p []byte) (int, error) {
s.writeBuf = append(s.writeBuf, p...)
return len(p), nil
}
func (s *cryptoStreamImpl) HasData() bool {
return len(s.writeBuf) > 0
}
func (s *cryptoStreamImpl) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame {
f := &wire.CryptoFrame{Offset: s.writeOffset}
n := utils.Min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
f.Data = s.writeBuf[:n]
s.writeBuf = s.writeBuf[n:]
s.writeOffset += n
return f
}
golang-github-lucas-clemente-quic-go-0.38.2/crypto_stream_manager.go 0000664 0000000 0000000 00000004132 14545452366 0025526 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/handshake"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/wire"
)
type cryptoDataHandler interface {
HandleMessage([]byte, protocol.EncryptionLevel) error
NextEvent() handshake.Event
}
type cryptoStreamManager struct {
cryptoHandler cryptoDataHandler
initialStream cryptoStream
handshakeStream cryptoStream
oneRTTStream cryptoStream
}
func newCryptoStreamManager(
cryptoHandler cryptoDataHandler,
initialStream cryptoStream,
handshakeStream cryptoStream,
oneRTTStream cryptoStream,
) *cryptoStreamManager {
return &cryptoStreamManager{
cryptoHandler: cryptoHandler,
initialStream: initialStream,
handshakeStream: handshakeStream,
oneRTTStream: oneRTTStream,
}
}
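// HandleCryptoFrame passes a received CRYPTO frame to the crypto stream for its encryption level
// and forwards all complete crypto data to the crypto data handler.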
func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error {
var str cryptoStream
//nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets.
switch encLevel {
case protocol.EncryptionInitial:
str = m.initialStream
case protocol.EncryptionHandshake:
str = m.handshakeStream
case protocol.Encryption1RTT:
str = m.oneRTTStream
default:
return fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel)
}
if err := str.HandleCryptoFrame(frame); err != nil {
return err
}
for {
data := str.GetCryptoData()
if data == nil {
return nil
}
if err := m.cryptoHandler.HandleMessage(data, encLevel); err != nil {
return err
}
}
}
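// GetPostHandshakeData returns a CRYPTO frame with post-handshake data (e.g. TLS session tickets)
// queued on the 1-RTT crypto stream, if any, limited to maxSize.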
func (m *cryptoStreamManager) GetPostHandshakeData(maxSize protocol.ByteCount) *wire.CryptoFrame {
if !m.oneRTTStream.HasData() {
return nil
}
return m.oneRTTStream.PopCryptoFrame(maxSize)
}
func (m *cryptoStreamManager) Drop(encLevel protocol.EncryptionLevel) error {
//nolint:exhaustive // 1-RTT keys should never get dropped.
switch encLevel {
case protocol.EncryptionInitial:
return m.initialStream.Finish()
case protocol.EncryptionHandshake:
return m.handshakeStream.Finish()
default:
panic(fmt.Sprintf("dropped unexpected encryption level: %s", encLevel))
}
}
golang-github-lucas-clemente-quic-go-0.38.2/crypto_stream_manager_test.go 0000664 0000000 0000000 00000006470 14545452366 0026574 0 ustar 00root root 0000000 0000000 package quic
import (
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Crypto Stream Manager", func() {
var (
csm *cryptoStreamManager
cs *MockCryptoDataHandler
initialStream *MockCryptoStream
handshakeStream *MockCryptoStream
oneRTTStream *MockCryptoStream
)
BeforeEach(func() {
initialStream = NewMockCryptoStream(mockCtrl)
handshakeStream = NewMockCryptoStream(mockCtrl)
oneRTTStream = NewMockCryptoStream(mockCtrl)
cs = NewMockCryptoDataHandler(mockCtrl)
csm = newCryptoStreamManager(cs, initialStream, handshakeStream, oneRTTStream)
})
It("passes messages to the initial stream", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
initialStream.EXPECT().HandleCryptoFrame(cf)
initialStream.EXPECT().GetCryptoData().Return([]byte("foobar"))
initialStream.EXPECT().GetCryptoData()
cs.EXPECT().HandleMessage([]byte("foobar"), protocol.EncryptionInitial)
Expect(csm.HandleCryptoFrame(cf, protocol.EncryptionInitial)).To(Succeed())
})
It("passes messages to the handshake stream", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
handshakeStream.EXPECT().HandleCryptoFrame(cf)
handshakeStream.EXPECT().GetCryptoData().Return([]byte("foobar"))
handshakeStream.EXPECT().GetCryptoData()
cs.EXPECT().HandleMessage([]byte("foobar"), protocol.EncryptionHandshake)
Expect(csm.HandleCryptoFrame(cf, protocol.EncryptionHandshake)).To(Succeed())
})
It("passes messages to the 1-RTT stream", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
oneRTTStream.EXPECT().HandleCryptoFrame(cf)
oneRTTStream.EXPECT().GetCryptoData().Return([]byte("foobar"))
oneRTTStream.EXPECT().GetCryptoData()
cs.EXPECT().HandleMessage([]byte("foobar"), protocol.Encryption1RTT)
Expect(csm.HandleCryptoFrame(cf, protocol.Encryption1RTT)).To(Succeed())
})
It("doesn't call the message handler, if there's no message", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
handshakeStream.EXPECT().HandleCryptoFrame(cf)
handshakeStream.EXPECT().GetCryptoData() // don't return any data to handle
// don't EXPECT any calls to HandleMessage()
Expect(csm.HandleCryptoFrame(cf, protocol.EncryptionHandshake)).To(Succeed())
})
It("processes all messages", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
handshakeStream.EXPECT().HandleCryptoFrame(cf)
handshakeStream.EXPECT().GetCryptoData().Return([]byte("foo"))
handshakeStream.EXPECT().GetCryptoData().Return([]byte("bar"))
handshakeStream.EXPECT().GetCryptoData()
cs.EXPECT().HandleMessage([]byte("foo"), protocol.EncryptionHandshake)
cs.EXPECT().HandleMessage([]byte("bar"), protocol.EncryptionHandshake)
Expect(csm.HandleCryptoFrame(cf, protocol.EncryptionHandshake)).To(Succeed())
})
It("errors for unknown encryption levels", func() {
err := csm.HandleCryptoFrame(&wire.CryptoFrame{}, 42)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("received CRYPTO frame with unexpected encryption level"))
})
It("drops Initial", func() {
initialStream.EXPECT().Finish()
Expect(csm.Drop(protocol.EncryptionInitial)).To(Succeed())
})
It("drops Handshake", func() {
handshakeStream.EXPECT().Finish()
Expect(csm.Drop(protocol.EncryptionHandshake)).To(Succeed())
})
})
golang-github-lucas-clemente-quic-go-0.38.2/crypto_stream_test.go 0000664 0000000 0000000 00000011714 14545452366 0025077 0 ustar 00root root 0000000 0000000 package quic
import (
"crypto/rand"
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Crypto Stream", func() {
var str cryptoStream
BeforeEach(func() {
str = newCryptoStream(false)
})
Context("handling incoming data", func() {
It("handles in-order CRYPTO frames", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("foo")))
Expect(str.GetCryptoData()).To(BeNil())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("bar"), Offset: 3})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("bar")))
Expect(str.GetCryptoData()).To(BeNil())
})
It("errors if the frame exceeds the maximum offset", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Offset: protocol.MaxCryptoStreamOffset - 5,
Data: []byte("foobar"),
})).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.CryptoBufferExceeded,
ErrorMessage: fmt.Sprintf("received invalid offset %d on crypto stream, maximum allowed %d", protocol.MaxCryptoStreamOffset+1, protocol.MaxCryptoStreamOffset),
}))
})
It("handles out-of-order CRYPTO frames", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Offset: 3, Data: []byte("bar")})).To(Succeed())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("foobar")))
Expect(str.GetCryptoData()).To(BeNil())
})
Context("finishing", func() {
It("errors if there's still data to read after finishing", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foobar"),
Offset: 10,
})).To(Succeed())
Expect(str.Finish()).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "encryption level changed, but crypto stream has more data to read",
}))
})
It("works with reordered data", func() {
f1 := &wire.CryptoFrame{
Data: []byte("foo"),
}
f2 := &wire.CryptoFrame{
Offset: 3,
Data: []byte("bar"),
}
Expect(str.HandleCryptoFrame(f2)).To(Succeed())
Expect(str.HandleCryptoFrame(f1)).To(Succeed())
Expect(str.Finish()).To(Succeed())
Expect(str.HandleCryptoFrame(f2)).To(Succeed())
})
It("rejects new crypto data after finishing", func() {
Expect(str.Finish()).To(Succeed())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foo"),
})).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received crypto data after change of encryption level",
}))
})
It("ignores crypto data below the maximum offset received before finishing", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foobar"),
})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("foobar")))
Expect(str.Finish()).To(Succeed())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Offset: 2,
Data: []byte("foo"),
})).To(Succeed())
})
})
})
Context("writing data", func() {
It("says if it has data", func() {
Expect(str.HasData()).To(BeFalse())
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
Expect(str.HasData()).To(BeTrue())
})
It("pops crypto frames", func() {
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
f := str.PopCryptoFrame(1000)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(BeZero())
Expect(f.Data).To(Equal([]byte("foobar")))
})
It("coalesces multiple writes", func() {
_, err := str.Write([]byte("foo"))
Expect(err).ToNot(HaveOccurred())
_, err = str.Write([]byte("bar"))
Expect(err).ToNot(HaveOccurred())
f := str.PopCryptoFrame(1000)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(BeZero())
Expect(f.Data).To(Equal([]byte("foobar")))
})
It("respects the maximum size", func() {
frameHeaderLen := (&wire.CryptoFrame{}).Length(protocol.Version1)
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
f := str.PopCryptoFrame(frameHeaderLen + 3)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(BeZero())
Expect(f.Data).To(Equal([]byte("foo")))
f = str.PopCryptoFrame(frameHeaderLen + 3)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(Equal(protocol.ByteCount(3)))
Expect(f.Data).To(Equal([]byte("bar")))
})
})
It("reassembles data", func() {
str = newCryptoStream(true)
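// Build a single fake handshake message: byte 0 is the message type,
// bytes 1 to 3 encode the length of the remaining payload.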
data := make([]byte, 1337)
l := len(data) - 4
data[1] = uint8(l >> 16)
data[2] = uint8(l >> 8)
data[3] = uint8(l)
rand.Read(data[4:])
for i, b := range data {
Expect(str.GetCryptoData()).To(BeEmpty())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Offset: protocol.ByteCount(i),
Data: []byte{b},
})).To(Succeed())
}
Expect(str.GetCryptoData()).To(Equal(data))
})
})
golang-github-lucas-clemente-quic-go-0.38.2/datagram_queue.go 0000664 0000000 0000000 00000005163 14545452366 0024132 0 ustar 00root root 0000000 0000000 package quic
import (
"context"
"sync"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
)
type datagramQueue struct {
sendQueue chan *wire.DatagramFrame
nextFrame *wire.DatagramFrame
rcvMx sync.Mutex
rcvQueue [][]byte
rcvd chan struct{} // used to notify Receive that a new datagram was received
closeErr error
closed chan struct{}
hasData func()
dequeued chan struct{}
logger utils.Logger
}
func newDatagramQueue(hasData func(), logger utils.Logger) *datagramQueue {
return &datagramQueue{
hasData: hasData,
sendQueue: make(chan *wire.DatagramFrame, 1),
rcvd: make(chan struct{}, 1),
dequeued: make(chan struct{}),
closed: make(chan struct{}),
logger: logger,
}
}
// AddAndWait queues a new DATAGRAM frame for sending.
// It blocks until the frame has been dequeued.
func (h *datagramQueue) AddAndWait(f *wire.DatagramFrame) error {
select {
case h.sendQueue <- f:
h.hasData()
case <-h.closed:
return h.closeErr
}
select {
case <-h.dequeued:
return nil
case <-h.closed:
return h.closeErr
}
}
// Peek gets the next DATAGRAM frame for sending.
// If the frame is actually sent out, Pop needs to be called before the next call to Peek.
func (h *datagramQueue) Peek() *wire.DatagramFrame {
if h.nextFrame != nil {
return h.nextFrame
}
select {
case h.nextFrame = <-h.sendQueue:
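// notify AddAndWait that the frame was dequeued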
h.dequeued <- struct{}{}
default:
return nil
}
return h.nextFrame
}
func (h *datagramQueue) Pop() {
if h.nextFrame == nil {
panic("datagramQueue BUG: Pop called for nil frame")
}
h.nextFrame = nil
}
// HandleDatagramFrame handles a received DATAGRAM frame.
func (h *datagramQueue) HandleDatagramFrame(f *wire.DatagramFrame) {
data := make([]byte, len(f.Data))
copy(data, f.Data)
var queued bool
h.rcvMx.Lock()
if len(h.rcvQueue) < protocol.DatagramRcvQueueLen {
h.rcvQueue = append(h.rcvQueue, data)
queued = true
select {
case h.rcvd <- struct{}{}:
default:
}
}
h.rcvMx.Unlock()
if !queued && h.logger.Debug() {
h.logger.Debugf("Discarding DATAGRAM frame (%d bytes payload)", len(f.Data))
}
}
// Receive gets a received DATAGRAM frame.
func (h *datagramQueue) Receive(ctx context.Context) ([]byte, error) {
for {
h.rcvMx.Lock()
if len(h.rcvQueue) > 0 {
data := h.rcvQueue[0]
h.rcvQueue = h.rcvQueue[1:]
h.rcvMx.Unlock()
return data, nil
}
h.rcvMx.Unlock()
select {
case <-h.rcvd:
continue
case <-h.closed:
return nil, h.closeErr
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
func (h *datagramQueue) CloseWithError(e error) {
h.closeErr = e
close(h.closed)
}
golang-github-lucas-clemente-quic-go-0.38.2/datagram_queue_test.go 0000664 0000000 0000000 00000007322 14545452366 0025170 0 ustar 00root root 0000000 0000000 package quic
import (
"context"
"errors"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Datagram Queue", func() {
var queue *datagramQueue
var queued chan struct{}
BeforeEach(func() {
queued = make(chan struct{}, 100)
queue = newDatagramQueue(func() { queued <- struct{}{} }, utils.DefaultLogger)
})
Context("sending", func() {
It("returns nil when there's no datagram to send", func() {
Expect(queue.Peek()).To(BeNil())
})
It("queues a datagram", func() {
done := make(chan struct{})
frame := &wire.DatagramFrame{Data: []byte("foobar")}
go func() {
defer GinkgoRecover()
defer close(done)
Expect(queue.AddAndWait(frame)).To(Succeed())
}()
Eventually(queued).Should(HaveLen(1))
Consistently(done).ShouldNot(BeClosed())
f := queue.Peek()
Expect(f.Data).To(Equal([]byte("foobar")))
Eventually(done).Should(BeClosed())
queue.Pop()
Expect(queue.Peek()).To(BeNil())
})
It("returns the same datagram multiple times, when Pop isn't called", func() {
sent := make(chan struct{}, 1)
go func() {
defer GinkgoRecover()
Expect(queue.AddAndWait(&wire.DatagramFrame{Data: []byte("foo")})).To(Succeed())
sent <- struct{}{}
Expect(queue.AddAndWait(&wire.DatagramFrame{Data: []byte("bar")})).To(Succeed())
sent <- struct{}{}
}()
Eventually(queued).Should(HaveLen(1))
f := queue.Peek()
Expect(f.Data).To(Equal([]byte("foo")))
Eventually(sent).Should(Receive())
Expect(queue.Peek()).To(Equal(f))
Expect(queue.Peek()).To(Equal(f))
queue.Pop()
Eventually(func() *wire.DatagramFrame { f = queue.Peek(); return f }).ShouldNot(BeNil())
f = queue.Peek()
Expect(f.Data).To(Equal([]byte("bar")))
})
It("closes", func() {
errChan := make(chan error, 1)
go func() {
defer GinkgoRecover()
errChan <- queue.AddAndWait(&wire.DatagramFrame{Data: []byte("foobar")})
}()
Consistently(errChan).ShouldNot(Receive())
queue.CloseWithError(errors.New("test error"))
Eventually(errChan).Should(Receive(MatchError("test error")))
})
})
Context("receiving", func() {
It("receives DATAGRAM frames", func() {
queue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("foo")})
queue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("bar")})
data, err := queue.Receive(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("foo")))
data, err = queue.Receive(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("bar")))
})
It("blocks until a frame is received", func() {
c := make(chan []byte, 1)
go func() {
defer GinkgoRecover()
data, err := queue.Receive(context.Background())
Expect(err).ToNot(HaveOccurred())
c <- data
}()
Consistently(c).ShouldNot(Receive())
queue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("foobar")})
Eventually(c).Should(Receive(Equal([]byte("foobar"))))
})
It("blocks until context is done", func() {
ctx, cancel := context.WithCancel(context.Background())
errChan := make(chan error)
go func() {
defer GinkgoRecover()
_, err := queue.Receive(ctx)
errChan <- err
}()
Consistently(errChan).ShouldNot(Receive())
cancel()
Eventually(errChan).Should(Receive(Equal(context.Canceled)))
})
It("closes", func() {
errChan := make(chan error, 1)
go func() {
defer GinkgoRecover()
_, err := queue.Receive(context.Background())
errChan <- err
}()
Consistently(errChan).ShouldNot(Receive())
queue.CloseWithError(errors.New("test error"))
Eventually(errChan).Should(Receive(MatchError("test error")))
})
})
})
golang-github-lucas-clemente-quic-go-0.38.2/docs/ 0000775 0000000 0000000 00000000000 14545452366 0021542 5 ustar 00root root 0000000 0000000 golang-github-lucas-clemente-quic-go-0.38.2/docs/quic.png 0000664 0000000 0000000 00000041623 14545452366 0023217 0 ustar 00root root 0000000 0000000 [binary PNG image data for docs/quic.png omitted]