golang-github-lucas-clemente-quic-go-0.46.0/.circleci/config.yml
version: 2.1
executors:
test-go122:
docker:
- image: "cimg/go:1.22"
environment:
runrace: true
TIMESCALE_FACTOR: 3
jobs:
"test": &test
executor: test-go122
steps:
- checkout
- run:
name: "Build infos"
command: go version
- run:
name: "Run tools tests"
command: go run github.com/onsi/ginkgo/v2/ginkgo -race -r -v -randomize-all -trace integrationtests/tools
- run:
name: "Run self integration tests"
command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/self
- run:
name: "Run version negotiation tests"
command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/versionnegotiation
- run:
name: "Run self integration tests with race detector"
command: go run github.com/onsi/ginkgo/v2/ginkgo -race -v -randomize-all -trace integrationtests/self
- run:
name: "Run self integration tests with qlog"
command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/self -- -qlog
- run:
name: "Run version negotiation tests with qlog"
command: go run github.com/onsi/ginkgo/v2/ginkgo -v -randomize-all -trace integrationtests/versionnegotiation -- -qlog
go122:
<<: *test
workflows:
workflow:
jobs:
- go122
golang-github-lucas-clemente-quic-go-0.46.0/.clusterfuzzlite/Dockerfile
FROM gcr.io/oss-fuzz-base/base-builder-go:v1
ARG TARGETPLATFORM
RUN echo "TARGETPLATFORM: ${TARGETPLATFORM}"
ENV GOVERSION=1.22.0
RUN platform=$(echo ${TARGETPLATFORM} | tr '/' '-') && \
filename="go${GOVERSION}.${platform}.tar.gz" && \
wget https://dl.google.com/go/${filename} && \
mkdir temp-go && \
rm -rf /root/.go/* && \
tar -C temp-go/ -xzf ${filename} && \
mv temp-go/go/* /root/.go/ && \
rm -r ${filename} temp-go
RUN apt-get update && apt-get install -y make autoconf automake libtool
COPY . $SRC/quic-go
WORKDIR quic-go
COPY .clusterfuzzlite/build.sh $SRC/
golang-github-lucas-clemente-quic-go-0.46.0/.clusterfuzzlite/build.sh
#!/bin/bash -eu
export CXX="${CXX} -lresolv" # required by Go 1.20
compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/frames Fuzz frame_fuzzer
compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/header Fuzz header_fuzzer
compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/transportparameters Fuzz transportparameter_fuzzer
compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/tokens Fuzz token_fuzzer
compile_go_fuzzer github.com/quic-go/quic-go/fuzzing/handshake Fuzz handshake_fuzzer
golang-github-lucas-clemente-quic-go-0.46.0/.clusterfuzzlite/project.yaml
language: go
golang-github-lucas-clemente-quic-go-0.46.0/.githooks/README.md
# Git Hooks
This directory contains useful Git hooks for working with quic-go.
Install them by running
```bash
git config core.hooksPath .githooks
```
golang-github-lucas-clemente-quic-go-0.46.0/.githooks/pre-commit
#!/bin/bash
# Check that test files don't contain focussed test cases.
errored=false
for f in $(git diff --diff-filter=d --cached --name-only); do
if [[ $f != *_test.go ]]; then continue; fi
output=$(git show :"$f" | grep -n -e "FIt(" -e "FContext(" -e "FDescribe(")
if [ $? -eq 0 ]; then
echo "$f contains a focussed test:"
echo "$output"
echo ""
errored=true
fi
done
pushd ./integrationtests/gomodvendor > /dev/null
go mod tidy
if [[ -n $(git diff --diff-filter=d --name-only -- "go.mod" "go.sum") ]]; then
echo "go.mod / go.sum in integrationtests/gomodvendor not tidied"
errored=true
fi
popd > /dev/null
# Check that all Go files are properly gofumpt-ed.
output=$(gofumpt -d $(git diff --diff-filter=d --cached --name-only -- '*.go'))
if [ -n "$output" ]; then
echo "Found files that are not properly gofumpt-ed."
echo "$output"
errored=true
fi
if [ "$errored" = true ]; then
exit 1
fi
golang-github-lucas-clemente-quic-go-0.46.0/.github/FUNDING.yml
# These are supported funding model platforms
github: [marten-seemann] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
golang-github-lucas-clemente-quic-go-0.46.0/.github/dependabot.yml
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/build-interop-docker.yml
name: Build interop Docker image
on:
push:
branches:
- master
tags:
- 'v*'
jobs:
interop:
runs-on: ${{ fromJSON(vars['DOCKER_RUNNER_UBUNTU'] || '"ubuntu-latest"') }}
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
platforms: linux/amd64,linux/arm64
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: set tag name
id: tag
# Tagged releases won't be picked up by the interop runner automatically,
# but they can be useful when debugging regressions.
run: |
if [[ $GITHUB_REF == refs/tags/* ]]; then
echo "tag=${GITHUB_REF#refs/tags/}" | tee -a $GITHUB_OUTPUT;
echo "gitref=${GITHUB_REF#refs/tags/}" | tee -a $GITHUB_OUTPUT;
else
echo 'tag=latest' | tee -a $GITHUB_OUTPUT;
echo 'gitref=${{ github.sha }}' | tee -a $GITHUB_OUTPUT;
fi
- uses: docker/build-push-action@v6
with:
context: "{{defaultContext}}:interop"
platforms: linux/amd64,linux/arm64
push: true
build-args: |
GITREF=${{ steps.tag.outputs.gitref }}
tags: martenseemann/quic-go-interop:${{ steps.tag.outputs.tag }}
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/clusterfuzz-lite-pr.yml
name: ClusterFuzzLite PR fuzzing
on:
pull_request:
paths:
- '**'
permissions: read-all
jobs:
PR:
runs-on: ${{ fromJSON(vars['CLUSTERFUZZ_LITE_RUNNER_UBUNTU'] || '"ubuntu-latest"') }}
concurrency:
group: ${{ github.workflow }}-${{ matrix.sanitizer }}-${{ github.ref }}
cancel-in-progress: true
strategy:
fail-fast: false
matrix:
sanitizer:
- address
steps:
- name: Build Fuzzers (${{ matrix.sanitizer }})
id: build
uses: google/clusterfuzzlite/actions/build_fuzzers@v1
with:
language: go
github-token: ${{ secrets.GITHUB_TOKEN }}
sanitizer: ${{ matrix.sanitizer }}
# Optional but recommended: used to only run fuzzers that are affected
# by the PR.
# See later section on "Git repo for storage".
# storage-repo: https://${{ secrets.PERSONAL_ACCESS_TOKEN }}@github.com/OWNER/STORAGE-REPO-NAME.git
# storage-repo-branch: main # Optional. Defaults to "main"
# storage-repo-branch-coverage: gh-pages # Optional. Defaults to "gh-pages".
- name: Run Fuzzers (${{ matrix.sanitizer }})
id: run
uses: google/clusterfuzzlite/actions/run_fuzzers@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
fuzz-seconds: 480
mode: 'code-change'
sanitizer: ${{ matrix.sanitizer }}
output-sarif: true
parallel-fuzzing: true
# Optional but recommended: used to download the corpus produced by
# batch fuzzing.
# See later section on "Git repo for storage".
# storage-repo: https://${{ secrets.PERSONAL_ACCESS_TOKEN }}@github.com/OWNER/STORAGE-REPO-NAME.git
# storage-repo-branch: main # Optional. Defaults to "main"
# storage-repo-branch-coverage: gh-pages # Optional. Defaults to "gh-pages".
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/cross-compile.sh
#!/bin/bash
set -e
dist="$1"
goos=$(echo "$dist" | cut -d "/" -f1)
goarch=$(echo "$dist" | cut -d "/" -f2)
# cross-compiling for android is a pain...
if [[ "$goos" == "android" ]]; then exit; fi
# iOS builds require Cgo, see https://github.com/golang/go/issues/43343
# Cgo would then need a C cross compilation setup. Not worth the hassle.
if [[ "$goos" == "ios" ]]; then exit; fi
# Write all log output to a temporary file instead of to stdout.
# That allows running this script in parallel, while preserving the correct order of the output.
log_file=$(mktemp)
error_handler() {
cat "$log_file" >&2
rm "$log_file"
exit 1
}
trap 'error_handler' ERR
echo "$dist" >> "$log_file"
out="main-$goos-$goarch"
GOOS=$goos GOARCH=$goarch go build -o $out example/main.go >> "$log_file" 2>&1
rm $out
cat "$log_file"
rm "$log_file"
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/cross-compile.yml
on: [push, pull_request]
jobs:
crosscompile:
strategy:
fail-fast: false
matrix:
go: [ "1.21.x", "1.22.x" ]
runs-on: ${{ fromJSON(vars['CROSS_COMPILE_RUNNER_UBUNTU'] || '"ubuntu-latest"') }}
name: "Cross Compilation (Go ${{matrix.go}})"
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- name: Install build utils
run: |
sudo apt-get update
sudo apt-get install -y gcc-multilib
- name: Install dependencies
run: go build example/main.go
- name: Run cross compilation
# run in parallel on as many cores as are available on the machine
run: go tool dist list | xargs -I % -P "$(nproc)" .github/workflows/cross-compile.sh %
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/go-generate.sh
#!/usr/bin/env bash
set -e
DIR=$(pwd)
TMP=$(mktemp -d)
cd "$TMP"
cp -r "$DIR" orig
cp -r "$DIR" generated
cd generated
# delete all go-generated files generated (that adhere to the comment convention)
grep --include \*.go -lrIZ "^// Code generated .* DO NOT EDIT\.$" . | xargs --null rm
# First regenerate sys_conn_buffers_write.go.
# If it doesn't exist, the following mockgen calls will fail.
go generate -run "sys_conn_buffers_write.go"
# now generate everything
go generate ./...
cd ..
# don't compare fuzzing corpora
diff --exclude=corpus --exclude=.git -ruN orig generated
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/integration.yml
on: [push, pull_request]
jobs:
integration:
strategy:
fail-fast: false
matrix:
os: [ "ubuntu" ]
go: [ "1.21.x", "1.22.x", "1.23.0-rc.2" ]
include:
- os: "windows"
go: "1.21.x"
- os: "macos"
go: "1.21.x"
runs-on: ${{ fromJSON(vars[format('INTEGRATION_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }}
timeout-minutes: 30
defaults:
run:
shell: bash # by default Windows uses PowerShell, which uses a different syntax for setting environment variables
env:
DEBUG: false # set this to true to export qlogs and save them as artifacts
TIMESCALE_FACTOR: 3
name: Integration Tests (${{ matrix.os }}, Go ${{ matrix.go }})
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- run: go version
- name: set qlogger
if: env.DEBUG == 'true'
run: echo "QLOGFLAG= -qlog" >> $GITHUB_ENV
- name: Run other tests
run: |
go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace -skip-package self,versionnegotiation integrationtests
go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/versionnegotiation -- ${{ env.QLOGFLAG }}
- name: Run self tests, using QUIC v1
if: success() || failure() # run this step even if the previous one failed
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=1 ${{ env.QLOGFLAG }}
- name: Run self tests, using QUIC v2
if: success() || failure() # run this step even if the previous one failed
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=2 ${{ env.QLOGFLAG }}
- name: Run self tests, with GSO disabled
if: ${{ matrix.os == 'ubuntu' && (success() || failure()) }} # run this step even if the previous one failed
env:
QUIC_GO_DISABLE_GSO: true
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=1 ${{ env.QLOGFLAG }}
- name: Run self tests, with ECN disabled
if: ${{ matrix.os == 'ubuntu' && (success() || failure()) }} # run this step even if the previous one failed
env:
QUIC_GO_DISABLE_ECN: true
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- -version=1 ${{ env.QLOGFLAG }}
- name: Run tests (32 bit)
if: ${{ matrix.os != 'macos' && (success() || failure()) }} # run this step even if the previous one failed
env:
GOARCH: 386
run: |
go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace -skip-package self,versionnegotiation integrationtests
go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/versionnegotiation -- ${{ env.QLOGFLAG }}
go run github.com/onsi/ginkgo/v2/ginkgo -r -v -randomize-all -randomize-suites -trace integrationtests/self -- ${{ env.QLOGFLAG }}
- name: Run benchmarks
run: go test -v -run=^$ -bench=. ./integrationtests/self
- name: save qlogs
if: ${{ always() && env.DEBUG == 'true' }}
uses: actions/upload-artifact@v4
with:
name: qlogs-${{ matrix.os }}-go${{ matrix.go }}
path: integrationtests/self/*.qlog
retention-days: 7
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/lint.yml
on: [push, pull_request]
jobs:
check:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: "1.22.x"
- name: Check that no non-test files import Ginkgo or Gomega
run: .github/workflows/no_ginkgo.sh
- name: Check for //go:build ignore in .go files
run: |
IGNORED_FILES=$(grep -rl '//go:build ignore' . --include='*.go') || true
if [ -n "$IGNORED_FILES" ]; then
echo "::error::Found ignored Go files: $IGNORED_FILES"
exit 1
fi
- name: Check that go.mod is tidied
if: success() || failure() # run this step even if the previous one failed
run: |
cp go.mod go.mod.orig
cp go.sum go.sum.orig
go mod tidy
diff go.mod go.mod.orig
diff go.sum go.sum.orig
- name: Run code generators
if: success() || failure() # run this step even if the previous one failed
run: .github/workflows/go-generate.sh
- name: Check that go mod vendor works
if: success() || failure() # run this step even if the previous one failed
run: |
cd integrationtests/gomodvendor
go mod vendor
golangci-lint:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
go: [ "1.21.x", "1.22.x" ]
env:
GOLANGCI_LINT_VERSION: v1.58.0
name: golangci-lint (Go ${{ matrix.go }})
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- name: golangci-lint (Linux)
uses: golangci/golangci-lint-action@v6
with:
args: --timeout=3m
version: ${{ env.GOLANGCI_LINT_VERSION }}
- name: golangci-lint (Windows)
if: success() || failure() # run this step even if the previous one failed
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
args: --timeout=3m
version: ${{ env.GOLANGCI_LINT_VERSION }}
- name: golangci-lint (OSX)
if: success() || failure() # run this step even if the previous one failed
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
args: --timeout=3m
version: ${{ env.GOLANGCI_LINT_VERSION }}
- name: golangci-lint (FreeBSD)
if: success() || failure() # run this step even if the previous one failed
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
args: --timeout=3m
version: ${{ env.GOLANGCI_LINT_VERSION }}
- name: golangci-lint (others)
if: success() || failure() # run this step even if the previous one failed
uses: golangci/golangci-lint-action@v6
env:
GOOS: "solaris" # some OS that we don't have any build tags for
with:
args: --timeout=3m
version: ${{ env.GOLANGCI_LINT_VERSION }}
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/no_ginkgo.sh
#!/usr/bin/env bash
# Verify that no non-test files import Ginkgo or Gomega.
set -e
HAS_TESTING=false
cd ..
for f in $(find . -name "*.go" ! -name "*_test.go" ! -name "tools.go"); do
if grep -q "github.com/onsi/ginkgo" $f; then
echo "$f imports github.com/onsi/ginkgo/v2"
HAS_TESTING=true
fi
if grep -q "github.com/onsi/gomega" $f; then
echo "$f imports github.com/onsi/gomega"
HAS_TESTING=true
fi
done
if "$HAS_TESTING"; then
exit 1
fi
exit 0
golang-github-lucas-clemente-quic-go-0.46.0/.github/workflows/unit.yml
on: [push, pull_request]
jobs:
unit:
strategy:
fail-fast: false
matrix:
os: [ "ubuntu", "windows", "macos" ]
go: [ "1.21.x", "1.22.x", "1.23.0-rc.2" ]
runs-on: ${{ fromJSON(vars[format('UNIT_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }}
name: Unit tests (${{ matrix.os}}, Go ${{ matrix.go }})
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
- run: go version
- name: Run tests
env:
TIMESCALE_FACTOR: 10
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -cover -randomize-all -randomize-suites -trace -skip-package integrationtests
- name: Run tests as root
if: ${{ matrix.os == 'ubuntu' }}
env:
TIMESCALE_FACTOR: 10
FILE: sys_conn_helper_linux_test.go
run: |
test -f $FILE # make sure the file actually exists
go run github.com/onsi/ginkgo/v2/ginkgo build -cover -tags root .
sudo ./quic-go.test -ginkgo.v -ginkgo.trace -ginkgo.randomize-all -ginkgo.focus-file=$FILE -test.coverprofile coverage-root.txt
rm quic-go.test
- name: Run tests (32 bit)
if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
env:
TIMESCALE_FACTOR: 10
GOARCH: 386
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -cover -coverprofile coverage.txt -output-dir . -randomize-all -randomize-suites -trace -skip-package integrationtests
- name: Run tests with race detector
if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
env:
TIMESCALE_FACTOR: 20
run: go run github.com/onsi/ginkgo/v2/ginkgo -r -v -race -randomize-all -randomize-suites -trace -skip-package integrationtests
- name: Run benchmark tests
run: go test -v -run=^$ -benchtime 0.5s -bench=. $(go list ./... | grep -v integrationtests/self)
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: coverage.txt,coverage-root.txt
env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }}
token: ${{ secrets.CODECOV_TOKEN }}
golang-github-lucas-clemente-quic-go-0.46.0/.gitignore
debug
debug.test
main
mockgen_tmp.go
*.qtr
*.qlog
*.sqlog
*.txt
race.[0-9]*
fuzzing/*/*.zip
fuzzing/*/coverprofile
fuzzing/*/crashers
fuzzing/*/sonarprofile
fuzzing/*/suppressions
fuzzing/*/corpus/
gomock_reflect_*/
golang-github-lucas-clemente-quic-go-0.46.0/.golangci.yml
linters-settings:
misspell:
ignore-words:
- ect
depguard:
rules:
quicvarint:
list-mode: strict
files:
- "**/github.com/quic-go/quic-go/quicvarint/*"
- "!$test"
allow:
- $gostd
linters:
disable-all: true
enable:
- asciicheck
- depguard
- exhaustive
- exportloopref
- goimports
- gofmt # redundant, since gofmt *should* be a no-op after gofumpt
- gofumpt
- gosimple
- govet
- ineffassign
- misspell
- prealloc
- staticcheck
- stylecheck
- unconvert
- unparam
- unused
issues:
exclude-files:
- internal/handshake/cipher_suite.go
exclude-rules:
- path: internal/qtls
linters:
- depguard
- path: _test\.go
linters:
- exhaustive
golang-github-lucas-clemente-quic-go-0.46.0/Changelog.md
# Changelog
## v0.22.0 (2021-07-25)
- Use `ReadBatch` to read multiple UDP packets from the socket with a single syscall
- Add a config option (`Config.DisableVersionNegotiationPackets`) to disable sending of Version Negotiation packets
- Drop support for QUIC draft versions 32 and 34
- Remove the `RetireBugBackwardsCompatibilityMode`, which was intended to mitigate a bug when retiring connection IDs in quic-go in v0.17.2 and earlier
## v0.21.2 (2021-07-15)
- Update qtls (for Go 1.15, 1.16 and 1.17rc1) to include the fix for the crypto/tls panic (see https://groups.google.com/g/golang-dev/c/5LJ2V7rd-Ag/m/YGLHVBZ6AAAJ for details)
## v0.21.0 (2021-06-01)
- quic-go now supports RFC 9000!
## v0.20.0 (2021-03-19)
- Remove the `quic.Config.HandshakeTimeout`. Introduce a `quic.Config.HandshakeIdleTimeout`.
## v0.17.1 (2020-06-20)
- Supports QUIC WG draft-29.
- Improve bundling of ACK frames (#2543).
## v0.16.0 (2020-05-31)
- Supports QUIC WG draft-28.
## v0.15.0 (2020-03-01)
- Supports QUIC WG draft-27.
- Add support for 0-RTT.
- Remove `Session.Close()`. Applications need to pass an application error code to the transport using `Session.CloseWithError()`.
- Make the TLS Cipher Suites configurable (via `tls.Config.CipherSuites`).
## v0.14.0 (2019-12-04)
- Supports QUIC WG draft-24.
## v0.13.0 (2019-11-05)
- Supports QUIC WG draft-23.
- Add an `EarlyListener` that allows sending of 0.5-RTT data.
- Add a `TokenStore` to store address validation tokens.
- Issue and use new connection IDs during a connection.
## v0.12.0 (2019-08-05)
- Implement HTTP/3.
- Rename `quic.Cookie` to `quic.Token` and `quic.Config.AcceptCookie` to `quic.Config.AcceptToken`.
- Distinguish between Retry tokens and tokens sent in NEW_TOKEN frames.
- Enforce application protocol negotiation (via `tls.Config.NextProtos`).
- Use a varint for error codes.
- Add support for [quic-trace](https://github.com/google/quic-trace).
- Add a context to `Listener.Accept`, `Session.Accept{Uni}Stream` and `Session.Open{Uni}StreamSync`.
- Implement TLS key updates.
## v0.11.0 (2019-04-05)
- Drop support for gQUIC. For gQUIC support, please switch to the *gquic* branch.
- Implement QUIC WG draft-19.
- Use [qtls](https://github.com/marten-seemann/qtls) for TLS 1.3.
- Return a `tls.ConnectionState` from `quic.Session.ConnectionState()`.
- Remove the error return values from `quic.Stream.CancelRead()` and `quic.Stream.CancelWrite()`
## v0.10.0 (2018-08-28)
- Add support for QUIC 44, drop support for QUIC 42.
## v0.9.0 (2018-08-15)
- Add a `quic.Config` option for the length of the connection ID (for IETF QUIC).
- Split Session.Close into one method for regular closing and one for closing with an error.
## v0.8.0 (2018-06-26)
- Add support for unidirectional streams (for IETF QUIC).
- Add a `quic.Config` option for the maximum number of incoming streams.
- Add support for QUIC 42 and 43.
- Add dial functions that use a context.
- Multiplex clients on a net.PacketConn, when using Dial(conn).
## v0.7.0 (2018-02-03)
- The lower boundary for packets included in ACKs is now derived, and the value sent in STOP_WAITING frames is ignored.
- Remove `DialNonFWSecure` and `DialAddrNonFWSecure`.
- Expose the `ConnectionState` in the `Session` (experimental API).
- Implement packet pacing.
## v0.6.0 (2017-12-12)
- Add support for QUIC 39, drop support for QUIC 35 - 37
- Add `quic.Config` options for maximal flow control windows
- Add a `quic.Config` option for QUIC versions
- Add a `quic.Config` option to request omission of the connection ID from a server
- Add a `quic.Config` option to configure the source address validation
- Add a `quic.Config` option to configure the handshake timeout
- Add a `quic.Config` option to configure the idle timeout
- Add a `quic.Config` option to configure keep-alive
- Rename the STK to Cookie
- Implement `net.Conn`-style deadlines for streams
- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details.
- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details.
- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper`
- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn`
- Drop support for Go 1.7 and 1.8.
- Various bugfixes
golang-github-lucas-clemente-quic-go-0.46.0/LICENSE
MIT License
Copyright (c) 2016 the quic-go authors & Google, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
golang-github-lucas-clemente-quic-go-0.46.0/README.md
# A QUIC implementation in pure Go
[](https://quic-go.net/docs/)
[](https://pkg.go.dev/github.com/quic-go/quic-go)
[](https://codecov.io/gh/quic-go/quic-go/)
[](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:quic-go)
quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)).
In addition to these base RFCs, it also implements the following RFCs:
* Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221))
* Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899))
* QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369))
* QUIC Event Logging using qlog ([draft-ietf-quic-qlog-main-schema](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/) and [draft-ietf-quic-qlog-quic-events](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-quic-events/))
Support for WebTransport over HTTP/3 ([draft-ietf-webtrans-http3](https://datatracker.ietf.org/doc/draft-ietf-webtrans-http3/)) is implemented in [webtransport-go](https://github.com/quic-go/webtransport-go).
Detailed documentation can be found on [quic-go.net](https://quic-go.net/docs/).
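To give a rough idea of the API, here is a minimal client sketch using `DialAddr`. The server address and the ALPN value in `NextProtos` are placeholders, and error handling is kept to a bare minimum; see the documentation linked above for complete, production-ready examples.

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"

	"github.com/quic-go/quic-go"
)

func main() {
	// The tls.Config must define an application protocol via NextProtos;
	// "example-proto" is a placeholder ALPN value.
	tlsConf := &tls.Config{NextProtos: []string{"example-proto"}}

	// DialAddr resolves the address and creates a new UDP socket for this connection.
	conn, err := quic.DialAddr(context.Background(), "example.com:4242", tlsConf, nil)
	if err != nil {
		panic(err)
	}
	defer conn.CloseWithError(0, "done")

	// Open a bidirectional stream and send some data.
	str, err := conn.OpenStreamSync(context.Background())
	if err != nil {
		panic(err)
	}
	defer str.Close()
	if _, err := str.Write([]byte("hello")); err != nil {
		panic(err)
	}
	fmt.Println("sent greeting on stream", str.StreamID())
}
```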
## Projects using quic-go
| Project | Description | Stars |
| ---------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server. |  |
| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support |  |
| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS |  |
| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins |  |
| [frp](https://github.com/fatedier/frp) | A fast reverse proxy to help you expose a local server behind a NAT or firewall to the internet |  |
| [go-libp2p](https://github.com/libp2p/go-libp2p) | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others |  |
| [gost](https://github.com/go-gost/gost) | A simple security tunnel written in Go |  |
| [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy |  |
| [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications |  |
| [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. |  |
| [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins |  |
| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization |  |
| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy |  |
| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions |  |
| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System |  |
If you'd like to see your project added to this list, please send us a PR.
## Release Policy
quic-go always aims to support the latest two Go releases.
## Contributing
We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
golang-github-lucas-clemente-quic-go-0.46.0/SECURITY.md
# Security Policy
quic-go is still in development. This means that there may be problems in our protocols,
or there may be mistakes in our implementations.
We take security vulnerabilities very seriously. If you discover a security issue,
please bring it to our attention right away!
## Reporting a Vulnerability
If you find a vulnerability that may affect live deployments -- for example, by exposing
a remote execution exploit -- please [**report privately**](https://github.com/quic-go/quic-go/security/advisories/new).
Please **DO NOT file a public issue**.
If the issue is an implementation weakness that cannot be immediately exploited or
something not yet deployed, just discuss it openly.
## Reporting a non security bug
For non-security bugs, please simply file a GitHub [issue](https://github.com/quic-go/quic-go/issues/new).
golang-github-lucas-clemente-quic-go-0.46.0/buffer_pool.go
package quic
import (
"sync"
"github.com/quic-go/quic-go/internal/protocol"
)
type packetBuffer struct {
Data []byte
// refCount counts how many packets Data is used in.
// It doesn't support concurrent use.
// It is > 1 when used for coalesced packet.
refCount int
}
// Split increases the refCount.
// It must be called when a packet buffer is used for more than one packet,
// e.g. when splitting coalesced packets.
func (b *packetBuffer) Split() {
b.refCount++
}
// Decrement decrements the reference counter.
// It doesn't put the buffer back into the pool.
func (b *packetBuffer) Decrement() {
b.refCount--
if b.refCount < 0 {
panic("negative packetBuffer refCount")
}
}
// MaybeRelease puts the packet buffer back into the pool,
// if the reference counter already reached 0.
func (b *packetBuffer) MaybeRelease() {
// only put the packetBuffer back if it's not used any more
if b.refCount == 0 {
b.putBack()
}
}
// Release puts back the packet buffer into the pool.
// It should be called when processing is definitely finished.
func (b *packetBuffer) Release() {
b.Decrement()
if b.refCount != 0 {
panic("packetBuffer refCount not zero")
}
b.putBack()
}
// Len returns the length of Data
func (b *packetBuffer) Len() protocol.ByteCount { return protocol.ByteCount(len(b.Data)) }
func (b *packetBuffer) Cap() protocol.ByteCount { return protocol.ByteCount(cap(b.Data)) }
func (b *packetBuffer) putBack() {
if cap(b.Data) == protocol.MaxPacketBufferSize {
bufferPool.Put(b)
return
}
if cap(b.Data) == protocol.MaxLargePacketBufferSize {
largeBufferPool.Put(b)
return
}
panic("putPacketBuffer called with packet of wrong size!")
}
var bufferPool, largeBufferPool sync.Pool
func getPacketBuffer() *packetBuffer {
buf := bufferPool.Get().(*packetBuffer)
buf.refCount = 1
buf.Data = buf.Data[:0]
return buf
}
func getLargePacketBuffer() *packetBuffer {
buf := largeBufferPool.Get().(*packetBuffer)
buf.refCount = 1
buf.Data = buf.Data[:0]
return buf
}
func init() {
bufferPool.New = func() any {
return &packetBuffer{Data: make([]byte, 0, protocol.MaxPacketBufferSize)}
}
largeBufferPool.New = func() any {
return &packetBuffer{Data: make([]byte, 0, protocol.MaxLargePacketBufferSize)}
}
}
golang-github-lucas-clemente-quic-go-0.46.0/buffer_pool_test.go
package quic
import (
"github.com/quic-go/quic-go/internal/protocol"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Buffer Pool", func() {
It("returns buffers of cap", func() {
buf1 := getPacketBuffer()
Expect(buf1.Data).To(HaveCap(protocol.MaxPacketBufferSize))
buf2 := getLargePacketBuffer()
Expect(buf2.Data).To(HaveCap(protocol.MaxLargePacketBufferSize))
})
It("releases buffers", func() {
buf1 := getPacketBuffer()
buf1.Release()
buf2 := getLargePacketBuffer()
buf2.Release()
})
It("gets the length", func() {
buf := getPacketBuffer()
buf.Data = append(buf.Data, []byte("foobar")...)
Expect(buf.Len()).To(BeEquivalentTo(6))
})
It("panics if wrong-sized buffers are passed", func() {
buf := getPacketBuffer()
buf.Data = make([]byte, 10)
Expect(func() { buf.Release() }).To(Panic())
})
It("panics if it is released twice", func() {
buf := getPacketBuffer()
buf.Release()
Expect(func() { buf.Release() }).To(Panic())
})
It("panics if it is decremented too many times", func() {
buf := getPacketBuffer()
buf.Decrement()
Expect(func() { buf.Decrement() }).To(Panic())
})
It("waits until all parts have been released", func() {
buf := getPacketBuffer()
buf.Split()
buf.Split()
// now we have 3 parts
buf.Decrement()
buf.Decrement()
buf.Decrement()
Expect(func() { buf.Decrement() }).To(Panic())
})
})
golang-github-lucas-clemente-quic-go-0.46.0/client.go
package quic
import (
"context"
"crypto/tls"
"errors"
"net"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/logging"
)
type client struct {
sendConn sendConn
use0RTT bool
packetHandlers packetHandlerManager
onClose func()
tlsConf *tls.Config
config *Config
connIDGenerator ConnectionIDGenerator
srcConnID protocol.ConnectionID
destConnID protocol.ConnectionID
initialPacketNumber protocol.PacketNumber
hasNegotiatedVersion bool
version protocol.Version
handshakeChan chan struct{}
conn quicConn
tracer *logging.ConnectionTracer
tracingID ConnectionTracingID
logger utils.Logger
}
// make it possible to mock connection ID for initial generation in the tests
var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
// DialAddr establishes a new QUIC connection to a server.
// It resolves the address, and then creates a new UDP connection to dial the QUIC server.
// When the QUIC connection is closed, this UDP connection is closed.
// See Dial for more details.
func DialAddr(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (Connection, error) {
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
tr, err := setupTransport(udpConn, tlsConf, true)
if err != nil {
return nil, err
}
return tr.dial(ctx, udpAddr, addr, tlsConf, conf, false)
}
// DialAddrEarly establishes a new 0-RTT QUIC connection to a server.
// See DialAddr for more details.
func DialAddrEarly(ctx context.Context, addr string, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) {
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
tr, err := setupTransport(udpConn, tlsConf, true)
if err != nil {
return nil, err
}
conn, err := tr.dial(ctx, udpAddr, addr, tlsConf, conf, true)
if err != nil {
tr.Close()
return nil, err
}
return conn, nil
}
// DialEarly establishes a new 0-RTT QUIC connection to a server using a net.PacketConn.
// See Dial for more details.
func DialEarly(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (EarlyConnection, error) {
dl, err := setupTransport(c, tlsConf, false)
if err != nil {
return nil, err
}
conn, err := dl.DialEarly(ctx, addr, tlsConf, conf)
if err != nil {
dl.Close()
return nil, err
}
return conn, nil
}
// Dial establishes a new QUIC connection to a server using a net.PacketConn.
// If the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn does),
// ECN and packet info support will be enabled. In this case, ReadMsgUDP and WriteMsgUDP
// will be used instead of ReadFrom and WriteTo to read/write packets.
// The tls.Config must define an application protocol (using NextProtos).
//
// This is a convenience function. More advanced use cases should instantiate a Transport,
// which offers configuration options for a more fine-grained control of the connection establishment,
// including reusing the underlying UDP socket for multiple QUIC connections.
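//
// A rough sketch of the Transport-based approach mentioned above (variable names are
// placeholders, error handling omitted):
//
//	tr := &quic.Transport{Conn: udpConn}
//	conn, err := tr.Dial(ctx, remoteAddr, tlsConf, quicConf)
//	if err != nil { /* handle error */ }
//	defer conn.CloseWithError(0, "")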
func Dial(ctx context.Context, c net.PacketConn, addr net.Addr, tlsConf *tls.Config, conf *Config) (Connection, error) {
dl, err := setupTransport(c, tlsConf, false)
if err != nil {
return nil, err
}
conn, err := dl.Dial(ctx, addr, tlsConf, conf)
if err != nil {
dl.Close()
return nil, err
}
return conn, nil
}
func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn bool) (*Transport, error) {
if tlsConf == nil {
return nil, errors.New("quic: tls.Config not set")
}
return &Transport{
Conn: c,
createdConn: createdPacketConn,
isSingleUse: true,
}, nil
}
func dial(
ctx context.Context,
conn sendConn,
connIDGenerator ConnectionIDGenerator,
packetHandlers packetHandlerManager,
tlsConf *tls.Config,
config *Config,
onClose func(),
use0RTT bool,
) (quicConn, error) {
c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT)
if err != nil {
return nil, err
}
c.packetHandlers = packetHandlers
c.tracingID = nextConnTracingID()
if c.config.Tracer != nil {
c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID)
}
if c.tracer != nil && c.tracer.StartedConnection != nil {
c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID)
}
if err := c.dial(ctx); err != nil {
return nil, err
}
return c.conn, nil
}
func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) {
srcConnID, err := connIDGenerator.GenerateConnectionID()
if err != nil {
return nil, err
}
destConnID, err := generateConnectionIDForInitial()
if err != nil {
return nil, err
}
c := &client{
connIDGenerator: connIDGenerator,
srcConnID: srcConnID,
destConnID: destConnID,
sendConn: sendConn,
use0RTT: use0RTT,
onClose: onClose,
tlsConf: tlsConf,
config: config,
version: config.Versions[0],
handshakeChan: make(chan struct{}),
logger: utils.DefaultLogger.WithPrefix("client"),
}
return c, nil
}
func (c *client) dial(ctx context.Context) error {
c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
c.conn = newClientConnection(
context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID),
c.sendConn,
c.packetHandlers,
c.destConnID,
c.srcConnID,
c.connIDGenerator,
c.config,
c.tlsConf,
c.initialPacketNumber,
c.use0RTT,
c.hasNegotiatedVersion,
c.tracer,
c.logger,
c.version,
)
c.packetHandlers.Add(c.srcConnID, c.conn)
errorChan := make(chan error, 1)
recreateChan := make(chan errCloseForRecreating)
go func() {
err := c.conn.run()
var recreateErr *errCloseForRecreating
if errors.As(err, &recreateErr) {
recreateChan <- *recreateErr
return
}
if c.onClose != nil {
c.onClose()
}
errorChan <- err // returns as soon as the connection is closed
}()
// only set when we're using 0-RTT
// Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever.
var earlyConnChan <-chan struct{}
if c.use0RTT {
earlyConnChan = c.conn.earlyConnReady()
}
select {
case <-ctx.Done():
c.conn.destroy(nil)
return context.Cause(ctx)
case err := <-errorChan:
return err
case recreateErr := <-recreateChan:
c.initialPacketNumber = recreateErr.nextPacketNumber
c.version = recreateErr.nextVersion
c.hasNegotiatedVersion = true
return c.dial(ctx)
case <-earlyConnChan:
// ready to send 0-RTT data
return nil
case <-c.conn.HandshakeComplete():
// handshake successfully completed
return nil
}
}
golang-github-lucas-clemente-quic-go-0.46.0/client_test.go
package quic
import (
"context"
"crypto/tls"
"errors"
"net"
"time"
mocklogging "github.com/quic-go/quic-go/internal/mocks/logging"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/logging"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"go.uber.org/mock/gomock"
)
type nullMultiplexer struct{}
func (n nullMultiplexer) AddConn(indexableConn) {}
func (n nullMultiplexer) RemoveConn(indexableConn) error { return nil }
var _ = Describe("Client", func() {
var (
cl *client
packetConn *MockSendConn
connID protocol.ConnectionID
origMultiplexer multiplexer
tlsConf *tls.Config
tracer *mocklogging.MockConnectionTracer
config *Config
originalClientConnConstructor func(
ctx context.Context,
conn sendConn,
runner connRunner,
destConnID protocol.ConnectionID,
srcConnID protocol.ConnectionID,
connIDGenerator ConnectionIDGenerator,
conf *Config,
tlsConf *tls.Config,
initialPacketNumber protocol.PacketNumber,
enable0RTT bool,
hasNegotiatedVersion bool,
tracer *logging.ConnectionTracer,
logger utils.Logger,
v protocol.Version,
) quicConn
)
BeforeEach(func() {
tlsConf = &tls.Config{NextProtos: []string{"proto1"}}
connID = protocol.ParseConnectionID([]byte{0, 0, 0, 0, 0, 0, 0x13, 0x37})
originalClientConnConstructor = newClientConnection
var tr *logging.ConnectionTracer
tr, tracer = mocklogging.NewMockConnectionTracer(mockCtrl)
config = &Config{
Tracer: func(ctx context.Context, perspective logging.Perspective, id ConnectionID) *logging.ConnectionTracer {
return tr
},
Versions: []protocol.Version{protocol.Version1},
}
Eventually(areConnsRunning).Should(BeFalse())
packetConn = NewMockSendConn(mockCtrl)
packetConn.EXPECT().LocalAddr().Return(&net.UDPAddr{}).AnyTimes()
packetConn.EXPECT().RemoteAddr().Return(&net.UDPAddr{}).AnyTimes()
cl = &client{
srcConnID: connID,
destConnID: connID,
version: protocol.Version1,
sendConn: packetConn,
tracer: tr,
logger: utils.DefaultLogger,
}
getMultiplexer() // make the sync.Once execute
// replace the clientMuxer. getMultiplexer will now return the nullMultiplexer
origMultiplexer = connMuxer
connMuxer = &nullMultiplexer{}
})
AfterEach(func() {
connMuxer = origMultiplexer
newClientConnection = originalClientConnConstructor
})
AfterEach(func() {
if s, ok := cl.conn.(*connection); ok {
s.destroy(nil)
}
Eventually(areConnsRunning).Should(BeFalse())
})
Context("Dialing", func() {
var origGenerateConnectionIDForInitial func() (protocol.ConnectionID, error)
BeforeEach(func() {
origGenerateConnectionIDForInitial = generateConnectionIDForInitial
generateConnectionIDForInitial = func() (protocol.ConnectionID, error) {
return connID, nil
}
})
AfterEach(func() {
generateConnectionIDForInitial = origGenerateConnectionIDForInitial
})
It("returns after the handshake is complete", func() {
manager := NewMockPacketHandlerManager(mockCtrl)
manager.EXPECT().Add(gomock.Any(), gomock.Any())
run := make(chan struct{})
newClientConnection = func(
_ context.Context,
_ sendConn,
_ connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
_ *Config,
_ *tls.Config,
_ protocol.PacketNumber,
enable0RTT bool,
_ bool,
_ *logging.ConnectionTracer,
_ utils.Logger,
_ protocol.Version,
) quicConn {
Expect(enable0RTT).To(BeFalse())
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run().Do(func() error { close(run); return nil })
c := make(chan struct{})
close(c)
conn.EXPECT().HandshakeComplete().Return(c)
return conn
}
cl, err := newClient(packetConn, &protocol.DefaultConnectionIDGenerator{}, populateConfig(config), tlsConf, nil, false)
Expect(err).ToNot(HaveOccurred())
cl.packetHandlers = manager
Expect(cl).ToNot(BeNil())
Expect(cl.dial(context.Background())).To(Succeed())
Eventually(run).Should(BeClosed())
})
It("returns early connections", func() {
manager := NewMockPacketHandlerManager(mockCtrl)
manager.EXPECT().Add(gomock.Any(), gomock.Any())
readyChan := make(chan struct{})
done := make(chan struct{})
newClientConnection = func(
_ context.Context,
_ sendConn,
runner connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
_ *Config,
_ *tls.Config,
_ protocol.PacketNumber,
enable0RTT bool,
_ bool,
_ *logging.ConnectionTracer,
_ utils.Logger,
_ protocol.Version,
) quicConn {
Expect(enable0RTT).To(BeTrue())
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run().Do(func() error { close(done); return nil })
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
conn.EXPECT().earlyConnReady().Return(readyChan)
return conn
}
cl, err := newClient(packetConn, &protocol.DefaultConnectionIDGenerator{}, populateConfig(config), tlsConf, nil, true)
Expect(err).ToNot(HaveOccurred())
cl.packetHandlers = manager
Expect(cl).ToNot(BeNil())
Expect(cl.dial(context.Background())).To(Succeed())
Eventually(done).Should(BeClosed())
})
It("returns an error that occurs while waiting for the handshake to complete", func() {
manager := NewMockPacketHandlerManager(mockCtrl)
manager.EXPECT().Add(gomock.Any(), gomock.Any())
testErr := errors.New("early handshake error")
newClientConnection = func(
_ context.Context,
_ sendConn,
_ connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
_ *Config,
_ *tls.Config,
_ protocol.PacketNumber,
_ bool,
_ bool,
_ *logging.ConnectionTracer,
_ utils.Logger,
_ protocol.Version,
) quicConn {
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run().Return(testErr)
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
conn.EXPECT().earlyConnReady().Return(make(chan struct{}))
return conn
}
var closed bool
cl, err := newClient(packetConn, &protocol.DefaultConnectionIDGenerator{}, populateConfig(config), tlsConf, func() { closed = true }, true)
Expect(err).ToNot(HaveOccurred())
cl.packetHandlers = manager
Expect(cl).ToNot(BeNil())
Expect(cl.dial(context.Background())).To(MatchError(testErr))
Expect(closed).To(BeTrue())
})
Context("quic.Config", func() {
It("setups with the right values", func() {
tokenStore := NewLRUTokenStore(10, 4)
config := &Config{
HandshakeIdleTimeout: 1337 * time.Minute,
MaxIdleTimeout: 42 * time.Hour,
MaxIncomingStreams: 1234,
MaxIncomingUniStreams: 4321,
TokenStore: tokenStore,
EnableDatagrams: true,
}
c := populateConfig(config)
Expect(c.HandshakeIdleTimeout).To(Equal(1337 * time.Minute))
Expect(c.MaxIdleTimeout).To(Equal(42 * time.Hour))
Expect(c.MaxIncomingStreams).To(BeEquivalentTo(1234))
Expect(c.MaxIncomingUniStreams).To(BeEquivalentTo(4321))
Expect(c.TokenStore).To(Equal(tokenStore))
Expect(c.EnableDatagrams).To(BeTrue())
})
It("disables bidirectional streams", func() {
config := &Config{
MaxIncomingStreams: -1,
MaxIncomingUniStreams: 4321,
}
c := populateConfig(config)
Expect(c.MaxIncomingStreams).To(BeZero())
Expect(c.MaxIncomingUniStreams).To(BeEquivalentTo(4321))
})
It("disables unidirectional streams", func() {
config := &Config{
MaxIncomingStreams: 1234,
MaxIncomingUniStreams: -1,
}
c := populateConfig(config)
Expect(c.MaxIncomingStreams).To(BeEquivalentTo(1234))
Expect(c.MaxIncomingUniStreams).To(BeZero())
})
It("fills in default values if options are not set in the Config", func() {
c := populateConfig(&Config{})
Expect(c.Versions).To(Equal(protocol.SupportedVersions))
Expect(c.HandshakeIdleTimeout).To(Equal(protocol.DefaultHandshakeIdleTimeout))
Expect(c.MaxIdleTimeout).To(Equal(protocol.DefaultIdleTimeout))
})
})
It("creates new connections with the right parameters", func() {
config := &Config{Versions: []protocol.Version{protocol.Version1}}
c := make(chan struct{})
var version protocol.Version
var conf *Config
done := make(chan struct{})
newClientConnection = func(
_ context.Context,
connP sendConn,
_ connRunner,
_ protocol.ConnectionID,
_ protocol.ConnectionID,
_ ConnectionIDGenerator,
configP *Config,
_ *tls.Config,
_ protocol.PacketNumber,
_ bool,
_ bool,
_ *logging.ConnectionTracer,
_ utils.Logger,
versionP protocol.Version,
) quicConn {
version = versionP
conf = configP
close(c)
// TODO: check connection IDs?
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().run()
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
conn.EXPECT().destroy(gomock.Any()).MaxTimes(1)
close(done)
return conn
}
packetConn := NewMockPacketConn(mockCtrl)
packetConn.EXPECT().ReadFrom(gomock.Any()).DoAndReturn(func([]byte) (int, net.Addr, error) {
<-done
return 0, nil, errors.New("closed")
})
packetConn.EXPECT().LocalAddr()
packetConn.EXPECT().SetReadDeadline(gomock.Any()).AnyTimes()
_, err := Dial(context.Background(), packetConn, &net.UDPAddr{}, tlsConf, config)
Expect(err).ToNot(HaveOccurred())
Eventually(c).Should(BeClosed())
Expect(version).To(Equal(config.Versions[0]))
Expect(conf.Versions).To(Equal(config.Versions))
})
It("creates a new connections after version negotiation", func() {
var counter int
newClientConnection = func(
_ context.Context,
_ sendConn,
runner connRunner,
_ protocol.ConnectionID,
connID protocol.ConnectionID,
_ ConnectionIDGenerator,
configP *Config,
_ *tls.Config,
pn protocol.PacketNumber,
_ bool,
hasNegotiatedVersion bool,
_ *logging.ConnectionTracer,
_ utils.Logger,
versionP protocol.Version,
) quicConn {
conn := NewMockQUICConn(mockCtrl)
conn.EXPECT().HandshakeComplete().Return(make(chan struct{}))
if counter == 0 {
Expect(pn).To(BeZero())
Expect(hasNegotiatedVersion).To(BeFalse())
conn.EXPECT().run().DoAndReturn(func() error {
runner.Remove(connID)
return &errCloseForRecreating{
nextPacketNumber: 109,
nextVersion: 789,
}
})
} else {
Expect(pn).To(Equal(protocol.PacketNumber(109)))
Expect(hasNegotiatedVersion).To(BeTrue())
conn.EXPECT().run()
conn.EXPECT().destroy(gomock.Any())
}
counter++
return conn
}
config := &Config{Tracer: config.Tracer, Versions: []protocol.Version{protocol.Version1}}
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
_, err := DialAddr(context.Background(), "localhost:7890", tlsConf, config)
Expect(err).ToNot(HaveOccurred())
Expect(counter).To(Equal(2))
})
})
})
golang-github-lucas-clemente-quic-go-0.46.0/closed_conn.go
package quic
import (
"math/bits"
"net"
"github.com/quic-go/quic-go/internal/utils"
)
// A closedLocalConn is a connection that we closed locally.
// When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame,
// with an exponential backoff.
type closedLocalConn struct {
counter uint32
logger utils.Logger
sendPacket func(net.Addr, packetInfo)
}
var _ packetHandler = &closedLocalConn{}
// newClosedLocalConn creates a new closedLocalConn.
func newClosedLocalConn(sendPacket func(net.Addr, packetInfo), logger utils.Logger) packetHandler {
return &closedLocalConn{
sendPacket: sendPacket,
logger: logger,
}
}
func (c *closedLocalConn) handlePacket(p receivedPacket) {
c.counter++
// exponential backoff
// only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving
if bits.OnesCount32(c.counter) != 1 {
return
}
c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", c.counter)
c.sendPacket(p.remoteAddr, p.info)
}
func (c *closedLocalConn) destroy(error) {}
func (c *closedLocalConn) closeWithTransportError(TransportErrorCode) {}
// A closedRemoteConn is a connection that was closed remotely.
// For such a connection, we might receive reordered packets that were sent before the CONNECTION_CLOSE.
// We can just ignore those packets.
type closedRemoteConn struct{}
var _ packetHandler = &closedRemoteConn{}
func newClosedRemoteConn() packetHandler {
return &closedRemoteConn{}
}
func (c *closedRemoteConn) handlePacket(receivedPacket) {}
func (c *closedRemoteConn) destroy(error) {}
func (c *closedRemoteConn) closeWithTransportError(TransportErrorCode) {}
golang-github-lucas-clemente-quic-go-0.46.0/closed_conn_test.go
package quic
import (
"net"
"github.com/quic-go/quic-go/internal/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Closed local connection", func() {
It("repeats the packet containing the CONNECTION_CLOSE frame", func() {
written := make(chan net.Addr, 1)
conn := newClosedLocalConn(func(addr net.Addr, _ packetInfo) { written <- addr }, utils.DefaultLogger)
addr := &net.UDPAddr{IP: net.IPv4(127, 1, 2, 3), Port: 1337}
for i := 1; i <= 20; i++ {
conn.handlePacket(receivedPacket{remoteAddr: addr})
if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 {
Expect(written).To(Receive(Equal(addr))) // receive the CONNECTION_CLOSE
} else {
Expect(written).ToNot(Receive())
}
}
})
})
golang-github-lucas-clemente-quic-go-0.46.0/codecov.yml 0000664 0000000 0000000 00000000473 14656644531 0022760 0 ustar 00root root 0000000 0000000 coverage:
round: nearest
ignore:
- http3/gzip_reader.go
- interop/
- internal/handshake/cipher_suite.go
- internal/utils/linkedlist/linkedlist.go
- internal/testdata
- testutils/
- fuzzing/
- metrics/
status:
project:
default:
threshold: 0.5
patch: false
golang-github-lucas-clemente-quic-go-0.46.0/config.go 0000664 0000000 0000000 00000010310 14656644531 0022376 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"time"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/quicvarint"
)
// Clone clones a Config. The copy is shallow: pointer and function fields are shared with the original.
func (c *Config) Clone() *Config {
copy := *c
return &copy
}
func (c *Config) handshakeTimeout() time.Duration {
return 2 * c.HandshakeIdleTimeout
}
func (c *Config) maxRetryTokenAge() time.Duration {
return c.handshakeTimeout()
}
func validateConfig(config *Config) error {
if config == nil {
return nil
}
const maxStreams = 1 << 60
if config.MaxIncomingStreams > maxStreams {
config.MaxIncomingStreams = maxStreams
}
if config.MaxIncomingUniStreams > maxStreams {
config.MaxIncomingUniStreams = maxStreams
}
if config.MaxStreamReceiveWindow > quicvarint.Max {
config.MaxStreamReceiveWindow = quicvarint.Max
}
if config.MaxConnectionReceiveWindow > quicvarint.Max {
config.MaxConnectionReceiveWindow = quicvarint.Max
}
if config.InitialPacketSize > 0 && config.InitialPacketSize < protocol.MinInitialPacketSize {
config.InitialPacketSize = protocol.MinInitialPacketSize
}
if config.InitialPacketSize > protocol.MaxPacketBufferSize {
config.InitialPacketSize = protocol.MaxPacketBufferSize
}
// check that all QUIC versions are actually supported
for _, v := range config.Versions {
if !protocol.IsValidVersion(v) {
return fmt.Errorf("invalid QUIC version: %s", v)
}
}
return nil
}
// populateConfig populates fields in the quic.Config with their default values, if none are set.
// It may be called with nil.
func populateConfig(config *Config) *Config {
if config == nil {
config = &Config{}
}
versions := config.Versions
if len(versions) == 0 {
versions = protocol.SupportedVersions
}
handshakeIdleTimeout := protocol.DefaultHandshakeIdleTimeout
if config.HandshakeIdleTimeout != 0 {
handshakeIdleTimeout = config.HandshakeIdleTimeout
}
idleTimeout := protocol.DefaultIdleTimeout
if config.MaxIdleTimeout != 0 {
idleTimeout = config.MaxIdleTimeout
}
initialStreamReceiveWindow := config.InitialStreamReceiveWindow
if initialStreamReceiveWindow == 0 {
initialStreamReceiveWindow = protocol.DefaultInitialMaxStreamData
}
maxStreamReceiveWindow := config.MaxStreamReceiveWindow
if maxStreamReceiveWindow == 0 {
maxStreamReceiveWindow = protocol.DefaultMaxReceiveStreamFlowControlWindow
}
initialConnectionReceiveWindow := config.InitialConnectionReceiveWindow
if initialConnectionReceiveWindow == 0 {
initialConnectionReceiveWindow = protocol.DefaultInitialMaxData
}
maxConnectionReceiveWindow := config.MaxConnectionReceiveWindow
if maxConnectionReceiveWindow == 0 {
maxConnectionReceiveWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow
}
maxIncomingStreams := config.MaxIncomingStreams
if maxIncomingStreams == 0 {
maxIncomingStreams = protocol.DefaultMaxIncomingStreams
} else if maxIncomingStreams < 0 {
maxIncomingStreams = 0
}
maxIncomingUniStreams := config.MaxIncomingUniStreams
if maxIncomingUniStreams == 0 {
maxIncomingUniStreams = protocol.DefaultMaxIncomingUniStreams
} else if maxIncomingUniStreams < 0 {
maxIncomingUniStreams = 0
}
initialPacketSize := config.InitialPacketSize
if initialPacketSize == 0 {
initialPacketSize = protocol.InitialPacketSize
}
return &Config{
GetConfigForClient: config.GetConfigForClient,
Versions: versions,
HandshakeIdleTimeout: handshakeIdleTimeout,
MaxIdleTimeout: idleTimeout,
KeepAlivePeriod: config.KeepAlivePeriod,
InitialStreamReceiveWindow: initialStreamReceiveWindow,
MaxStreamReceiveWindow: maxStreamReceiveWindow,
InitialConnectionReceiveWindow: initialConnectionReceiveWindow,
MaxConnectionReceiveWindow: maxConnectionReceiveWindow,
AllowConnectionWindowIncrease: config.AllowConnectionWindowIncrease,
MaxIncomingStreams: maxIncomingStreams,
MaxIncomingUniStreams: maxIncomingUniStreams,
TokenStore: config.TokenStore,
EnableDatagrams: config.EnableDatagrams,
InitialPacketSize: initialPacketSize,
DisablePathMTUDiscovery: config.DisablePathMTUDiscovery,
Allow0RTT: config.Allow0RTT,
Tracer: config.Tracer,
}
}
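// Minimal illustrative sketch (not used by the library) of how the two helpers above
// compose: validateConfig clamps out-of-range values in place and rejects unsupported
// versions, and populateConfig then fills any unset fields with the protocol defaults.
// The function name is hypothetical.
func exampleValidateAndPopulate(conf *Config) (*Config, error) {
	if err := validateConfig(conf); err != nil {
		return nil, err
	}
	return populateConfig(conf), nil
}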
golang-github-lucas-clemente-quic-go-0.46.0/config_test.go 0000664 0000000 0000000 00000015060 14656644531 0023444 0 ustar 00root root 0000000 0000000 package quic
import (
"context"
"errors"
"fmt"
"reflect"
"time"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/logging"
"github.com/quic-go/quic-go/quicvarint"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Config", func() {
Context("validating", func() {
It("validates a nil config", func() {
Expect(validateConfig(nil)).To(Succeed())
})
It("validates a config with normal values", func() {
conf := populateConfig(&Config{
MaxIncomingStreams: 5,
MaxStreamReceiveWindow: 10,
})
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.MaxIncomingStreams).To(BeEquivalentTo(5))
Expect(conf.MaxStreamReceiveWindow).To(BeEquivalentTo(10))
})
It("clips too large values for the stream limits", func() {
conf := &Config{
MaxIncomingStreams: 1<<60 + 1,
MaxIncomingUniStreams: 1<<60 + 2,
}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.MaxIncomingStreams).To(BeEquivalentTo(int64(1 << 60)))
Expect(conf.MaxIncomingUniStreams).To(BeEquivalentTo(int64(1 << 60)))
})
It("clips too large values for the flow control windows", func() {
conf := &Config{
MaxStreamReceiveWindow: quicvarint.Max + 1,
MaxConnectionReceiveWindow: quicvarint.Max + 2,
}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.MaxStreamReceiveWindow).To(BeEquivalentTo(uint64(quicvarint.Max)))
Expect(conf.MaxConnectionReceiveWindow).To(BeEquivalentTo(uint64(quicvarint.Max)))
})
It("increases too small packet sizes", func() {
conf := &Config{InitialPacketSize: 10}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.InitialPacketSize).To(BeEquivalentTo(1200))
})
It("clips too large packet sizes", func() {
conf := &Config{InitialPacketSize: protocol.MaxPacketBufferSize + 1}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.InitialPacketSize).To(BeEquivalentTo(protocol.MaxPacketBufferSize))
})
It("doesn't modify the InitialPacketSize if it is unset", func() {
conf := &Config{InitialPacketSize: 0}
Expect(validateConfig(conf)).To(Succeed())
Expect(conf.InitialPacketSize).To(BeZero())
})
})
configWithNonZeroNonFunctionFields := func() *Config {
c := &Config{}
v := reflect.ValueOf(c).Elem()
typ := v.Type()
for i := 0; i < typ.NumField(); i++ {
f := v.Field(i)
if !f.CanSet() {
// unexported field; not cloned.
continue
}
switch fn := typ.Field(i).Name; fn {
case "GetConfigForClient", "RequireAddressValidation", "GetLogWriter", "AllowConnectionWindowIncrease", "Tracer":
// Can't compare functions.
case "Versions":
f.Set(reflect.ValueOf([]Version{1, 2, 3}))
case "ConnectionIDLength":
f.Set(reflect.ValueOf(8))
case "ConnectionIDGenerator":
f.Set(reflect.ValueOf(&protocol.DefaultConnectionIDGenerator{ConnLen: protocol.DefaultConnectionIDLength}))
case "HandshakeIdleTimeout":
f.Set(reflect.ValueOf(time.Second))
case "MaxIdleTimeout":
f.Set(reflect.ValueOf(time.Hour))
case "TokenStore":
f.Set(reflect.ValueOf(NewLRUTokenStore(2, 3)))
case "InitialStreamReceiveWindow":
f.Set(reflect.ValueOf(uint64(1234)))
case "MaxStreamReceiveWindow":
f.Set(reflect.ValueOf(uint64(9)))
case "InitialConnectionReceiveWindow":
f.Set(reflect.ValueOf(uint64(4321)))
case "MaxConnectionReceiveWindow":
f.Set(reflect.ValueOf(uint64(10)))
case "MaxIncomingStreams":
f.Set(reflect.ValueOf(int64(11)))
case "MaxIncomingUniStreams":
f.Set(reflect.ValueOf(int64(12)))
case "StatelessResetKey":
f.Set(reflect.ValueOf(&StatelessResetKey{1, 2, 3, 4}))
case "KeepAlivePeriod":
f.Set(reflect.ValueOf(time.Second))
case "EnableDatagrams":
f.Set(reflect.ValueOf(true))
case "DisableVersionNegotiationPackets":
f.Set(reflect.ValueOf(true))
case "InitialPacketSize":
f.Set(reflect.ValueOf(uint16(1350)))
case "DisablePathMTUDiscovery":
f.Set(reflect.ValueOf(true))
case "Allow0RTT":
f.Set(reflect.ValueOf(true))
default:
Fail(fmt.Sprintf("all fields must be accounted for, but saw unknown field %q", fn))
}
}
return c
}
It("uses twice the handshake idle timeouts for the handshake timeout", func() {
c := &Config{HandshakeIdleTimeout: time.Second * 11 / 2}
Expect(c.handshakeTimeout()).To(Equal(11 * time.Second))
})
Context("cloning", func() {
It("clones function fields", func() {
var calledAllowConnectionWindowIncrease, calledTracer bool
c1 := &Config{
GetConfigForClient: func(info *ClientHelloInfo) (*Config, error) { return nil, errors.New("nope") },
AllowConnectionWindowIncrease: func(Connection, uint64) bool { calledAllowConnectionWindowIncrease = true; return true },
Tracer: func(context.Context, logging.Perspective, ConnectionID) *logging.ConnectionTracer {
calledTracer = true
return nil
},
}
c2 := c1.Clone()
c2.AllowConnectionWindowIncrease(nil, 1234)
Expect(calledAllowConnectionWindowIncrease).To(BeTrue())
_, err := c2.GetConfigForClient(&ClientHelloInfo{})
Expect(err).To(MatchError("nope"))
c2.Tracer(context.Background(), logging.PerspectiveClient, protocol.ConnectionID{})
Expect(calledTracer).To(BeTrue())
})
It("clones non-function fields", func() {
c := configWithNonZeroNonFunctionFields()
Expect(c.Clone()).To(Equal(c))
})
It("returns a copy", func() {
c1 := &Config{MaxIncomingStreams: 100}
c2 := c1.Clone()
c2.MaxIncomingStreams = 200
Expect(c1.MaxIncomingStreams).To(BeEquivalentTo(100))
})
})
Context("populating", func() {
It("copies non-function fields", func() {
c := configWithNonZeroNonFunctionFields()
Expect(populateConfig(c)).To(Equal(c))
})
It("populates empty fields with default values", func() {
c := populateConfig(&Config{})
Expect(c.Versions).To(Equal(protocol.SupportedVersions))
Expect(c.HandshakeIdleTimeout).To(Equal(protocol.DefaultHandshakeIdleTimeout))
Expect(c.InitialStreamReceiveWindow).To(BeEquivalentTo(protocol.DefaultInitialMaxStreamData))
Expect(c.MaxStreamReceiveWindow).To(BeEquivalentTo(protocol.DefaultMaxReceiveStreamFlowControlWindow))
Expect(c.InitialConnectionReceiveWindow).To(BeEquivalentTo(protocol.DefaultInitialMaxData))
Expect(c.MaxConnectionReceiveWindow).To(BeEquivalentTo(protocol.DefaultMaxReceiveConnectionFlowControlWindow))
Expect(c.MaxIncomingStreams).To(BeEquivalentTo(protocol.DefaultMaxIncomingStreams))
Expect(c.MaxIncomingUniStreams).To(BeEquivalentTo(protocol.DefaultMaxIncomingUniStreams))
Expect(c.DisablePathMTUDiscovery).To(BeFalse())
Expect(c.GetConfigForClient).To(BeNil())
})
})
})
golang-github-lucas-clemente-quic-go-0.46.0/conn_id_generator.go 0000664 0000000 0000000 00000010613 14656644531 0024616 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
)
type connIDGenerator struct {
generator ConnectionIDGenerator
highestSeq uint64
activeSrcConnIDs map[uint64]protocol.ConnectionID
initialClientDestConnID *protocol.ConnectionID // nil for the client
addConnectionID func(protocol.ConnectionID)
getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken
removeConnectionID func(protocol.ConnectionID)
retireConnectionID func(protocol.ConnectionID)
replaceWithClosed func([]protocol.ConnectionID, []byte)
queueControlFrame func(wire.Frame)
}
func newConnIDGenerator(
initialConnectionID protocol.ConnectionID,
initialClientDestConnID *protocol.ConnectionID, // nil for the client
addConnectionID func(protocol.ConnectionID),
getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken,
removeConnectionID func(protocol.ConnectionID),
retireConnectionID func(protocol.ConnectionID),
replaceWithClosed func([]protocol.ConnectionID, []byte),
queueControlFrame func(wire.Frame),
generator ConnectionIDGenerator,
) *connIDGenerator {
m := &connIDGenerator{
generator: generator,
activeSrcConnIDs: make(map[uint64]protocol.ConnectionID),
addConnectionID: addConnectionID,
getStatelessResetToken: getStatelessResetToken,
removeConnectionID: removeConnectionID,
retireConnectionID: retireConnectionID,
replaceWithClosed: replaceWithClosed,
queueControlFrame: queueControlFrame,
}
m.activeSrcConnIDs[0] = initialConnectionID
m.initialClientDestConnID = initialClientDestConnID
return m
}
func (m *connIDGenerator) SetMaxActiveConnIDs(limit uint64) error {
if m.generator.ConnectionIDLen() == 0 {
return nil
}
// The active_connection_id_limit transport parameter is the number of
// connection IDs the peer will store. This limit includes the connection ID
// used during the handshake, and the one sent in the preferred_address
// transport parameter.
// We currently don't send the preferred_address transport parameter,
// so we can issue (limit - 1) connection IDs.
for i := uint64(len(m.activeSrcConnIDs)); i < min(limit, protocol.MaxIssuedConnectionIDs); i++ {
if err := m.issueNewConnID(); err != nil {
return err
}
}
return nil
}
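// maxNewConnIDsToIssue is an illustrative helper (not part of the upstream code):
// with only the handshake connection ID active, a peer limit of `limit` (at least 2,
// per RFC 9000) allows issuing min(limit, protocol.MaxIssuedConnectionIDs) - 1
// additional connection IDs, mirroring the loop bound above.
func maxNewConnIDsToIssue(limit uint64) uint64 {
	return min(limit, protocol.MaxIssuedConnectionIDs) - 1
}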
func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.ConnectionID) error {
if seq > m.highestSeq {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (highest issued: %d)", seq, m.highestSeq),
}
}
connID, ok := m.activeSrcConnIDs[seq]
// We might already have deleted this connection ID, if this is a duplicate frame.
if !ok {
return nil
}
if connID == sentWithDestConnID {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", seq, connID),
}
}
m.retireConnectionID(connID)
delete(m.activeSrcConnIDs, seq)
// Don't issue a replacement for the initial connection ID.
if seq == 0 {
return nil
}
return m.issueNewConnID()
}
func (m *connIDGenerator) issueNewConnID() error {
connID, err := m.generator.GenerateConnectionID()
if err != nil {
return err
}
m.activeSrcConnIDs[m.highestSeq+1] = connID
m.addConnectionID(connID)
m.queueControlFrame(&wire.NewConnectionIDFrame{
SequenceNumber: m.highestSeq + 1,
ConnectionID: connID,
StatelessResetToken: m.getStatelessResetToken(connID),
})
m.highestSeq++
return nil
}
func (m *connIDGenerator) SetHandshakeComplete() {
if m.initialClientDestConnID != nil {
m.retireConnectionID(*m.initialClientDestConnID)
m.initialClientDestConnID = nil
}
}
func (m *connIDGenerator) RemoveAll() {
if m.initialClientDestConnID != nil {
m.removeConnectionID(*m.initialClientDestConnID)
}
for _, connID := range m.activeSrcConnIDs {
m.removeConnectionID(connID)
}
}
func (m *connIDGenerator) ReplaceWithClosed(connClose []byte) {
connIDs := make([]protocol.ConnectionID, 0, len(m.activeSrcConnIDs)+1)
if m.initialClientDestConnID != nil {
connIDs = append(connIDs, *m.initialClientDestConnID)
}
for _, connID := range m.activeSrcConnIDs {
connIDs = append(connIDs, connID)
}
m.replaceWithClosed(connIDs, connClose)
}
golang-github-lucas-clemente-quic-go-0.46.0/conn_id_generator_test.go 0000664 0000000 0000000 00000016353 14656644531 0025664 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Connection ID Generator", func() {
var (
addedConnIDs []protocol.ConnectionID
retiredConnIDs []protocol.ConnectionID
removedConnIDs []protocol.ConnectionID
replacedWithClosed []protocol.ConnectionID
queuedFrames []wire.Frame
g *connIDGenerator
)
initialConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7})
initialClientDestConnID := protocol.ParseConnectionID([]byte{0xa, 0xb, 0xc, 0xd, 0xe})
connIDToToken := func(c protocol.ConnectionID) protocol.StatelessResetToken {
b := c.Bytes()[0]
return protocol.StatelessResetToken{b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b}
}
BeforeEach(func() {
addedConnIDs = nil
retiredConnIDs = nil
removedConnIDs = nil
queuedFrames = nil
replacedWithClosed = nil
g = newConnIDGenerator(
initialConnID,
&initialClientDestConnID,
func(c protocol.ConnectionID) { addedConnIDs = append(addedConnIDs, c) },
connIDToToken,
func(c protocol.ConnectionID) { removedConnIDs = append(removedConnIDs, c) },
func(c protocol.ConnectionID) { retiredConnIDs = append(retiredConnIDs, c) },
func(cs []protocol.ConnectionID, _ []byte) { replacedWithClosed = append(replacedWithClosed, cs...) },
func(f wire.Frame) { queuedFrames = append(queuedFrames, f) },
&protocol.DefaultConnectionIDGenerator{ConnLen: initialConnID.Len()},
)
})
It("issues new connection IDs", func() {
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(retiredConnIDs).To(BeEmpty())
Expect(addedConnIDs).To(HaveLen(3))
for i := 0; i < len(addedConnIDs)-1; i++ {
Expect(addedConnIDs[i]).ToNot(Equal(addedConnIDs[i+1]))
}
Expect(queuedFrames).To(HaveLen(3))
for i := 0; i < 3; i++ {
f := queuedFrames[i]
Expect(f).To(BeAssignableToTypeOf(&wire.NewConnectionIDFrame{}))
nf := f.(*wire.NewConnectionIDFrame)
Expect(nf.SequenceNumber).To(BeEquivalentTo(i + 1))
Expect(nf.ConnectionID.Len()).To(Equal(7))
Expect(nf.StatelessResetToken).To(Equal(connIDToToken(nf.ConnectionID)))
}
})
It("limits the number of connection IDs that it issues", func() {
Expect(g.SetMaxActiveConnIDs(9999999)).To(Succeed())
Expect(retiredConnIDs).To(BeEmpty())
Expect(addedConnIDs).To(HaveLen(protocol.MaxIssuedConnectionIDs - 1))
Expect(queuedFrames).To(HaveLen(protocol.MaxIssuedConnectionIDs - 1))
})
// SetMaxActiveConnIDs is called twice when dialing a 0-RTT connection:
// once with the value restored from the original connection, and once when we receive the transport parameters
Context("dealing with 0-RTT", func() {
It("doesn't issue new connection IDs when SetMaxActiveConnIDs is called with the same value", func() {
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(queuedFrames).To(HaveLen(3))
queuedFrames = nil
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(queuedFrames).To(BeEmpty())
})
It("issues more connection IDs if the server allows a higher limit on the resumed connection", func() {
Expect(g.SetMaxActiveConnIDs(3)).To(Succeed())
Expect(queuedFrames).To(HaveLen(2))
queuedFrames = nil
Expect(g.SetMaxActiveConnIDs(6)).To(Succeed())
Expect(queuedFrames).To(HaveLen(3))
})
It("issues more connection IDs if the server allows a higher limit on the resumed connection, when connection IDs were retired in between", func() {
Expect(g.SetMaxActiveConnIDs(3)).To(Succeed())
Expect(queuedFrames).To(HaveLen(2))
queuedFrames = nil
g.Retire(1, protocol.ConnectionID{})
Expect(queuedFrames).To(HaveLen(1))
queuedFrames = nil
Expect(g.SetMaxActiveConnIDs(6)).To(Succeed())
Expect(queuedFrames).To(HaveLen(3))
})
})
It("errors if the peers tries to retire a connection ID that wasn't yet issued", func() {
Expect(g.Retire(1, protocol.ConnectionID{})).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "retired connection ID 1 (highest issued: 0)",
}))
})
It("errors if the peers tries to retire a connection ID in a packet with that connection ID", func() {
Expect(g.SetMaxActiveConnIDs(4)).To(Succeed())
Expect(queuedFrames).ToNot(BeEmpty())
Expect(queuedFrames[0]).To(BeAssignableToTypeOf(&wire.NewConnectionIDFrame{}))
f := queuedFrames[0].(*wire.NewConnectionIDFrame)
Expect(g.Retire(f.SequenceNumber, f.ConnectionID)).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", f.SequenceNumber, f.ConnectionID),
}))
})
It("issues new connection IDs, when old ones are retired", func() {
Expect(g.SetMaxActiveConnIDs(5)).To(Succeed())
queuedFrames = nil
Expect(retiredConnIDs).To(BeEmpty())
Expect(g.Retire(3, protocol.ConnectionID{})).To(Succeed())
Expect(queuedFrames).To(HaveLen(1))
Expect(queuedFrames[0]).To(BeAssignableToTypeOf(&wire.NewConnectionIDFrame{}))
nf := queuedFrames[0].(*wire.NewConnectionIDFrame)
Expect(nf.SequenceNumber).To(BeEquivalentTo(5))
Expect(nf.ConnectionID.Len()).To(Equal(7))
})
It("retires the initial connection ID", func() {
Expect(g.Retire(0, protocol.ConnectionID{})).To(Succeed())
Expect(removedConnIDs).To(BeEmpty())
Expect(retiredConnIDs).To(HaveLen(1))
Expect(retiredConnIDs[0]).To(Equal(initialConnID))
Expect(addedConnIDs).To(BeEmpty())
})
It("handles duplicate retirements", func() {
Expect(g.SetMaxActiveConnIDs(11)).To(Succeed())
queuedFrames = nil
Expect(retiredConnIDs).To(BeEmpty())
Expect(g.Retire(5, protocol.ConnectionID{})).To(Succeed())
Expect(retiredConnIDs).To(HaveLen(1))
Expect(queuedFrames).To(HaveLen(1))
Expect(g.Retire(5, protocol.ConnectionID{})).To(Succeed())
Expect(retiredConnIDs).To(HaveLen(1))
Expect(queuedFrames).To(HaveLen(1))
})
It("retires the client's initial destination connection ID when the handshake completes", func() {
g.SetHandshakeComplete()
Expect(retiredConnIDs).To(HaveLen(1))
Expect(retiredConnIDs[0]).To(Equal(initialClientDestConnID))
})
It("removes all connection IDs", func() {
Expect(g.SetMaxActiveConnIDs(5)).To(Succeed())
Expect(queuedFrames).To(HaveLen(4))
g.RemoveAll()
Expect(removedConnIDs).To(HaveLen(6)) // initial conn ID, initial client dest conn id, and newly issued ones
Expect(removedConnIDs).To(ContainElement(initialConnID))
Expect(removedConnIDs).To(ContainElement(initialClientDestConnID))
for _, f := range queuedFrames {
nf := f.(*wire.NewConnectionIDFrame)
Expect(removedConnIDs).To(ContainElement(nf.ConnectionID))
}
})
It("replaces with a closed connection for all connection IDs", func() {
Expect(g.SetMaxActiveConnIDs(5)).To(Succeed())
Expect(queuedFrames).To(HaveLen(4))
g.ReplaceWithClosed([]byte("foobar"))
Expect(replacedWithClosed).To(HaveLen(6)) // initial conn ID, initial client dest conn id, and newly issued ones
Expect(replacedWithClosed).To(ContainElement(initialClientDestConnID))
Expect(replacedWithClosed).To(ContainElement(initialConnID))
for _, f := range queuedFrames {
nf := f.(*wire.NewConnectionIDFrame)
Expect(replacedWithClosed).To(ContainElement(nf.ConnectionID))
}
})
})
golang-github-lucas-clemente-quic-go-0.46.0/conn_id_manager.go 0000664 0000000 0000000 00000015224 14656644531 0024245 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
list "github.com/quic-go/quic-go/internal/utils/linkedlist"
"github.com/quic-go/quic-go/internal/wire"
)
type newConnID struct {
SequenceNumber uint64
ConnectionID protocol.ConnectionID
StatelessResetToken protocol.StatelessResetToken
}
type connIDManager struct {
queue list.List[newConnID]
handshakeComplete bool
activeSequenceNumber uint64
highestRetired uint64
activeConnectionID protocol.ConnectionID
activeStatelessResetToken *protocol.StatelessResetToken
// We change the connection ID after sending on average
// protocol.PacketsPerConnectionID packets. The actual value is randomized
// hide the packet loss rate from on-path observers.
rand utils.Rand
packetsSinceLastChange uint32
packetsPerConnectionID uint32
addStatelessResetToken func(protocol.StatelessResetToken)
removeStatelessResetToken func(protocol.StatelessResetToken)
queueControlFrame func(wire.Frame)
}
func newConnIDManager(
initialDestConnID protocol.ConnectionID,
addStatelessResetToken func(protocol.StatelessResetToken),
removeStatelessResetToken func(protocol.StatelessResetToken),
queueControlFrame func(wire.Frame),
) *connIDManager {
return &connIDManager{
activeConnectionID: initialDestConnID,
addStatelessResetToken: addStatelessResetToken,
removeStatelessResetToken: removeStatelessResetToken,
queueControlFrame: queueControlFrame,
}
}
func (h *connIDManager) AddFromPreferredAddress(connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error {
return h.addConnectionID(1, connID, resetToken)
}
func (h *connIDManager) Add(f *wire.NewConnectionIDFrame) error {
if err := h.add(f); err != nil {
return err
}
if h.queue.Len() >= protocol.MaxActiveConnectionIDs {
return &qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError}
}
return nil
}
func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error {
// If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than that of the currently
// active connection ID, or if it was already retired, send a RETIRE_CONNECTION_ID frame immediately.
if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired {
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: f.SequenceNumber,
})
return nil
}
// Retire elements in the queue.
// Doesn't retire the active connection ID.
if f.RetirePriorTo > h.highestRetired {
var next *list.Element[newConnID]
for el := h.queue.Front(); el != nil; el = next {
if el.Value.SequenceNumber >= f.RetirePriorTo {
break
}
next = el.Next()
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: el.Value.SequenceNumber,
})
h.queue.Remove(el)
}
h.highestRetired = f.RetirePriorTo
}
if f.SequenceNumber == h.activeSequenceNumber {
return nil
}
if err := h.addConnectionID(f.SequenceNumber, f.ConnectionID, f.StatelessResetToken); err != nil {
return err
}
// Retire the active connection ID, if necessary.
if h.activeSequenceNumber < f.RetirePriorTo {
// The queue is guaranteed to have at least one element at this point.
h.updateConnectionID()
}
return nil
}
func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error {
// insert a new element at the end
if h.queue.Len() == 0 || h.queue.Back().Value.SequenceNumber < seq {
h.queue.PushBack(newConnID{
SequenceNumber: seq,
ConnectionID: connID,
StatelessResetToken: resetToken,
})
return nil
}
// insert a new element somewhere in the middle
for el := h.queue.Front(); el != nil; el = el.Next() {
if el.Value.SequenceNumber == seq {
if el.Value.ConnectionID != connID {
return fmt.Errorf("received conflicting connection IDs for sequence number %d", seq)
}
if el.Value.StatelessResetToken != resetToken {
return fmt.Errorf("received conflicting stateless reset tokens for sequence number %d", seq)
}
break
}
if el.Value.SequenceNumber > seq {
h.queue.InsertBefore(newConnID{
SequenceNumber: seq,
ConnectionID: connID,
StatelessResetToken: resetToken,
}, el)
break
}
}
return nil
}
func (h *connIDManager) updateConnectionID() {
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: h.activeSequenceNumber,
})
h.highestRetired = max(h.highestRetired, h.activeSequenceNumber)
if h.activeStatelessResetToken != nil {
h.removeStatelessResetToken(*h.activeStatelessResetToken)
}
front := h.queue.Remove(h.queue.Front())
h.activeSequenceNumber = front.SequenceNumber
h.activeConnectionID = front.ConnectionID
h.activeStatelessResetToken = &front.StatelessResetToken
h.packetsSinceLastChange = 0
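// Pick the next rotation threshold uniformly from [PacketsPerConnectionID/2, 3*PacketsPerConnectionID/2),
// so that the exact rotation point doesn't leak the packet count to on-path observers.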
h.packetsPerConnectionID = protocol.PacketsPerConnectionID/2 + uint32(h.rand.Int31n(protocol.PacketsPerConnectionID))
h.addStatelessResetToken(*h.activeStatelessResetToken)
}
func (h *connIDManager) Close() {
if h.activeStatelessResetToken != nil {
h.removeStatelessResetToken(*h.activeStatelessResetToken)
}
}
// ChangeInitialConnID is called when the server performs a Retry
// and when the server changes the connection ID in the first Initial sent
func (h *connIDManager) ChangeInitialConnID(newConnID protocol.ConnectionID) {
if h.activeSequenceNumber != 0 {
panic("expected first connection ID to have sequence number 0")
}
h.activeConnectionID = newConnID
}
// SetStatelessResetToken is called when the server provides a stateless reset token in the transport parameters
func (h *connIDManager) SetStatelessResetToken(token protocol.StatelessResetToken) {
if h.activeSequenceNumber != 0 {
panic("expected first connection ID to have sequence number 0")
}
h.activeStatelessResetToken = &token
h.addStatelessResetToken(token)
}
func (h *connIDManager) SentPacket() {
h.packetsSinceLastChange++
}
func (h *connIDManager) shouldUpdateConnID() bool {
if !h.handshakeComplete {
return false
}
// initiate the first change as early as possible (after handshake completion)
if h.queue.Len() > 0 && h.activeSequenceNumber == 0 {
return true
}
// For later changes, only change if
// 1. The queue of connection IDs is filled more than 50%.
// 2. We sent at least PacketsPerConnectionID packets
return 2*h.queue.Len() >= protocol.MaxActiveConnectionIDs &&
h.packetsSinceLastChange >= h.packetsPerConnectionID
}
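// Get returns the connection ID to use for the next outgoing packet,
// first rotating to a fresh one if the conditions in shouldUpdateConnID are met.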
func (h *connIDManager) Get() protocol.ConnectionID {
if h.shouldUpdateConnID() {
h.updateConnectionID()
}
return h.activeConnectionID
}
func (h *connIDManager) SetHandshakeComplete() {
h.handshakeComplete = true
}
golang-github-lucas-clemente-quic-go-0.46.0/conn_id_manager_test.go 0000664 0000000 0000000 00000035111 14656644531 0025301 0 ustar 00root root 0000000 0000000 package quic
import (
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Connection ID Manager", func() {
var (
m *connIDManager
frameQueue []wire.Frame
tokenAdded *protocol.StatelessResetToken
removedTokens []protocol.StatelessResetToken
)
initialConnID := protocol.ParseConnectionID([]byte{0, 0, 0, 0})
BeforeEach(func() {
frameQueue = nil
tokenAdded = nil
removedTokens = nil
m = newConnIDManager(
initialConnID,
func(token protocol.StatelessResetToken) { tokenAdded = &token },
func(token protocol.StatelessResetToken) { removedTokens = append(removedTokens, token) },
func(f wire.Frame) { frameQueue = append(frameQueue, f) },
)
})
get := func() (protocol.ConnectionID, protocol.StatelessResetToken) {
if m.queue.Len() == 0 {
return protocol.ConnectionID{}, protocol.StatelessResetToken{}
}
val := m.queue.Remove(m.queue.Front())
return val.ConnectionID, val.StatelessResetToken
}
It("returns the initial connection ID", func() {
Expect(m.Get()).To(Equal(initialConnID))
})
It("changes the initial connection ID", func() {
m.ChangeInitialConnID(protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5}))
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5})))
})
It("sets the token for the first connection ID", func() {
token := protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
m.SetStatelessResetToken(token)
Expect(*m.activeStatelessResetToken).To(Equal(token))
Expect(*tokenAdded).To(Equal(token))
})
It("adds and gets connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{2, 3, 4, 5}),
StatelessResetToken: protocol.StatelessResetToken{0xe, 0xd, 0xc, 0xb, 0xa, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 4,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
})).To(Succeed())
c1, rt1 := get()
Expect(c1).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
Expect(rt1).To(Equal(protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe}))
c2, rt2 := get()
Expect(c2).To(Equal(protocol.ParseConnectionID([]byte{2, 3, 4, 5})))
Expect(rt2).To(Equal(protocol.StatelessResetToken{0xe, 0xd, 0xc, 0xb, 0xa, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}))
c3, _ := get()
Expect(c3).To(BeZero())
})
It("accepts duplicates", func() {
f1 := &wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
}
f2 := &wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
}
Expect(m.Add(f1)).To(Succeed())
Expect(m.Add(f2)).To(Succeed())
c1, rt1 := get()
Expect(c1).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
Expect(rt1).To(Equal(protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe}))
c2, _ := get()
Expect(c2).To(BeZero())
})
It("ignores duplicates for the currently used connection ID", func() {
f := &wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
}
m.SetHandshakeComplete()
Expect(m.Add(f)).To(Succeed())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
c, _ := get()
Expect(c).To(BeZero())
// Now send the same connection ID again. It should not be queued.
Expect(m.Add(f)).To(Succeed())
c, _ = get()
Expect(c).To(BeZero())
})
It("rejects duplicates with different connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{2, 3, 4, 5}),
})).To(MatchError("received conflicting connection IDs for sequence number 42"))
})
It("rejects duplicates with different connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe},
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 42,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{0xe, 0xd, 0xc, 0xb, 0xa, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
})).To(MatchError("received conflicting stateless reset tokens for sequence number 42"))
})
It("retires connection IDs", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
})).To(Succeed())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 13,
ConnectionID: protocol.ParseConnectionID([]byte{2, 3, 4, 5}),
})).To(Succeed())
Expect(frameQueue).To(BeEmpty())
Expect(m.Add(&wire.NewConnectionIDFrame{
RetirePriorTo: 14,
SequenceNumber: 17,
ConnectionID: protocol.ParseConnectionID([]byte{3, 4, 5, 6}),
})).To(Succeed())
Expect(frameQueue).To(HaveLen(3))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(10))
Expect(frameQueue[1].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(13))
Expect(frameQueue[2].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{3, 4, 5, 6})))
})
It("ignores reordered connection IDs, if their sequence number was already retired", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
RetirePriorTo: 5,
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
frameQueue = nil
// If this NEW_CONNECTION_ID frame hadn't been reordered, we would have retired it before.
// Make sure it gets retired immediately now.
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 4,
ConnectionID: protocol.ParseConnectionID([]byte{4, 3, 2, 1}),
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(4))
})
It("ignores reordered connection IDs, if their sequence number was already retired or less than active", func() {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
RetirePriorTo: 5,
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
frameQueue = nil
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})))
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 9,
ConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xca, 0xfb, 0xad}),
RetirePriorTo: 5,
})).To(Succeed())
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(9))
})
It("accepts retransmissions for the connection ID that is in use", func() {
connID := protocol.ParseConnectionID([]byte{1, 2, 3, 4})
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: connID,
})).To(Succeed())
m.SetHandshakeComplete()
Expect(frameQueue).To(BeEmpty())
Expect(m.Get()).To(Equal(connID))
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0]).To(BeAssignableToTypeOf(&wire.RetireConnectionIDFrame{}))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeZero())
frameQueue = nil
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: connID,
})).To(Succeed())
Expect(frameQueue).To(BeEmpty())
})
It("errors when the peer sends too connection IDs", func() {
for i := uint8(1); i < protocol.MaxActiveConnectionIDs; i++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(i),
ConnectionID: protocol.ParseConnectionID([]byte{i, i, i, i}),
StatelessResetToken: protocol.StatelessResetToken{i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i},
})).To(Succeed())
}
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(9999),
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
})).To(MatchError(&qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError}))
})
It("initiates the first connection ID update as soon as possible", func() {
Expect(m.Get()).To(Equal(initialConnID))
m.SetHandshakeComplete()
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
})).To(Succeed())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
})
It("waits until handshake completion before initiating a connection ID update", func() {
Expect(m.Get()).To(Equal(initialConnID))
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
})).To(Succeed())
Expect(m.Get()).To(Equal(initialConnID))
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
})
It("initiates subsequent updates when enough packets are sent", func() {
var s uint8
for s = uint8(1); s < protocol.MaxActiveConnectionIDs; s++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(s),
ConnectionID: protocol.ParseConnectionID([]byte{s, s, s, s}),
StatelessResetToken: protocol.StatelessResetToken{s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s},
})).To(Succeed())
}
m.SetHandshakeComplete()
lastConnID := m.Get()
Expect(lastConnID).To(Equal(protocol.ParseConnectionID([]byte{1, 1, 1, 1})))
var counter int
for i := 0; i < 50*protocol.PacketsPerConnectionID; i++ {
m.SentPacket()
connID := m.Get()
if connID != lastConnID {
counter++
lastConnID = connID
Expect(removedTokens).To(HaveLen(1))
removedTokens = nil
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(s),
ConnectionID: protocol.ParseConnectionID([]byte{s, s, s, s}),
StatelessResetToken: protocol.StatelessResetToken{s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s},
})).To(Succeed())
s++
}
}
Expect(counter).To(BeNumerically("~", 50, 10))
})
It("retires delayed connection IDs that arrive after a higher connection ID was already retired", func() {
for s := uint8(10); s <= 10+protocol.MaxActiveConnectionIDs/2; s++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(s),
ConnectionID: protocol.ParseConnectionID([]byte{s, s, s, s}),
StatelessResetToken: protocol.StatelessResetToken{s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s},
})).To(Succeed())
}
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{10, 10, 10, 10})))
for {
m.SentPacket()
if m.Get() == protocol.ParseConnectionID([]byte{11, 11, 11, 11}) {
break
}
}
// The active conn ID is now {11, 11, 11, 11}
Expect(m.queue.Front().Value.ConnectionID).To(Equal(protocol.ParseConnectionID([]byte{12, 12, 12, 12})))
// Add a delayed connection ID. It should not be queued, but retired immediately.
frameQueue = nil
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(5),
ConnectionID: protocol.ParseConnectionID([]byte{5, 5, 5, 5}),
StatelessResetToken: protocol.StatelessResetToken{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5},
})).To(Succeed())
Expect(m.queue.Front().Value.ConnectionID).To(Equal(protocol.ParseConnectionID([]byte{12, 12, 12, 12})))
Expect(frameQueue).To(HaveLen(1))
Expect(frameQueue[0].(*wire.RetireConnectionIDFrame).SequenceNumber).To(BeEquivalentTo(5))
})
It("only initiates subsequent updates when enough if enough connection IDs are queued", func() {
for i := uint8(1); i <= protocol.MaxActiveConnectionIDs/2; i++ {
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: uint64(i),
ConnectionID: protocol.ParseConnectionID([]byte{i, i, i, i}),
StatelessResetToken: protocol.StatelessResetToken{i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i},
})).To(Succeed())
}
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 1, 1, 1})))
for i := 0; i < 2*protocol.PacketsPerConnectionID; i++ {
m.SentPacket()
}
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 1, 1, 1})))
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1337,
ConnectionID: protocol.ParseConnectionID([]byte{1, 3, 3, 7}),
})).To(Succeed())
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{2, 2, 2, 2})))
Expect(removedTokens).To(HaveLen(1))
Expect(removedTokens[0]).To(Equal(protocol.StatelessResetToken{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}))
})
It("removes the currently active stateless reset token when it is closed", func() {
m.Close()
Expect(removedTokens).To(BeEmpty())
Expect(m.Add(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
})).To(Succeed())
m.SetHandshakeComplete()
Expect(m.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
m.Close()
Expect(removedTokens).To(HaveLen(1))
Expect(removedTokens[0]).To(Equal(protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}))
})
})
golang-github-lucas-clemente-quic-go-0.46.0/connection.go 0000664 0000000 0000000 00000227453 14656644531 0023312 0 ustar 00root root 0000000 0000000 package quic
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/quic-go/quic-go/internal/ackhandler"
"github.com/quic-go/quic-go/internal/flowcontrol"
"github.com/quic-go/quic-go/internal/handshake"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/logging"
)
type unpacker interface {
UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error)
UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error)
}
type streamManager interface {
GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error)
GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error)
OpenStream() (Stream, error)
OpenUniStream() (SendStream, error)
OpenStreamSync(context.Context) (Stream, error)
OpenUniStreamSync(context.Context) (SendStream, error)
AcceptStream(context.Context) (Stream, error)
AcceptUniStream(context.Context) (ReceiveStream, error)
DeleteStream(protocol.StreamID) error
UpdateLimits(*wire.TransportParameters)
HandleMaxStreamsFrame(*wire.MaxStreamsFrame)
CloseWithError(error)
ResetFor0RTT()
UseResetMaps()
}
type cryptoStreamHandler interface {
StartHandshake(context.Context) error
ChangeConnectionID(protocol.ConnectionID)
SetLargest1RTTAcked(protocol.PacketNumber) error
SetHandshakeConfirmed()
GetSessionTicket() ([]byte, error)
NextEvent() handshake.Event
DiscardInitialKeys()
HandleMessage([]byte, protocol.EncryptionLevel) error
io.Closer
ConnectionState() handshake.ConnectionState
}
type receivedPacket struct {
buffer *packetBuffer
remoteAddr net.Addr
rcvTime time.Time
data []byte
ecn protocol.ECN
info packetInfo // only valid if the contained IP address is valid
}
func (p *receivedPacket) Size() protocol.ByteCount { return protocol.ByteCount(len(p.data)) }
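// Clone returns a shallow copy of the receivedPacket: the packet data and the
// underlying buffer are shared with the original, not duplicated.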
func (p *receivedPacket) Clone() *receivedPacket {
return &receivedPacket{
remoteAddr: p.remoteAddr,
rcvTime: p.rcvTime,
data: p.data,
buffer: p.buffer,
ecn: p.ecn,
info: p.info,
}
}
type connRunner interface {
Add(protocol.ConnectionID, packetHandler) bool
GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken
Retire(protocol.ConnectionID)
Remove(protocol.ConnectionID)
ReplaceWithClosed([]protocol.ConnectionID, []byte)
AddResetToken(protocol.StatelessResetToken, packetHandler)
RemoveResetToken(protocol.StatelessResetToken)
}
type closeError struct {
err error
remote bool
immediate bool
}
type errCloseForRecreating struct {
nextPacketNumber protocol.PacketNumber
nextVersion protocol.Version
}
func (e *errCloseForRecreating) Error() string {
return "closing connection in order to recreate it"
}
var connTracingID atomic.Uint64 // to be accessed atomically
func nextConnTracingID() ConnectionTracingID { return ConnectionTracingID(connTracingID.Add(1)) }
// A Connection is a QUIC connection
type connection struct {
// Destination connection ID used during the handshake.
// Used to check source connection ID on incoming packets.
handshakeDestConnID protocol.ConnectionID
// Set for the client. Destination connection ID used on the first Initial sent.
origDestConnID protocol.ConnectionID
retrySrcConnID *protocol.ConnectionID // only set for the client (and if a Retry was performed)
srcConnIDLen int
perspective protocol.Perspective
version protocol.Version
config *Config
conn sendConn
sendQueue sender
streamsMap streamManager
connIDManager *connIDManager
connIDGenerator *connIDGenerator
rttStats *utils.RTTStats
cryptoStreamManager *cryptoStreamManager
sentPacketHandler ackhandler.SentPacketHandler
receivedPacketHandler ackhandler.ReceivedPacketHandler
retransmissionQueue *retransmissionQueue
framer *framer
connFlowController flowcontrol.ConnectionFlowController
tokenStoreKey string // only set for the client
tokenGenerator *handshake.TokenGenerator // only set for the server
unpacker unpacker
frameParser wire.FrameParser
packer packer
mtuDiscoverer mtuDiscoverer // initialized when the transport parameters are received
maxPayloadSizeEstimate atomic.Uint32
initialStream *cryptoStream
handshakeStream *cryptoStream
oneRTTStream *cryptoStream // only set for the server
cryptoStreamHandler cryptoStreamHandler
receivedPackets chan receivedPacket
sendingScheduled chan struct{}
closeOnce sync.Once
// closeChan is used to notify the run loop that it should terminate
closeChan chan closeError
ctx context.Context
ctxCancel context.CancelCauseFunc
handshakeCompleteChan chan struct{}
undecryptablePackets []receivedPacket // undecryptable packets, waiting for a change in encryption level
undecryptablePacketsToProcess []receivedPacket
earlyConnReadyChan chan struct{}
sentFirstPacket bool
droppedInitialKeys bool
handshakeComplete bool
handshakeConfirmed bool
receivedRetry bool
versionNegotiated bool
receivedFirstPacket bool
// the minimum of the max_idle_timeout values advertised by both endpoints
idleTimeout time.Duration
creationTime time.Time
// The idle timeout is set based on the max of the time we received the last packet...
lastPacketReceivedTime time.Time
// ... and the time we sent a new ack-eliciting packet after receiving a packet.
firstAckElicitingPacketAfterIdleSentTime time.Time
// pacingDeadline is the time when the next packet should be sent
pacingDeadline time.Time
peerParams *wire.TransportParameters
timer connectionTimer
// keepAlivePingSent stores whether a keep alive PING is in flight.
// It is reset as soon as we receive a packet from the peer.
keepAlivePingSent bool
keepAliveInterval time.Duration
datagramQueue *datagramQueue
connStateMutex sync.Mutex
connState ConnectionState
logID string
tracer *logging.ConnectionTracer
logger utils.Logger
}
var (
_ Connection = &connection{}
_ EarlyConnection = &connection{}
_ streamSender = &connection{}
)
var newConnection = func(
ctx context.Context,
ctxCancel context.CancelCauseFunc,
conn sendConn,
runner connRunner,
origDestConnID protocol.ConnectionID,
retrySrcConnID *protocol.ConnectionID,
clientDestConnID protocol.ConnectionID,
destConnID protocol.ConnectionID,
srcConnID protocol.ConnectionID,
connIDGenerator ConnectionIDGenerator,
statelessResetToken protocol.StatelessResetToken,
conf *Config,
tlsConf *tls.Config,
tokenGenerator *handshake.TokenGenerator,
clientAddressValidated bool,
tracer *logging.ConnectionTracer,
logger utils.Logger,
v protocol.Version,
) quicConn {
s := &connection{
ctx: ctx,
ctxCancel: ctxCancel,
conn: conn,
config: conf,
handshakeDestConnID: destConnID,
srcConnIDLen: srcConnID.Len(),
tokenGenerator: tokenGenerator,
oneRTTStream: newCryptoStream(),
perspective: protocol.PerspectiveServer,
tracer: tracer,
logger: logger,
version: v,
}
if origDestConnID.Len() > 0 {
s.logID = origDestConnID.String()
} else {
s.logID = destConnID.String()
}
s.connIDManager = newConnIDManager(
destConnID,
func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) },
runner.RemoveResetToken,
s.queueControlFrame,
)
s.connIDGenerator = newConnIDGenerator(
srcConnID,
&clientDestConnID,
func(connID protocol.ConnectionID) { runner.Add(connID, s) },
runner.GetStatelessResetToken,
runner.Remove,
runner.Retire,
runner.ReplaceWithClosed,
s.queueControlFrame,
connIDGenerator,
)
s.preSetup()
s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler(
0,
protocol.ByteCount(s.config.InitialPacketSize),
s.rttStats,
clientAddressValidated,
s.conn.capabilities().ECN,
s.perspective,
s.tracer,
s.logger,
)
s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
params := &wire.TransportParameters{
InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataUni: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxData: protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
MaxIdleTimeout: s.config.MaxIdleTimeout,
MaxBidiStreamNum: protocol.StreamNum(s.config.MaxIncomingStreams),
MaxUniStreamNum: protocol.StreamNum(s.config.MaxIncomingUniStreams),
MaxAckDelay: protocol.MaxAckDelayInclGranularity,
AckDelayExponent: protocol.AckDelayExponent,
MaxUDPPayloadSize: protocol.MaxPacketBufferSize,
DisableActiveMigration: true,
StatelessResetToken: &statelessResetToken,
OriginalDestinationConnectionID: origDestConnID,
// For interoperability with quic-go versions before May 2023, this value must be set to a value
// different from protocol.DefaultActiveConnectionIDLimit.
// If set to the default value, it will be omitted from the transport parameters, which will make
// old quic-go versions interpret it as 0, instead of the default value of 2.
// See https://github.com/quic-go/quic-go/pull/3806.
ActiveConnectionIDLimit: protocol.MaxActiveConnectionIDs,
InitialSourceConnectionID: srcConnID,
RetrySourceConnectionID: retrySrcConnID,
}
if s.config.EnableDatagrams {
params.MaxDatagramFrameSize = wire.MaxDatagramSize
} else {
params.MaxDatagramFrameSize = protocol.InvalidByteCount
}
if s.tracer != nil && s.tracer.SentTransportParameters != nil {
s.tracer.SentTransportParameters(params)
}
cs := handshake.NewCryptoSetupServer(
clientDestConnID,
conn.LocalAddr(),
conn.RemoteAddr(),
params,
tlsConf,
conf.Allow0RTT,
s.rttStats,
tracer,
logger,
s.version,
)
s.cryptoStreamHandler = cs
s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, s.oneRTTStream)
return s
}
// declare this as a variable, such that we can mock it in the tests
var newClientConnection = func(
ctx context.Context,
conn sendConn,
runner connRunner,
destConnID protocol.ConnectionID,
srcConnID protocol.ConnectionID,
connIDGenerator ConnectionIDGenerator,
conf *Config,
tlsConf *tls.Config,
initialPacketNumber protocol.PacketNumber,
enable0RTT bool,
hasNegotiatedVersion bool,
tracer *logging.ConnectionTracer,
logger utils.Logger,
v protocol.Version,
) quicConn {
s := &connection{
conn: conn,
config: conf,
origDestConnID: destConnID,
handshakeDestConnID: destConnID,
srcConnIDLen: srcConnID.Len(),
perspective: protocol.PerspectiveClient,
logID: destConnID.String(),
logger: logger,
tracer: tracer,
versionNegotiated: hasNegotiatedVersion,
version: v,
}
s.connIDManager = newConnIDManager(
destConnID,
func(token protocol.StatelessResetToken) { runner.AddResetToken(token, s) },
runner.RemoveResetToken,
s.queueControlFrame,
)
s.connIDGenerator = newConnIDGenerator(
srcConnID,
nil,
func(connID protocol.ConnectionID) { runner.Add(connID, s) },
runner.GetStatelessResetToken,
runner.Remove,
runner.Retire,
runner.ReplaceWithClosed,
s.queueControlFrame,
connIDGenerator,
)
s.ctx, s.ctxCancel = context.WithCancelCause(ctx)
s.preSetup()
s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler(
initialPacketNumber,
protocol.ByteCount(s.config.InitialPacketSize),
s.rttStats,
false, // has no effect
s.conn.capabilities().ECN,
s.perspective,
s.tracer,
s.logger,
)
s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
oneRTTStream := newCryptoStream()
params := &wire.TransportParameters{
InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxStreamDataUni: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
InitialMaxData: protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
MaxIdleTimeout: s.config.MaxIdleTimeout,
MaxBidiStreamNum: protocol.StreamNum(s.config.MaxIncomingStreams),
MaxUniStreamNum: protocol.StreamNum(s.config.MaxIncomingUniStreams),
MaxAckDelay: protocol.MaxAckDelayInclGranularity,
MaxUDPPayloadSize: protocol.MaxPacketBufferSize,
AckDelayExponent: protocol.AckDelayExponent,
DisableActiveMigration: true,
// For interoperability with quic-go versions before May 2023, this value must be set to a value
// different from protocol.DefaultActiveConnectionIDLimit.
// If set to the default value, it will be omitted from the transport parameters, which will make
// old quic-go versions interpret it as 0, instead of the default value of 2.
// See https://github.com/quic-go/quic-go/pull/3806.
ActiveConnectionIDLimit: protocol.MaxActiveConnectionIDs,
InitialSourceConnectionID: srcConnID,
}
if s.config.EnableDatagrams {
params.MaxDatagramFrameSize = wire.MaxDatagramSize
} else {
params.MaxDatagramFrameSize = protocol.InvalidByteCount
}
if s.tracer != nil && s.tracer.SentTransportParameters != nil {
s.tracer.SentTransportParameters(params)
}
cs := handshake.NewCryptoSetupClient(
destConnID,
params,
tlsConf,
enable0RTT,
s.rttStats,
tracer,
logger,
s.version,
)
s.cryptoStreamHandler = cs
s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, oneRTTStream)
s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
if len(tlsConf.ServerName) > 0 {
s.tokenStoreKey = tlsConf.ServerName
} else {
s.tokenStoreKey = conn.RemoteAddr().String()
}
if s.config.TokenStore != nil {
if token := s.config.TokenStore.Pop(s.tokenStoreKey); token != nil {
s.packer.SetToken(token.data)
}
}
return s
}
func (s *connection) preSetup() {
s.initialStream = newCryptoStream()
s.handshakeStream = newCryptoStream()
s.sendQueue = newSendQueue(s.conn)
s.retransmissionQueue = newRetransmissionQueue()
s.frameParser = *wire.NewFrameParser(s.config.EnableDatagrams)
s.rttStats = &utils.RTTStats{}
s.connFlowController = flowcontrol.NewConnectionFlowController(
protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
protocol.ByteCount(s.config.MaxConnectionReceiveWindow),
func(size protocol.ByteCount) bool {
if s.config.AllowConnectionWindowIncrease == nil {
return true
}
return s.config.AllowConnectionWindowIncrease(s, uint64(size))
},
s.rttStats,
s.logger,
)
s.earlyConnReadyChan = make(chan struct{})
s.streamsMap = newStreamsMap(
s.ctx,
s,
s.queueControlFrame,
s.newFlowController,
uint64(s.config.MaxIncomingStreams),
uint64(s.config.MaxIncomingUniStreams),
s.perspective,
)
s.framer = newFramer()
s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets)
s.closeChan = make(chan closeError, 1)
s.sendingScheduled = make(chan struct{}, 1)
s.handshakeCompleteChan = make(chan struct{})
now := time.Now()
s.lastPacketReceivedTime = now
s.creationTime = now
s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
s.connState.Version = s.version
}
// run runs the connection's main loop
func (s *connection) run() error {
var closeErr closeError
defer func() { s.ctxCancel(closeErr.err) }()
s.timer = *newTimer()
if err := s.cryptoStreamHandler.StartHandshake(s.ctx); err != nil {
return err
}
if err := s.handleHandshakeEvents(); err != nil {
return err
}
go func() {
if err := s.sendQueue.Run(); err != nil {
s.destroyImpl(err)
}
}()
if s.perspective == protocol.PerspectiveClient {
s.scheduleSending() // so the ClientHello actually gets sent
}
var sendQueueAvailable <-chan struct{}
runLoop:
for {
if s.framer.QueuedTooManyControlFrames() {
s.closeLocal(&qerr.TransportError{ErrorCode: InternalError})
}
// Close immediately if requested
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
s.maybeResetTimer()
var processedUndecryptablePacket bool
if len(s.undecryptablePacketsToProcess) > 0 {
queue := s.undecryptablePacketsToProcess
s.undecryptablePacketsToProcess = nil
for _, p := range queue {
if processed := s.handlePacketImpl(p); processed {
processedUndecryptablePacket = true
}
// Don't set timers and send packets if the packet made us close the connection.
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
}
}
// If we processed any undecryptable packets, jump to the resetting of the timers directly.
if !processedUndecryptablePacket {
select {
case closeErr = <-s.closeChan:
break runLoop
case <-s.timer.Chan():
s.timer.SetRead()
// We do all the interesting stuff after the select statement, so
// nothing to see here.
case <-s.sendingScheduled:
// We do all the interesting stuff after the select statement, so
// nothing to see here.
case <-sendQueueAvailable:
case firstPacket := <-s.receivedPackets:
wasProcessed := s.handlePacketImpl(firstPacket)
// Don't set timers and send packets if the packet made us close the connection.
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
if s.handshakeComplete {
// Now process all packets in the receivedPackets channel.
// Limit the number of packets to the length of the receivedPackets channel,
// so we eventually get a chance to send out an ACK when receiving a lot of packets.
numPackets := len(s.receivedPackets)
receiveLoop:
for i := 0; i < numPackets; i++ {
select {
case p := <-s.receivedPackets:
if processed := s.handlePacketImpl(p); processed {
wasProcessed = true
}
select {
case closeErr = <-s.closeChan:
break runLoop
default:
}
default:
break receiveLoop
}
}
}
// Only reset the timers if this packet was actually processed.
// This avoids modifying any state when handling undecryptable packets,
// which could be injected by an attacker.
if !wasProcessed {
continue
}
}
}
now := time.Now()
if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) {
// This could cause packets to be retransmitted.
// Check it before trying to send packets.
if err := s.sentPacketHandler.OnLossDetectionTimeout(); err != nil {
s.closeLocal(err)
}
}
if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() && !now.Before(keepAliveTime) {
// send a PING frame since there is no activity in the connection
s.logger.Debugf("Sending a keep-alive PING to keep the connection alive.")
s.framer.QueueControlFrame(&wire.PingFrame{})
s.keepAlivePingSent = true
} else if !s.handshakeComplete && now.Sub(s.creationTime) >= s.config.handshakeTimeout() {
s.destroyImpl(qerr.ErrHandshakeTimeout)
continue
} else {
idleTimeoutStartTime := s.idleTimeoutStartTime()
if (!s.handshakeComplete && now.Sub(idleTimeoutStartTime) >= s.config.HandshakeIdleTimeout) ||
(s.handshakeComplete && now.After(s.nextIdleTimeoutTime())) {
s.destroyImpl(qerr.ErrIdleTimeout)
continue
}
}
if s.sendQueue.WouldBlock() {
// The send queue is still busy sending out packets.
// Wait until there's space to enqueue new packets.
sendQueueAvailable = s.sendQueue.Available()
continue
}
if err := s.triggerSending(now); err != nil {
s.closeLocal(err)
}
if s.sendQueue.WouldBlock() {
sendQueueAvailable = s.sendQueue.Available()
} else {
sendQueueAvailable = nil
}
}
s.cryptoStreamHandler.Close()
s.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE
s.handleCloseError(&closeErr)
if s.tracer != nil && s.tracer.Close != nil {
if e := (&errCloseForRecreating{}); !errors.As(closeErr.err, &e) {
s.tracer.Close()
}
}
s.logger.Infof("Connection %s closed.", s.logID)
s.timer.Stop()
return closeErr.err
}
// earlyConnReady returns a channel that is closed once the early connection can be used
func (s *connection) earlyConnReady() <-chan struct{} {
return s.earlyConnReadyChan
}
func (s *connection) HandshakeComplete() <-chan struct{} {
return s.handshakeCompleteChan
}
func (s *connection) Context() context.Context {
return s.ctx
}
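// Illustrative usage sketch (not part of the original source): the connection's
// context is cancelled when the run loop exits, with the close error as the
// cancellation cause (see the deferred ctxCancel in run). A caller can therefore
// wait for the connection to close and recover the reason:
//
//	<-conn.Context().Done()
//	err := context.Cause(conn.Context()) // e.g. a *quic.ApplicationError or an idle timeout error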
func (s *connection) supportsDatagrams() bool {
return s.peerParams.MaxDatagramFrameSize > 0
}
func (s *connection) ConnectionState() ConnectionState {
s.connStateMutex.Lock()
defer s.connStateMutex.Unlock()
cs := s.cryptoStreamHandler.ConnectionState()
s.connState.TLS = cs.ConnectionState
s.connState.Used0RTT = cs.Used0RTT
s.connState.GSO = s.conn.capabilities().GSO
return s.connState
}
// Time when the connection should time out
func (s *connection) nextIdleTimeoutTime() time.Time {
idleTimeout := max(s.idleTimeout, s.rttStats.PTO(true)*3)
return s.idleTimeoutStartTime().Add(idleTimeout)
}
// Time when the next keep-alive packet should be sent.
// It returns a zero time if no keep-alive should be sent.
func (s *connection) nextKeepAliveTime() time.Time {
if s.config.KeepAlivePeriod == 0 || s.keepAlivePingSent || !s.firstAckElicitingPacketAfterIdleSentTime.IsZero() {
return time.Time{}
}
keepAliveInterval := max(s.keepAliveInterval, s.rttStats.PTO(true)*3/2)
return s.lastPacketReceivedTime.Add(keepAliveInterval)
}
func (s *connection) maybeResetTimer() {
var deadline time.Time
if !s.handshakeComplete {
deadline = s.creationTime.Add(s.config.handshakeTimeout())
if t := s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout); t.Before(deadline) {
deadline = t
}
} else {
if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() {
deadline = keepAliveTime
} else {
deadline = s.nextIdleTimeoutTime()
}
}
s.timer.SetTimer(
deadline,
s.receivedPacketHandler.GetAlarmTimeout(),
s.sentPacketHandler.GetLossDetectionTimeout(),
s.pacingDeadline,
)
}
func (s *connection) idleTimeoutStartTime() time.Time {
startTime := s.lastPacketReceivedTime
if t := s.firstAckElicitingPacketAfterIdleSentTime; t.After(startTime) {
startTime = t
}
return startTime
}
func (s *connection) handleHandshakeComplete() error {
defer close(s.handshakeCompleteChan)
// Once the handshake completes, we have derived 1-RTT keys.
// There's no point in queueing undecryptable packets for later decryption anymore.
s.undecryptablePackets = nil
s.connIDManager.SetHandshakeComplete()
s.connIDGenerator.SetHandshakeComplete()
if s.tracer != nil && s.tracer.ChoseALPN != nil {
s.tracer.ChoseALPN(s.cryptoStreamHandler.ConnectionState().NegotiatedProtocol)
}
// The server applies transport parameters right away, but the client side has to wait for handshake completion.
// During a 0-RTT connection, the client is only allowed to use the new transport parameters for 1-RTT packets.
if s.perspective == protocol.PerspectiveClient {
s.applyTransportParameters()
return nil
}
// All these only apply to the server side.
if err := s.handleHandshakeConfirmed(); err != nil {
return err
}
ticket, err := s.cryptoStreamHandler.GetSessionTicket()
if err != nil {
return err
}
if ticket != nil { // may be nil if session tickets are disabled via tls.Config.SessionTicketsDisabled
s.oneRTTStream.Write(ticket)
for s.oneRTTStream.HasData() {
s.queueControlFrame(s.oneRTTStream.PopCryptoFrame(protocol.MaxPostHandshakeCryptoFrameSize))
}
}
token, err := s.tokenGenerator.NewToken(s.conn.RemoteAddr())
if err != nil {
return err
}
s.queueControlFrame(&wire.NewTokenFrame{Token: token})
s.queueControlFrame(&wire.HandshakeDoneFrame{})
return nil
}
func (s *connection) handleHandshakeConfirmed() error {
if err := s.dropEncryptionLevel(protocol.EncryptionHandshake); err != nil {
return err
}
s.handshakeConfirmed = true
s.sentPacketHandler.SetHandshakeConfirmed()
s.cryptoStreamHandler.SetHandshakeConfirmed()
if !s.config.DisablePathMTUDiscovery && s.conn.capabilities().DF {
s.mtuDiscoverer.Start()
}
return nil
}
func (s *connection) handlePacketImpl(rp receivedPacket) bool {
s.sentPacketHandler.ReceivedBytes(rp.Size())
if wire.IsVersionNegotiationPacket(rp.data) {
s.handleVersionNegotiationPacket(rp)
return false
}
var counter uint8
var lastConnID protocol.ConnectionID
var processed bool
data := rp.data
p := rp
for len(data) > 0 {
if counter > 0 {
p = *(p.Clone())
p.data = data
destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen)
if err != nil {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", err)
break
}
if destConnID != lastConnID {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID)
}
s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", destConnID, lastConnID)
break
}
}
if wire.IsLongHeaderPacket(p.data[0]) {
hdr, packetData, rest, err := wire.ParsePacket(p.data)
if err != nil {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
dropReason := logging.PacketDropHeaderParseError
if err == wire.ErrUnsupportedVersion {
dropReason = logging.PacketDropUnsupportedVersion
}
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), dropReason)
}
s.logger.Debugf("error parsing packet: %s", err)
break
}
lastConnID = hdr.DestConnectionID
if hdr.Version != s.version {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion)
}
s.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, s.version)
break
}
if counter > 0 {
p.buffer.Split()
}
counter++
// only log if this is actually a coalesced packet
if s.logger.Debug() && (counter > 1 || len(rest) > 0) {
s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest))
}
p.data = packetData
if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed {
processed = true
}
data = rest
} else {
if counter > 0 {
p.buffer.Split()
}
processed = s.handleShortHeaderPacket(p)
break
}
}
p.buffer.MaybeRelease()
return processed
}
func (s *connection) handleShortHeaderPacket(p receivedPacket) bool {
var wasQueued bool
defer func() {
// Put back the packet buffer if the packet wasn't queued for later decryption.
if !wasQueued {
p.buffer.Decrement()
}
}()
destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen)
if err != nil {
s.tracer.DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, protocol.ByteCount(len(p.data)), logging.PacketDropHeaderParseError)
return false
}
pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data)
if err != nil {
wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT)
return false
}
if s.logger.Debug() {
s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID)
wire.LogShortHeader(s.logger, destConnID, pn, pnLen, keyPhase)
}
if s.receivedPacketHandler.IsPotentiallyDuplicate(pn, protocol.Encryption1RTT) {
s.logger.Debugf("Dropping (potentially) duplicate packet.")
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketType1RTT, pn, p.Size(), logging.PacketDropDuplicate)
}
return false
}
var log func([]logging.Frame)
if s.tracer != nil && s.tracer.ReceivedShortHeaderPacket != nil {
log = func(frames []logging.Frame) {
s.tracer.ReceivedShortHeaderPacket(
&logging.ShortHeader{
DestConnectionID: destConnID,
PacketNumber: pn,
PacketNumberLen: pnLen,
KeyPhase: keyPhase,
},
p.Size(),
p.ecn,
frames,
)
}
}
if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil {
s.closeLocal(err)
return false
}
return true
}
func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ {
var wasQueued bool
defer func() {
// Put back the packet buffer if the packet wasn't queued for later decryption.
if !wasQueued {
p.buffer.Decrement()
}
}()
if hdr.Type == protocol.PacketTypeRetry {
return s.handleRetryPacket(hdr, p.data, p.rcvTime)
}
// The server can change the source connection ID with the first Handshake packet.
// After this, all packets with a different source connection ID have to be ignored.
if s.receivedFirstPacket && hdr.Type == protocol.PacketTypeInitial && hdr.SrcConnectionID != s.handshakeDestConnID {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeInitial, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnknownConnectionID)
}
s.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", p.Size(), hdr.SrcConnectionID, s.handshakeDestConnID)
return false
}
// drop 0-RTT packets, if we are a client
if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable)
}
return false
}
packet, err := s.unpacker.UnpackLongHeader(hdr, p.data)
if err != nil {
wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr))
return false
}
if s.logger.Debug() {
s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.hdr.PacketNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel)
packet.hdr.Log(s.logger)
}
if pn := packet.hdr.PacketNumber; s.receivedPacketHandler.IsPotentiallyDuplicate(pn, packet.encryptionLevel) {
s.logger.Debugf("Dropping (potentially) duplicate packet.")
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), pn, p.Size(), logging.PacketDropDuplicate)
}
return false
}
if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
s.closeLocal(err)
return false
}
return true
}
func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool) {
switch err {
case handshake.ErrKeysDropped:
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable)
}
s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size())
case handshake.ErrKeysNotYetAvailable:
// The decryption keys for this encryption level are not yet available.
// Try again later.
s.tryQueueingUndecryptablePacket(p, pt)
return true
case wire.ErrInvalidReservedBits:
s.closeLocal(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: err.Error(),
})
case handshake.ErrDecryptionFailed:
// This might be a packet injected by an attacker. Drop it.
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropPayloadDecryptError)
}
s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err)
default:
var headerErr *headerParseError
if errors.As(err, &headerErr) {
// This might be a packet injected by an attacker. Drop it.
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err)
} else {
// This is an error returned by the AEAD (other than ErrDecryptionFailed).
// For example, a PROTOCOL_VIOLATION due to key updates.
s.closeLocal(err)
}
}
return false
}
func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime time.Time) bool /* was this a valid Retry */ {
if s.perspective == protocol.PerspectiveServer {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
s.logger.Debugf("Ignoring Retry.")
return false
}
if s.receivedFirstPacket {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
s.logger.Debugf("Ignoring Retry, since we already received a packet.")
return false
}
destConnID := s.connIDManager.Get()
if hdr.SrcConnectionID == destConnID {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
s.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.")
return false
}
// If a token is already set, this means that we already received a Retry from the server.
// Ignore this Retry packet.
if s.receivedRetry {
s.logger.Debugf("Ignoring Retry, since a Retry was already received.")
return false
}
tag := handshake.GetRetryIntegrityTag(data[:len(data)-16], destConnID, hdr.Version)
if !bytes.Equal(data[len(data)-16:], tag[:]) {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropPayloadDecryptError)
}
s.logger.Debugf("Ignoring spoofed Retry. Integrity Tag doesn't match.")
return false
}
if s.logger.Debug() {
s.logger.Debugf("<- Received Retry:")
(&wire.ExtendedHeader{Header: *hdr}).Log(s.logger)
s.logger.Debugf("Switching destination connection ID to: %s", hdr.SrcConnectionID)
}
if s.tracer != nil && s.tracer.ReceivedRetry != nil {
s.tracer.ReceivedRetry(hdr)
}
newDestConnID := hdr.SrcConnectionID
s.receivedRetry = true
if err := s.sentPacketHandler.ResetForRetry(rcvTime); err != nil {
s.closeLocal(err)
return false
}
s.handshakeDestConnID = newDestConnID
s.retrySrcConnID = &newDestConnID
s.cryptoStreamHandler.ChangeConnectionID(newDestConnID)
s.packer.SetToken(hdr.Token)
s.connIDManager.ChangeInitialConnID(newDestConnID)
s.scheduleSending()
return true
}
func (s *connection) handleVersionNegotiationPacket(p receivedPacket) {
if s.perspective == protocol.PerspectiveServer || // servers never receive version negotiation packets
s.receivedFirstPacket || s.versionNegotiated { // ignore delayed / duplicated version negotiation packets
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket)
}
return
}
src, dest, supportedVersions, err := wire.ParseVersionNegotiationPacket(p.data)
if err != nil {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("Error parsing Version Negotiation packet: %s", err)
return
}
for _, v := range supportedVersions {
if v == s.version {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedVersion)
}
// The Version Negotiation packet contains the version that we offered.
// This might be a packet sent by an attacker, or it was corrupted.
return
}
}
s.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", supportedVersions)
if s.tracer != nil && s.tracer.ReceivedVersionNegotiationPacket != nil {
s.tracer.ReceivedVersionNegotiationPacket(dest, src, supportedVersions)
}
newVersion, ok := protocol.ChooseSupportedVersion(s.config.Versions, supportedVersions)
if !ok {
s.destroyImpl(&VersionNegotiationError{
Ours: s.config.Versions,
Theirs: supportedVersions,
})
s.logger.Infof("No compatible QUIC version found.")
return
}
if s.tracer != nil && s.tracer.NegotiatedVersion != nil {
s.tracer.NegotiatedVersion(newVersion, s.config.Versions, supportedVersions)
}
s.logger.Infof("Switching to QUIC version %s.", newVersion)
nextPN, _ := s.sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial)
s.destroyImpl(&errCloseForRecreating{
nextPacketNumber: nextPN,
nextVersion: newVersion,
})
}
func (s *connection) handleUnpackedLongHeaderPacket(
packet *unpackedPacket,
ecn protocol.ECN,
rcvTime time.Time,
packetSize protocol.ByteCount, // only for logging
) error {
if !s.receivedFirstPacket {
s.receivedFirstPacket = true
if !s.versionNegotiated && s.tracer != nil && s.tracer.NegotiatedVersion != nil {
var clientVersions, serverVersions []protocol.Version
switch s.perspective {
case protocol.PerspectiveClient:
clientVersions = s.config.Versions
case protocol.PerspectiveServer:
serverVersions = s.config.Versions
}
s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions)
}
// The server can change the source connection ID with the first Handshake packet.
if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID {
cid := packet.hdr.SrcConnectionID
s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid)
s.handshakeDestConnID = cid
s.connIDManager.ChangeInitialConnID(cid)
}
// We create the connection as soon as we receive the first packet from the client.
// We do that before authenticating the packet.
// That means that if the source connection ID was corrupted,
// we might have created a connection with an incorrect source connection ID.
// Once we authenticate the first packet, we need to update it.
if s.perspective == protocol.PerspectiveServer {
if packet.hdr.SrcConnectionID != s.handshakeDestConnID {
s.handshakeDestConnID = packet.hdr.SrcConnectionID
s.connIDManager.ChangeInitialConnID(packet.hdr.SrcConnectionID)
}
if s.tracer != nil && s.tracer.StartedConnection != nil {
s.tracer.StartedConnection(
s.conn.LocalAddr(),
s.conn.RemoteAddr(),
packet.hdr.SrcConnectionID,
packet.hdr.DestConnectionID,
)
}
}
}
if s.perspective == protocol.PerspectiveServer && packet.encryptionLevel == protocol.EncryptionHandshake &&
!s.droppedInitialKeys {
// On the server side, Initial keys are dropped as soon as the first Handshake packet is received.
// See Section 4.9.1 of RFC 9001.
if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
return err
}
}
s.lastPacketReceivedTime = rcvTime
s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
s.keepAlivePingSent = false
var log func([]logging.Frame)
if s.tracer != nil && s.tracer.ReceivedLongHeaderPacket != nil {
log = func(frames []logging.Frame) {
s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, ecn, frames)
}
}
isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log)
if err != nil {
return err
}
return s.receivedPacketHandler.ReceivedPacket(packet.hdr.PacketNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting)
}
func (s *connection) handleUnpackedShortHeaderPacket(
destConnID protocol.ConnectionID,
pn protocol.PacketNumber,
data []byte,
ecn protocol.ECN,
rcvTime time.Time,
log func([]logging.Frame),
) error {
s.lastPacketReceivedTime = rcvTime
s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
s.keepAlivePingSent = false
isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log)
if err != nil {
return err
}
return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting)
}
func (s *connection) handleFrames(
data []byte,
destConnID protocol.ConnectionID,
encLevel protocol.EncryptionLevel,
log func([]logging.Frame),
) (isAckEliciting bool, _ error) {
// Only used for tracing.
// If we're not tracing, this slice will always remain empty.
var frames []logging.Frame
if log != nil {
frames = make([]logging.Frame, 0, 4)
}
handshakeWasComplete := s.handshakeComplete
var handleErr error
for len(data) > 0 {
l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version)
if err != nil {
return false, err
}
data = data[l:]
if frame == nil {
break
}
if ackhandler.IsFrameAckEliciting(frame) {
isAckEliciting = true
}
if log != nil {
frames = append(frames, toLoggingFrame(frame))
}
// An error occurred handling a previous frame.
// Don't handle the current frame.
if handleErr != nil {
continue
}
if err := s.handleFrame(frame, encLevel, destConnID); err != nil {
if log == nil {
return false, err
}
// If we're logging, we need to keep parsing (but not handling) all frames.
handleErr = err
}
}
if log != nil {
log(frames)
if handleErr != nil {
return false, handleErr
}
}
// Handle completion of the handshake after processing all the frames.
// This ensures that we correctly handle the following case on the server side:
// We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake,
// and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame.
if !handshakeWasComplete && s.handshakeComplete {
if err := s.handleHandshakeComplete(); err != nil {
return false, err
}
}
return
}
func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error {
var err error
wire.LogFrame(s.logger, f, false)
switch frame := f.(type) {
case *wire.CryptoFrame:
err = s.handleCryptoFrame(frame, encLevel)
case *wire.StreamFrame:
err = s.handleStreamFrame(frame)
case *wire.AckFrame:
err = s.handleAckFrame(frame, encLevel)
case *wire.ConnectionCloseFrame:
s.handleConnectionCloseFrame(frame)
case *wire.ResetStreamFrame:
err = s.handleResetStreamFrame(frame)
case *wire.MaxDataFrame:
s.handleMaxDataFrame(frame)
case *wire.MaxStreamDataFrame:
err = s.handleMaxStreamDataFrame(frame)
case *wire.MaxStreamsFrame:
s.handleMaxStreamsFrame(frame)
case *wire.DataBlockedFrame:
case *wire.StreamDataBlockedFrame:
case *wire.StreamsBlockedFrame:
case *wire.StopSendingFrame:
err = s.handleStopSendingFrame(frame)
case *wire.PingFrame:
case *wire.PathChallengeFrame:
s.handlePathChallengeFrame(frame)
case *wire.PathResponseFrame:
// since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs
err = errors.New("unexpected PATH_RESPONSE frame")
case *wire.NewTokenFrame:
err = s.handleNewTokenFrame(frame)
case *wire.NewConnectionIDFrame:
err = s.handleNewConnectionIDFrame(frame)
case *wire.RetireConnectionIDFrame:
err = s.handleRetireConnectionIDFrame(frame, destConnID)
case *wire.HandshakeDoneFrame:
err = s.handleHandshakeDoneFrame()
case *wire.DatagramFrame:
err = s.handleDatagramFrame(frame)
default:
err = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&frame).Elem().Type().Name())
}
return err
}
// handlePacket is called by the server with a new packet
func (s *connection) handlePacket(p receivedPacket) {
// Discard packets once the number of queued packets exceeds
// the channel size, protocol.MaxConnUnprocessedPackets.
select {
case s.receivedPackets <- p:
default:
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropDOSPrevention)
}
}
}
func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) {
if frame.IsApplicationError {
s.closeRemote(&qerr.ApplicationError{
Remote: true,
ErrorCode: qerr.ApplicationErrorCode(frame.ErrorCode),
ErrorMessage: frame.ReasonPhrase,
})
return
}
s.closeRemote(&qerr.TransportError{
Remote: true,
ErrorCode: qerr.TransportErrorCode(frame.ErrorCode),
FrameType: frame.FrameType,
ErrorMessage: frame.ReasonPhrase,
})
}
func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error {
if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil {
return err
}
for {
data := s.cryptoStreamManager.GetCryptoData(encLevel)
if data == nil {
break
}
if err := s.cryptoStreamHandler.HandleMessage(data, encLevel); err != nil {
return err
}
}
return s.handleHandshakeEvents()
}
func (s *connection) handleHandshakeEvents() error {
for {
ev := s.cryptoStreamHandler.NextEvent()
var err error
switch ev.Kind {
case handshake.EventNoEvent:
return nil
case handshake.EventHandshakeComplete:
// Don't call handleHandshakeComplete yet.
// It's advantageous to process ACK frames that might be serialized after the CRYPTO frame first.
s.handshakeComplete = true
case handshake.EventReceivedTransportParameters:
err = s.handleTransportParameters(ev.TransportParameters)
case handshake.EventRestoredTransportParameters:
s.restoreTransportParameters(ev.TransportParameters)
close(s.earlyConnReadyChan)
case handshake.EventReceivedReadKeys:
// Queue all packets for decryption that have been undecryptable so far.
s.undecryptablePacketsToProcess = s.undecryptablePackets
s.undecryptablePackets = nil
case handshake.EventDiscard0RTTKeys:
err = s.dropEncryptionLevel(protocol.Encryption0RTT)
case handshake.EventWriteInitialData:
_, err = s.initialStream.Write(ev.Data)
case handshake.EventWriteHandshakeData:
_, err = s.handshakeStream.Write(ev.Data)
}
if err != nil {
return err
}
}
}
func (s *connection) handleStreamFrame(frame *wire.StreamFrame) error {
str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// Stream is closed and already garbage collected
// ignore this StreamFrame
return nil
}
return str.handleStreamFrame(frame)
}
func (s *connection) handleMaxDataFrame(frame *wire.MaxDataFrame) {
s.connFlowController.UpdateSendWindow(frame.MaximumData)
}
func (s *connection) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) error {
str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// stream is closed and already garbage collected
return nil
}
str.updateSendWindow(frame.MaximumStreamData)
return nil
}
func (s *connection) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) {
s.streamsMap.HandleMaxStreamsFrame(frame)
}
func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error {
str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// stream is closed and already garbage collected
return nil
}
return str.handleResetStreamFrame(frame)
}
func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error {
str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// stream is closed and already garbage collected
return nil
}
str.handleStopSendingFrame(frame)
return nil
}
func (s *connection) handlePathChallengeFrame(frame *wire.PathChallengeFrame) {
s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data})
}
func (s *connection) handleNewTokenFrame(frame *wire.NewTokenFrame) error {
if s.perspective == protocol.PerspectiveServer {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received NEW_TOKEN frame from the client",
}
}
if s.config.TokenStore != nil {
s.config.TokenStore.Put(s.tokenStoreKey, &ClientToken{data: frame.Token})
}
return nil
}
func (s *connection) handleNewConnectionIDFrame(f *wire.NewConnectionIDFrame) error {
return s.connIDManager.Add(f)
}
func (s *connection) handleRetireConnectionIDFrame(f *wire.RetireConnectionIDFrame, destConnID protocol.ConnectionID) error {
return s.connIDGenerator.Retire(f.SequenceNumber, destConnID)
}
func (s *connection) handleHandshakeDoneFrame() error {
if s.perspective == protocol.PerspectiveServer {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received a HANDSHAKE_DONE frame",
}
}
if !s.handshakeConfirmed {
return s.handleHandshakeConfirmed()
}
return nil
}
func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error {
acked1RTTPacket, err := s.sentPacketHandler.ReceivedAck(frame, encLevel, s.lastPacketReceivedTime)
if err != nil {
return err
}
if !acked1RTTPacket {
return nil
}
// On the client side: If the packet acknowledged a 1-RTT packet, this confirms the handshake.
// This is only possible if the ACK was sent in a 1-RTT packet.
// This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001.
if s.perspective == protocol.PerspectiveClient && !s.handshakeConfirmed {
if err := s.handleHandshakeConfirmed(); err != nil {
return err
}
}
return s.cryptoStreamHandler.SetLargest1RTTAcked(frame.LargestAcked())
}
func (s *connection) handleDatagramFrame(f *wire.DatagramFrame) error {
if f.Length(s.version) > wire.MaxDatagramSize {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "DATAGRAM frame too large",
}
}
s.datagramQueue.HandleDatagramFrame(f)
return nil
}
// closeLocal closes the connection and sends a CONNECTION_CLOSE containing the error
func (s *connection) closeLocal(e error) {
s.closeOnce.Do(func() {
if e == nil {
s.logger.Infof("Closing connection.")
} else {
s.logger.Errorf("Closing connection with error: %s", e)
}
s.closeChan <- closeError{err: e, immediate: false, remote: false}
})
}
// destroy closes the connection without sending the error on the wire
func (s *connection) destroy(e error) {
s.destroyImpl(e)
<-s.ctx.Done()
}
func (s *connection) destroyImpl(e error) {
s.closeOnce.Do(func() {
if nerr, ok := e.(net.Error); ok && nerr.Timeout() {
s.logger.Errorf("Destroying connection: %s", e)
} else {
s.logger.Errorf("Destroying connection with error: %s", e)
}
s.closeChan <- closeError{err: e, immediate: true, remote: false}
})
}
func (s *connection) closeRemote(e error) {
s.closeOnce.Do(func() {
s.logger.Errorf("Peer closed connection with error: %s", e)
s.closeChan <- closeError{err: e, immediate: true, remote: true}
})
}
func (s *connection) CloseWithError(code ApplicationErrorCode, desc string) error {
s.closeLocal(&qerr.ApplicationError{
ErrorCode: code,
ErrorMessage: desc,
})
<-s.ctx.Done()
return nil
}
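// Illustrative usage sketch (not part of the original source): application code
// closes the connection with an application-defined error code and reason phrase.
// The error code value below is hypothetical.
//
//	const errCodeShuttingDown quic.ApplicationErrorCode = 0x17
//	_ = conn.CloseWithError(errCodeShuttingDown, "server shutting down")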
func (s *connection) closeWithTransportError(code TransportErrorCode) {
s.closeLocal(&qerr.TransportError{ErrorCode: code})
<-s.ctx.Done()
}
func (s *connection) handleCloseError(closeErr *closeError) {
e := closeErr.err
if e == nil {
e = &qerr.ApplicationError{}
} else {
defer func() {
closeErr.err = e
}()
}
var (
statelessResetErr *StatelessResetError
versionNegotiationErr *VersionNegotiationError
recreateErr *errCloseForRecreating
applicationErr *ApplicationError
transportErr *TransportError
)
switch {
case errors.Is(e, qerr.ErrIdleTimeout),
errors.Is(e, qerr.ErrHandshakeTimeout),
errors.As(e, &statelessResetErr),
errors.As(e, &versionNegotiationErr),
errors.As(e, &recreateErr),
errors.As(e, &applicationErr),
errors.As(e, &transportErr):
default:
e = &qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: e.Error(),
}
}
s.streamsMap.CloseWithError(e)
s.connIDManager.Close()
if s.datagramQueue != nil {
s.datagramQueue.CloseWithError(e)
}
if s.tracer != nil && s.tracer.ClosedConnection != nil && !errors.As(e, &recreateErr) {
s.tracer.ClosedConnection(e)
}
// If this is a remote close we're done here
if closeErr.remote {
s.connIDGenerator.ReplaceWithClosed(nil)
return
}
if closeErr.immediate {
s.connIDGenerator.RemoveAll()
return
}
// Don't send out any CONNECTION_CLOSE if this is an error that occurred
// before we even sent out the first packet.
if s.perspective == protocol.PerspectiveClient && !s.sentFirstPacket {
s.connIDGenerator.RemoveAll()
return
}
connClosePacket, err := s.sendConnectionClose(e)
if err != nil {
s.logger.Debugf("Error sending CONNECTION_CLOSE: %s", err)
}
s.connIDGenerator.ReplaceWithClosed(connClosePacket)
}
func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) error {
if s.tracer != nil && s.tracer.DroppedEncryptionLevel != nil {
s.tracer.DroppedEncryptionLevel(encLevel)
}
s.sentPacketHandler.DropPackets(encLevel)
s.receivedPacketHandler.DropPackets(encLevel)
//nolint:exhaustive // only Initial and 0-RTT need special treatment
switch encLevel {
case protocol.EncryptionInitial:
s.droppedInitialKeys = true
s.cryptoStreamHandler.DiscardInitialKeys()
case protocol.Encryption0RTT:
s.streamsMap.ResetFor0RTT()
s.framer.Handle0RTTRejection()
return s.connFlowController.Reset()
}
return s.cryptoStreamManager.Drop(encLevel)
}
// restoreTransportParameters is called on the client side when restoring transport parameters saved for 0-RTT
func (s *connection) restoreTransportParameters(params *wire.TransportParameters) {
if s.logger.Debug() {
s.logger.Debugf("Restoring Transport Parameters: %s", params)
}
s.peerParams = params
s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
s.connFlowController.UpdateSendWindow(params.InitialMaxData)
s.streamsMap.UpdateLimits(params)
s.connStateMutex.Lock()
s.connState.SupportsDatagrams = s.supportsDatagrams()
s.connStateMutex.Unlock()
}
func (s *connection) handleTransportParameters(params *wire.TransportParameters) error {
if s.tracer != nil && s.tracer.ReceivedTransportParameters != nil {
s.tracer.ReceivedTransportParameters(params)
}
if err := s.checkTransportParameters(params); err != nil {
return &qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: err.Error(),
}
}
if s.perspective == protocol.PerspectiveClient && s.peerParams != nil && s.ConnectionState().Used0RTT && !params.ValidForUpdate(s.peerParams) {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "server sent reduced limits after accepting 0-RTT data",
}
}
s.peerParams = params
// On the client side we have to wait for handshake completion.
// During a 0-RTT connection, we are only allowed to use the new transport parameters for 1-RTT packets.
if s.perspective == protocol.PerspectiveServer {
s.applyTransportParameters()
// On the server side, the early connection is ready as soon as we processed
// the client's transport parameters.
close(s.earlyConnReadyChan)
}
s.connStateMutex.Lock()
s.connState.SupportsDatagrams = s.supportsDatagrams()
s.connStateMutex.Unlock()
return nil
}
func (s *connection) checkTransportParameters(params *wire.TransportParameters) error {
if s.logger.Debug() {
s.logger.Debugf("Processed Transport Parameters: %s", params)
}
// check the initial_source_connection_id
if params.InitialSourceConnectionID != s.handshakeDestConnID {
return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", s.handshakeDestConnID, params.InitialSourceConnectionID)
}
if s.perspective == protocol.PerspectiveServer {
return nil
}
// check the original_destination_connection_id
if params.OriginalDestinationConnectionID != s.origDestConnID {
return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", s.origDestConnID, params.OriginalDestinationConnectionID)
}
if s.retrySrcConnID != nil { // a Retry was performed
if params.RetrySourceConnectionID == nil {
return errors.New("missing retry_source_connection_id")
}
if *params.RetrySourceConnectionID != *s.retrySrcConnID {
return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", s.retrySrcConnID, *params.RetrySourceConnectionID)
}
} else if params.RetrySourceConnectionID != nil {
return errors.New("received retry_source_connection_id, although no Retry was performed")
}
return nil
}
func (s *connection) applyTransportParameters() {
params := s.peerParams
// Our local idle timeout will always be > 0.
s.idleTimeout = s.config.MaxIdleTimeout
if s.idleTimeout > 0 && params.MaxIdleTimeout < s.idleTimeout {
s.idleTimeout = params.MaxIdleTimeout
}
s.keepAliveInterval = min(s.config.KeepAlivePeriod, min(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
s.streamsMap.UpdateLimits(params)
s.frameParser.SetAckDelayExponent(params.AckDelayExponent)
s.connFlowController.UpdateSendWindow(params.InitialMaxData)
s.rttStats.SetMaxAckDelay(params.MaxAckDelay)
s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
if params.StatelessResetToken != nil {
s.connIDManager.SetStatelessResetToken(*params.StatelessResetToken)
}
// We don't support connection migration yet, so we don't have any use for the preferred_address.
if params.PreferredAddress != nil {
// Retire the connection ID.
s.connIDManager.AddFromPreferredAddress(params.PreferredAddress.ConnectionID, params.PreferredAddress.StatelessResetToken)
}
maxPacketSize := protocol.ByteCount(protocol.MaxPacketBufferSize)
if params.MaxUDPPayloadSize > 0 && params.MaxUDPPayloadSize < maxPacketSize {
maxPacketSize = params.MaxUDPPayloadSize
}
s.mtuDiscoverer = newMTUDiscoverer(
s.rttStats,
protocol.ByteCount(s.config.InitialPacketSize),
maxPacketSize,
s.onMTUIncreased,
s.tracer,
)
}
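// Worked example for applyTransportParameters (illustrative, not from the original
// source): with a local MaxIdleTimeout of 30s and a peer max_idle_timeout of 60s,
// idleTimeout becomes 30s (the smaller of the two non-zero values). With a
// KeepAlivePeriod of 20s, keepAliveInterval = min(20s, min(30s/2, protocol.MaxKeepAliveInterval)),
// i.e. 15s, assuming protocol.MaxKeepAliveInterval is at least 15s.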
func (s *connection) triggerSending(now time.Time) error {
s.pacingDeadline = time.Time{}
sendMode := s.sentPacketHandler.SendMode(now)
//nolint:exhaustive // No need to handle pacing limited here.
switch sendMode {
case ackhandler.SendAny:
return s.sendPackets(now)
case ackhandler.SendNone:
return nil
case ackhandler.SendPacingLimited:
deadline := s.sentPacketHandler.TimeUntilSend()
if deadline.IsZero() {
deadline = deadlineSendImmediately
}
s.pacingDeadline = deadline
// Allow sending an ACK even when we're pacing limited.
// This makes sure that a peer that is mostly receiving data (and thus has an inaccurate cwnd estimate)
// sends enough ACKs to allow its peer to utilize the bandwidth.
fallthrough
case ackhandler.SendAck:
// We can at most send a single ACK only packet.
// There will only be a new ACK after receiving new packets.
// SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
return s.maybeSendAckOnlyPacket(now)
case ackhandler.SendPTOInitial:
if err := s.sendProbePacket(protocol.EncryptionInitial, now); err != nil {
return err
}
if s.sendQueue.WouldBlock() {
s.scheduleSending()
return nil
}
return s.triggerSending(now)
case ackhandler.SendPTOHandshake:
if err := s.sendProbePacket(protocol.EncryptionHandshake, now); err != nil {
return err
}
if s.sendQueue.WouldBlock() {
s.scheduleSending()
return nil
}
return s.triggerSending(now)
case ackhandler.SendPTOAppData:
if err := s.sendProbePacket(protocol.Encryption1RTT, now); err != nil {
return err
}
if s.sendQueue.WouldBlock() {
s.scheduleSending()
return nil
}
return s.triggerSending(now)
default:
return fmt.Errorf("BUG: invalid send mode %d", sendMode)
}
}
func (s *connection) sendPackets(now time.Time) error {
// Path MTU Discovery
// Can't use GSO, since we need to send a single packet that's larger than our current maximum size.
// Performance-wise, this doesn't matter, since we only send a very small (<10) number of
// MTU probe packets per connection.
if s.handshakeConfirmed && s.mtuDiscoverer != nil && s.mtuDiscoverer.ShouldSendProbe(now) {
ping, size := s.mtuDiscoverer.GetPing()
p, buf, err := s.packer.PackMTUProbePacket(ping, size, s.version)
if err != nil {
return err
}
ecn := s.sentPacketHandler.ECNMode(true)
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false)
s.registerPackedShortHeaderPacket(p, ecn, now)
s.sendQueue.Send(buf, 0, ecn)
// This is kind of a hack. We need to trigger sending again somehow.
s.pacingDeadline = deadlineSendImmediately
return nil
}
if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked {
s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset})
}
if offset := s.connFlowController.GetWindowUpdate(); offset > 0 {
s.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: offset})
}
if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil {
s.queueControlFrame(cf)
}
if !s.handshakeConfirmed {
packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), s.version)
if err != nil || packet == nil {
return err
}
s.sentFirstPacket = true
if err := s.sendPackedCoalescedPacket(packet, s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket()), now); err != nil {
return err
}
sendMode := s.sentPacketHandler.SendMode(now)
if sendMode == ackhandler.SendPacingLimited {
s.resetPacingDeadline()
} else if sendMode == ackhandler.SendAny {
s.pacingDeadline = deadlineSendImmediately
}
return nil
}
if s.conn.capabilities().GSO {
return s.sendPacketsWithGSO(now)
}
return s.sendPacketsWithoutGSO(now)
}
func (s *connection) sendPacketsWithoutGSO(now time.Time) error {
for {
buf := getPacketBuffer()
ecn := s.sentPacketHandler.ECNMode(true)
if _, err := s.appendOneShortHeaderPacket(buf, s.maxPacketSize(), ecn, now); err != nil {
if err == errNothingToPack {
buf.Release()
return nil
}
return err
}
s.sendQueue.Send(buf, 0, ecn)
if s.sendQueue.WouldBlock() {
return nil
}
sendMode := s.sentPacketHandler.SendMode(now)
if sendMode == ackhandler.SendPacingLimited {
s.resetPacingDeadline()
return nil
}
if sendMode != ackhandler.SendAny {
return nil
}
// Prioritize receiving packets over sending out more packets.
if len(s.receivedPackets) > 0 {
s.pacingDeadline = deadlineSendImmediately
return nil
}
}
}
func (s *connection) sendPacketsWithGSO(now time.Time) error {
buf := getLargePacketBuffer()
maxSize := s.maxPacketSize()
ecn := s.sentPacketHandler.ECNMode(true)
for {
var dontSendMore bool
size, err := s.appendOneShortHeaderPacket(buf, maxSize, ecn, now)
if err != nil {
if err != errNothingToPack {
return err
}
if buf.Len() == 0 {
buf.Release()
return nil
}
dontSendMore = true
}
if !dontSendMore {
sendMode := s.sentPacketHandler.SendMode(now)
if sendMode == ackhandler.SendPacingLimited {
s.resetPacingDeadline()
}
if sendMode != ackhandler.SendAny {
dontSendMore = true
}
}
// Don't send more packets in this batch if they require a different ECN marking than the previous ones.
nextECN := s.sentPacketHandler.ECNMode(true)
// Append another packet if
// 1. The congestion controller and pacer allow sending more
// 2. The last packet appended was a full-size packet
// 3. The next packet will have the same ECN marking
// 4. We still have enough space for another full-size packet in the buffer
if !dontSendMore && size == maxSize && nextECN == ecn && buf.Len()+maxSize <= buf.Cap() {
continue
}
s.sendQueue.Send(buf, uint16(maxSize), ecn)
if dontSendMore {
return nil
}
if s.sendQueue.WouldBlock() {
return nil
}
// Prioritize receiving packets over sending out more packets.
if len(s.receivedPackets) > 0 {
s.pacingDeadline = deadlineSendImmediately
return nil
}
buf = getLargePacketBuffer()
}
}
func (s *connection) resetPacingDeadline() {
deadline := s.sentPacketHandler.TimeUntilSend()
if deadline.IsZero() {
deadline = deadlineSendImmediately
}
s.pacingDeadline = deadline
}
func (s *connection) maybeSendAckOnlyPacket(now time.Time) error {
if !s.handshakeConfirmed {
ecn := s.sentPacketHandler.ECNMode(false)
packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), s.version)
if err != nil {
return err
}
if packet == nil {
return nil
}
return s.sendPackedCoalescedPacket(packet, ecn, now)
}
ecn := s.sentPacketHandler.ECNMode(true)
p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), s.version)
if err != nil {
if err == errNothingToPack {
return nil
}
return err
}
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false)
s.registerPackedShortHeaderPacket(p, ecn, now)
s.sendQueue.Send(buf, 0, ecn)
return nil
}
func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time.Time) error {
// Queue probe packets until we actually send out a packet,
// or until there are no more packets to queue.
var packet *coalescedPacket
for {
if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued {
break
}
var err error
packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version)
if err != nil {
return err
}
if packet != nil {
break
}
}
if packet == nil {
s.retransmissionQueue.AddPing(encLevel)
var err error
packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version)
if err != nil {
return err
}
}
if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) {
return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel)
}
return s.sendPackedCoalescedPacket(packet, s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket()), now)
}
// appendOneShortHeaderPacket appends a new packet to the given packetBuffer.
// If there was nothing to pack, the returned size is 0.
func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.ByteCount, ecn protocol.ECN, now time.Time) (protocol.ByteCount, error) {
startLen := buf.Len()
p, err := s.packer.AppendPacket(buf, maxSize, s.version)
if err != nil {
return 0, err
}
size := buf.Len() - startLen
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, size, false)
s.registerPackedShortHeaderPacket(p, ecn, now)
return size, nil
}
func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, ecn protocol.ECN, now time.Time) {
if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(p.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(p.Frames)) {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
largestAcked := protocol.InvalidPacketNumber
if p.Ack != nil {
largestAcked = p.Ack.LargestAcked()
}
s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket)
s.connIDManager.SentPacket()
}
func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN, now time.Time) error {
s.logCoalescedPacket(packet, ecn)
for _, p := range packet.longHdrPackets {
if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
largestAcked := protocol.InvalidPacketNumber
if p.ack != nil {
largestAcked = p.ack.LargestAcked()
}
s.sentPacketHandler.SentPacket(now, p.header.PacketNumber, largestAcked, p.streamFrames, p.frames, p.EncryptionLevel(), ecn, p.length, false)
if s.perspective == protocol.PerspectiveClient && p.EncryptionLevel() == protocol.EncryptionHandshake &&
!s.droppedInitialKeys {
// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
// See Section 4.9.1 of RFC 9001.
if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil {
return err
}
}
}
if p := packet.shortHdrPacket; p != nil {
if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
largestAcked := protocol.InvalidPacketNumber
if p.Ack != nil {
largestAcked = p.Ack.LargestAcked()
}
s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket)
}
s.connIDManager.SentPacket()
s.sendQueue.Send(packet.buffer, 0, ecn)
return nil
}
func (s *connection) sendConnectionClose(e error) ([]byte, error) {
var packet *coalescedPacket
var err error
var transportErr *qerr.TransportError
var applicationErr *qerr.ApplicationError
if errors.As(e, &transportErr) {
packet, err = s.packer.PackConnectionClose(transportErr, s.maxPacketSize(), s.version)
} else if errors.As(e, &applicationErr) {
packet, err = s.packer.PackApplicationClose(applicationErr, s.maxPacketSize(), s.version)
} else {
packet, err = s.packer.PackConnectionClose(&qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()),
}, s.maxPacketSize(), s.version)
}
if err != nil {
return nil, err
}
ecn := s.sentPacketHandler.ECNMode(packet.IsOnlyShortHeaderPacket())
s.logCoalescedPacket(packet, ecn)
return packet.buffer.Data, s.conn.Write(packet.buffer.Data, 0, ecn)
}
func (s *connection) maxPacketSize() protocol.ByteCount {
if s.mtuDiscoverer == nil {
// Use the configured packet size on the client side.
// If the server sends a max_udp_payload_size that's smaller than this size, we can ignore this:
// Apparently the server still processed the (fully padded) Initial packet anyway.
if s.perspective == protocol.PerspectiveClient {
return protocol.ByteCount(s.config.InitialPacketSize)
}
// On the server side, there's no downside to using 1200 bytes until we received the client's transport
// parameters:
// * If the first packet didn't contain the entire ClientHello, all we can do is ACK that packet. We don't
// need a lot of bytes for that.
// * If it did, we will have processed the transport parameters and initialized the MTU discoverer.
return protocol.MinInitialPacketSize
}
return s.mtuDiscoverer.CurrentSize()
}
// AcceptStream returns the next stream opened by the peer
func (s *connection) AcceptStream(ctx context.Context) (Stream, error) {
return s.streamsMap.AcceptStream(ctx)
}
func (s *connection) AcceptUniStream(ctx context.Context) (ReceiveStream, error) {
return s.streamsMap.AcceptUniStream(ctx)
}
// OpenStream opens a stream
func (s *connection) OpenStream() (Stream, error) {
return s.streamsMap.OpenStream()
}
func (s *connection) OpenStreamSync(ctx context.Context) (Stream, error) {
return s.streamsMap.OpenStreamSync(ctx)
}
func (s *connection) OpenUniStream() (SendStream, error) {
return s.streamsMap.OpenUniStream()
}
func (s *connection) OpenUniStreamSync(ctx context.Context) (SendStream, error) {
return s.streamsMap.OpenUniStreamSync(ctx)
}
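// Illustrative usage sketch (not part of the original source): one endpoint opens
// a bidirectional stream and writes to it, the peer accepts it. In QUIC, a stream
// is opened implicitly on the wire by the first frame that references it.
//
//	str, err := conn.OpenStreamSync(ctx) // blocks while the peer's stream limit is exhausted
//	if err != nil { /* handle error */ }
//	_, _ = str.Write([]byte("hello"))
//
//	// on the peer:
//	str, err := conn.AcceptStream(ctx)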
func (s *connection) newFlowController(id protocol.StreamID) flowcontrol.StreamFlowController {
initialSendWindow := s.peerParams.InitialMaxStreamDataUni
if id.Type() == protocol.StreamTypeBidi {
if id.InitiatedBy() == s.perspective {
initialSendWindow = s.peerParams.InitialMaxStreamDataBidiRemote
} else {
initialSendWindow = s.peerParams.InitialMaxStreamDataBidiLocal
}
}
return flowcontrol.NewStreamFlowController(
id,
s.connFlowController,
protocol.ByteCount(s.config.InitialStreamReceiveWindow),
protocol.ByteCount(s.config.MaxStreamReceiveWindow),
initialSendWindow,
s.rttStats,
s.logger,
)
}
// scheduleSending signals that we have data for sending
func (s *connection) scheduleSending() {
select {
case s.sendingScheduled <- struct{}{}:
default:
}
}
// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
// The logging.PacketType is only used for logging purposes.
func (s *connection) tryQueueingUndecryptablePacket(p receivedPacket, pt logging.PacketType) {
if s.handshakeComplete {
panic("shouldn't queue undecryptable packets after handshake completion")
}
if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
if s.tracer != nil && s.tracer.DroppedPacket != nil {
s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropDOSPrevention)
}
s.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", p.Size())
return
}
s.logger.Infof("Queueing packet (%d bytes) for later decryption", p.Size())
if s.tracer != nil && s.tracer.BufferedPacket != nil {
s.tracer.BufferedPacket(pt, p.Size())
}
s.undecryptablePackets = append(s.undecryptablePackets, p)
}
func (s *connection) queueControlFrame(f wire.Frame) {
s.framer.QueueControlFrame(f)
s.scheduleSending()
}
func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) {
s.framer.AddActiveStream(id, str)
s.scheduleSending()
}
func (s *connection) onHasStreamControlFrame(id protocol.StreamID, str streamControlFrameGetter) {
s.framer.AddStreamWithControlFrames(id, str)
s.scheduleSending()
}
func (s *connection) onStreamCompleted(id protocol.StreamID) {
if err := s.streamsMap.DeleteStream(id); err != nil {
s.closeLocal(err)
}
s.framer.RemoveActiveStream(id)
}
func (s *connection) onMTUIncreased(mtu protocol.ByteCount) {
s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(mtu)))
s.sentPacketHandler.SetMaxDatagramSize(mtu)
}
func (s *connection) SendDatagram(p []byte) error {
if !s.supportsDatagrams() {
return errors.New("datagram support disabled")
}
f := &wire.DatagramFrame{DataLenPresent: true}
// The payload size estimate is conservative.
// Under many circumstances we could send a few more bytes.
maxDataLen := min(
f.MaxDataLen(s.peerParams.MaxDatagramFrameSize, s.version),
protocol.ByteCount(s.maxPayloadSizeEstimate.Load()),
)
if protocol.ByteCount(len(p)) > maxDataLen {
return &DatagramTooLargeError{MaxDatagramPayloadSize: int64(maxDataLen)}
}
f.Data = make([]byte, len(p))
copy(f.Data, p)
return s.datagramQueue.Add(f)
}
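// exampleSendLargeDatagram is an illustrative sketch and not part of quic-go. It shows how
// application code might react to the DatagramTooLargeError returned by SendDatagram above:
// the error carries the current maximum payload size, so the caller can shrink the message.
// The truncation strategy is purely an example; real applications will usually re-frame
// their data instead.
func exampleSendLargeDatagram(conn Connection, msg []byte) error {
err := conn.SendDatagram(msg)
var tooLarge *DatagramTooLargeError
if errors.As(err, &tooLarge) {
// Retry with a payload that fits into a single QUIC packet.
return conn.SendDatagram(msg[:tooLarge.MaxDatagramPayloadSize])
}
return err
}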
func (s *connection) ReceiveDatagram(ctx context.Context) ([]byte, error) {
if !s.config.EnableDatagrams {
return nil, errors.New("datagram support disabled")
}
return s.datagramQueue.Receive(ctx)
}
func (s *connection) LocalAddr() net.Addr {
return s.conn.LocalAddr()
}
func (s *connection) RemoteAddr() net.Addr {
return s.conn.RemoteAddr()
}
func (s *connection) GetVersion() protocol.Version {
return s.version
}
func (s *connection) NextConnection(ctx context.Context) (Connection, error) {
// The handshake might fail after the server rejected 0-RTT.
// This could happen if the Finished message is malformed or never received.
select {
case <-ctx.Done():
return nil, context.Cause(ctx)
case <-s.Context().Done():
case <-s.HandshakeComplete():
s.streamsMap.UseResetMaps()
}
return s, nil
}
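// exampleNextConnection is an illustrative sketch and not part of quic-go. It assumes that
// the EarlyConnection interface exposed by this package declares NextConnection with the
// same signature as the method above. After a 0-RTT dial, waiting on NextConnection yields
// the connection once the handshake has completed (or the connection has been closed), and
// returns an error only if the passed context is cancelled first.
func exampleNextConnection(ctx context.Context, early EarlyConnection) (Connection, error) {
// Streams opened before this call may have been sent in 0-RTT packets.
return early.NextConnection(ctx)
}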
// estimateMaxPayloadSize estimates the maximum payload size for short header packets.
// It is not very sophisticated: it just subtracts the size of the header (assuming the maximum
// connection ID length) and the size of the encryption tag.
func estimateMaxPayloadSize(mtu protocol.ByteCount) protocol.ByteCount {
return mtu - 1 /* type byte */ - 20 /* maximum connection ID length */ - 16 /* tag size */
}
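// Worked example (illustrative, not part of the original source): for an MTU of 1252 bytes,
// estimateMaxPayloadSize returns 1252 - 1 - 20 - 16 = 1215 bytes. As noted in SendDatagram,
// the estimate is conservative, presumably because connection IDs are usually shorter than
// the assumed 20-byte maximum, so a few more bytes could often be sent.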
golang-github-lucas-clemente-quic-go-0.46.0/connection_logging.go 0000664 0000000 0000000 00000011672 14656644531 0025012 0 ustar 00root root 0000000 0000000 package quic
import (
"slices"
"github.com/quic-go/quic-go/internal/ackhandler"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/logging"
)
// toLoggingFrame converts a wire.Frame into a logging.Frame.
// This makes it possible for external packages to access the frames.
// Furthermore, it removes the data slices from CRYPTO and STREAM frames.
func toLoggingFrame(frame wire.Frame) logging.Frame {
switch f := frame.(type) {
case *wire.AckFrame:
// We use a pool for ACK frames.
// Implementations of the tracer interface may hold on to frames, so we need to make a copy here.
return toLoggingAckFrame(f)
case *wire.CryptoFrame:
return &logging.CryptoFrame{
Offset: f.Offset,
Length: protocol.ByteCount(len(f.Data)),
}
case *wire.StreamFrame:
return &logging.StreamFrame{
StreamID: f.StreamID,
Offset: f.Offset,
Length: f.DataLen(),
Fin: f.Fin,
}
case *wire.DatagramFrame:
return &logging.DatagramFrame{
Length: logging.ByteCount(len(f.Data)),
}
default:
return logging.Frame(frame)
}
}
func toLoggingAckFrame(f *wire.AckFrame) *logging.AckFrame {
ack := &logging.AckFrame{
AckRanges: slices.Clone(f.AckRanges),
DelayTime: f.DelayTime,
ECNCE: f.ECNCE,
ECT0: f.ECT0,
ECT1: f.ECT1,
}
return ack
}
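// Design note (illustrative, not part of the original source): since ACK frames come from a
// pool and may be reused after they have been handled, the AckRanges slice is deep-copied
// via slices.Clone above. A tracer implementation that holds on to the logging.AckFrame
// therefore never observes the pooled slice being overwritten.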
func (s *connection) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) {
// quic-go logging
if s.logger.Debug() {
p.header.Log(s.logger)
if p.ack != nil {
wire.LogFrame(s.logger, p.ack, true)
}
for _, frame := range p.frames {
wire.LogFrame(s.logger, frame.Frame, true)
}
for _, frame := range p.streamFrames {
wire.LogFrame(s.logger, frame.Frame, true)
}
}
// tracing
if s.tracer != nil && s.tracer.SentLongHeaderPacket != nil {
frames := make([]logging.Frame, 0, len(p.frames))
for _, f := range p.frames {
frames = append(frames, toLoggingFrame(f.Frame))
}
for _, f := range p.streamFrames {
frames = append(frames, toLoggingFrame(f.Frame))
}
var ack *logging.AckFrame
if p.ack != nil {
ack = toLoggingAckFrame(p.ack)
}
s.tracer.SentLongHeaderPacket(p.header, p.length, ecn, ack, frames)
}
}
func (s *connection) logShortHeaderPacket(
destConnID protocol.ConnectionID,
ackFrame *wire.AckFrame,
frames []ackhandler.Frame,
streamFrames []ackhandler.StreamFrame,
pn protocol.PacketNumber,
pnLen protocol.PacketNumberLen,
kp protocol.KeyPhaseBit,
ecn protocol.ECN,
size protocol.ByteCount,
isCoalesced bool,
) {
if s.logger.Debug() && !isCoalesced {
s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, s.logID, ecn)
}
// quic-go logging
if s.logger.Debug() {
wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp)
if ackFrame != nil {
wire.LogFrame(s.logger, ackFrame, true)
}
for _, f := range frames {
wire.LogFrame(s.logger, f.Frame, true)
}
for _, f := range streamFrames {
wire.LogFrame(s.logger, f.Frame, true)
}
}
// tracing
if s.tracer != nil && s.tracer.SentShortHeaderPacket != nil {
fs := make([]logging.Frame, 0, len(frames)+len(streamFrames))
for _, f := range frames {
fs = append(fs, toLoggingFrame(f.Frame))
}
for _, f := range streamFrames {
fs = append(fs, toLoggingFrame(f.Frame))
}
var ack *logging.AckFrame
if ackFrame != nil {
ack = toLoggingAckFrame(ackFrame)
}
s.tracer.SentShortHeaderPacket(
&logging.ShortHeader{
DestConnectionID: destConnID,
PacketNumber: pn,
PacketNumberLen: pnLen,
KeyPhase: kp,
},
size,
ecn,
ack,
fs,
)
}
}
func (s *connection) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) {
if s.logger.Debug() {
// There's a short period between dropping both Initial and Handshake keys and completion of the handshake,
// during which we might call PackCoalescedPacket but just pack a short header packet.
if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil {
s.logShortHeaderPacket(
packet.shortHdrPacket.DestConnID,
packet.shortHdrPacket.Ack,
packet.shortHdrPacket.Frames,
packet.shortHdrPacket.StreamFrames,
packet.shortHdrPacket.PacketNumber,
packet.shortHdrPacket.PacketNumberLen,
packet.shortHdrPacket.KeyPhase,
ecn,
packet.shortHdrPacket.Length,
false,
)
return
}
if len(packet.longHdrPackets) > 1 {
s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID)
} else {
s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel())
}
}
for _, p := range packet.longHdrPackets {
s.logLongHeaderPacket(p, ecn)
}
if p := packet.shortHdrPacket; p != nil {
s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true)
}
}
golang-github-lucas-clemente-quic-go-0.46.0/connection_logging_test.go 0000664 0000000 0000000 00000003103 14656644531 0026037 0 ustar 00root root 0000000 0000000 package quic
import (
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/logging"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("CRYPTO frame", func() {
It("converts CRYPTO frames", func() {
f := toLoggingFrame(&wire.CryptoFrame{
Offset: 1234,
Data: []byte("foobar"),
})
Expect(f).To(BeAssignableToTypeOf(&logging.CryptoFrame{}))
cf := f.(*logging.CryptoFrame)
Expect(cf.Offset).To(Equal(logging.ByteCount(1234)))
Expect(cf.Length).To(Equal(logging.ByteCount(6)))
})
It("converts STREAM frames", func() {
f := toLoggingFrame(&wire.StreamFrame{
StreamID: 42,
Offset: 1234,
Data: []byte("foo"),
Fin: true,
})
Expect(f).To(BeAssignableToTypeOf(&logging.StreamFrame{}))
sf := f.(*logging.StreamFrame)
Expect(sf.StreamID).To(Equal(logging.StreamID(42)))
Expect(sf.Offset).To(Equal(logging.ByteCount(1234)))
Expect(sf.Length).To(Equal(logging.ByteCount(3)))
Expect(sf.Fin).To(BeTrue())
})
It("converts DATAGRAM frames", func() {
f := toLoggingFrame(&wire.DatagramFrame{Data: []byte("foobar")})
Expect(f).To(BeAssignableToTypeOf(&logging.DatagramFrame{}))
df := f.(*logging.DatagramFrame)
Expect(df.Length).To(Equal(logging.ByteCount(6)))
})
It("converts other frames", func() {
f := toLoggingFrame(&wire.MaxDataFrame{MaximumData: 1234})
Expect(f).To(BeAssignableToTypeOf(&logging.MaxDataFrame{}))
Expect(f).ToNot(BeAssignableToTypeOf(&logging.MaxStreamDataFrame{}))
mdf := f.(*logging.MaxDataFrame)
Expect(mdf.MaximumData).To(Equal(logging.ByteCount(1234)))
})
})
golang-github-lucas-clemente-quic-go-0.46.0/connection_test.go 0000664 0000000 0000000 00000421532 14656644531 0024343 0 ustar 00root root 0000000 0000000 package quic
import (
"bytes"
"context"
"crypto/rand"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/netip"
"runtime/pprof"
"strings"
"time"
"github.com/quic-go/quic-go/internal/ackhandler"
"github.com/quic-go/quic-go/internal/handshake"
"github.com/quic-go/quic-go/internal/mocks"
mockackhandler "github.com/quic-go/quic-go/internal/mocks/ackhandler"
mocklogging "github.com/quic-go/quic-go/internal/mocks/logging"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
"github.com/quic-go/quic-go/logging"
"github.com/quic-go/quic-go/testutils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"go.uber.org/mock/gomock"
)
func areConnsRunning() bool {
var b bytes.Buffer
pprof.Lookup("goroutine").WriteTo(&b, 1)
return strings.Contains(b.String(), "quic-go.(*connection).run")
}
var _ = Describe("Connection", func() {
var (
conn *connection
connRunner *MockConnRunner
mconn *MockSendConn
streamManager *MockStreamManager
packer *MockPacker
cryptoSetup *mocks.MockCryptoSetup
tracer *mocklogging.MockConnectionTracer
capabilities connCapabilities
)
remoteAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337}
localAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 7331}
srcConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8})
destConnID := protocol.ParseConnectionID([]byte{8, 7, 6, 5, 4, 3, 2, 1})
clientDestConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
getCoalescedPacket := func(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel) *coalescedPacket {
buffer := getPacketBuffer()
buffer.Data = append(buffer.Data, []byte("foobar")...)
packet := &coalescedPacket{buffer: buffer}
if encLevel != protocol.Encryption1RTT {
var typ protocol.PacketType
switch encLevel {
case protocol.EncryptionInitial:
typ = protocol.PacketTypeInitial
case protocol.EncryptionHandshake:
typ = protocol.PacketTypeHandshake
}
packet.longHdrPackets = []*longHeaderPacket{{
header: &wire.ExtendedHeader{
Header: wire.Header{Type: typ},
PacketNumber: pn,
},
length: 6, // foobar
}}
} else {
packet.shortHdrPacket = &shortHeaderPacket{
PacketNumber: pn,
Length: 6,
}
}
return packet
}
expectReplaceWithClosed := func() {
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any()).Do(func(connIDs []protocol.ConnectionID, _ []byte) {
Expect(connIDs).To(ContainElement(srcConnID))
if len(connIDs) > 1 {
Expect(connIDs).To(ContainElement(clientDestConnID))
}
})
}
expectAppendPacket := func(packer *MockPacker, p shortHeaderPacket, b []byte) *MockPackerAppendPacketCall {
return packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), Version1).DoAndReturn(func(buf *packetBuffer, _ protocol.ByteCount, _ protocol.Version) (shortHeaderPacket, error) {
buf.Data = append(buf.Data, b...)
return p, nil
})
}
enableGSO := func() { capabilities = connCapabilities{GSO: true} }
BeforeEach(func() {
Eventually(areConnsRunning).Should(BeFalse())
connRunner = NewMockConnRunner(mockCtrl)
mconn = NewMockSendConn(mockCtrl)
mconn.EXPECT().capabilities().DoAndReturn(func() connCapabilities { return capabilities }).AnyTimes()
mconn.EXPECT().RemoteAddr().Return(remoteAddr).AnyTimes()
mconn.EXPECT().LocalAddr().Return(localAddr).AnyTimes()
tokenGenerator := handshake.NewTokenGenerator([32]byte{0xa, 0xb, 0xc})
var tr *logging.ConnectionTracer
tr, tracer = mocklogging.NewMockConnectionTracer(mockCtrl)
tracer.EXPECT().NegotiatedVersion(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
tracer.EXPECT().SentTransportParameters(gomock.Any())
tracer.EXPECT().UpdatedKeyFromTLS(gomock.Any(), gomock.Any()).AnyTimes()
tracer.EXPECT().UpdatedCongestionState(gomock.Any())
ctx, cancel := context.WithCancelCause(context.Background())
conn = newConnection(
ctx,
cancel,
mconn,
connRunner,
protocol.ConnectionID{},
nil,
clientDestConnID,
destConnID,
srcConnID,
&protocol.DefaultConnectionIDGenerator{},
protocol.StatelessResetToken{},
populateConfig(&Config{DisablePathMTUDiscovery: true}),
&tls.Config{},
tokenGenerator,
false,
tr,
utils.DefaultLogger,
protocol.Version1,
).(*connection)
streamManager = NewMockStreamManager(mockCtrl)
conn.streamsMap = streamManager
packer = NewMockPacker(mockCtrl)
conn.packer = packer
cryptoSetup = mocks.NewMockCryptoSetup(mockCtrl)
conn.cryptoStreamHandler = cryptoSetup
conn.handshakeComplete = true
conn.idleTimeout = time.Hour
})
AfterEach(func() {
Eventually(areConnsRunning).Should(BeFalse())
capabilities = connCapabilities{}
})
Context("frame handling", func() {
Context("handling STREAM frames", func() {
It("passes STREAM frames to the stream", func() {
f := &wire.StreamFrame{
StreamID: 5,
Data: []byte{0xde, 0xca, 0xfb, 0xad},
}
str := NewMockReceiveStreamI(mockCtrl)
str.EXPECT().handleStreamFrame(f)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(5)).Return(str, nil)
Expect(conn.handleStreamFrame(f)).To(Succeed())
})
It("returns errors", func() {
testErr := errors.New("test err")
f := &wire.StreamFrame{
StreamID: 5,
Data: []byte{0xde, 0xca, 0xfb, 0xad},
}
str := NewMockReceiveStreamI(mockCtrl)
str.EXPECT().handleStreamFrame(f).Return(testErr)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(5)).Return(str, nil)
Expect(conn.handleStreamFrame(f)).To(MatchError(testErr))
})
It("ignores STREAM frames for closed streams", func() {
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(5)).Return(nil, nil) // for closed streams, the streamManager returns nil
Expect(conn.handleStreamFrame(&wire.StreamFrame{
StreamID: 5,
Data: []byte("foobar"),
})).To(Succeed())
})
})
Context("handling ACK frames", func() {
It("informs the SentPacketHandler about ACKs", func() {
f := &wire.AckFrame{AckRanges: []wire.AckRange{{Smallest: 2, Largest: 3}}}
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().ReceivedAck(f, protocol.EncryptionHandshake, gomock.Any())
conn.sentPacketHandler = sph
err := conn.handleAckFrame(f, protocol.EncryptionHandshake)
Expect(err).ToNot(HaveOccurred())
})
})
Context("handling RESET_STREAM frames", func() {
It("closes the streams for writing", func() {
f := &wire.ResetStreamFrame{
StreamID: 555,
ErrorCode: 42,
FinalSize: 0x1337,
}
str := NewMockReceiveStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(555)).Return(str, nil)
str.EXPECT().handleResetStreamFrame(f)
err := conn.handleResetStreamFrame(f)
Expect(err).ToNot(HaveOccurred())
})
It("returns errors", func() {
f := &wire.ResetStreamFrame{
StreamID: 7,
FinalSize: 0x1337,
}
testErr := errors.New("flow control violation")
str := NewMockReceiveStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(7)).Return(str, nil)
str.EXPECT().handleResetStreamFrame(f).Return(testErr)
err := conn.handleResetStreamFrame(f)
Expect(err).To(MatchError(testErr))
})
It("ignores RESET_STREAM frames for closed streams", func() {
streamManager.EXPECT().GetOrOpenReceiveStream(protocol.StreamID(3)).Return(nil, nil)
Expect(conn.handleFrame(&wire.ResetStreamFrame{
StreamID: 3,
ErrorCode: 42,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
})
})
Context("handling MAX_DATA and MAX_STREAM_DATA frames", func() {
var connFC *mocks.MockConnectionFlowController
BeforeEach(func() {
connFC = mocks.NewMockConnectionFlowController(mockCtrl)
conn.connFlowController = connFC
})
It("updates the flow control window of a stream", func() {
f := &wire.MaxStreamDataFrame{
StreamID: 12345,
MaximumStreamData: 0x1337,
}
str := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(12345)).Return(str, nil)
str.EXPECT().updateSendWindow(protocol.ByteCount(0x1337))
Expect(conn.handleMaxStreamDataFrame(f)).To(Succeed())
})
It("updates the flow control window of the connection", func() {
offset := protocol.ByteCount(0x800000)
connFC.EXPECT().UpdateSendWindow(offset)
conn.handleMaxDataFrame(&wire.MaxDataFrame{MaximumData: offset})
})
It("ignores MAX_STREAM_DATA frames for a closed stream", func() {
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(10)).Return(nil, nil)
Expect(conn.handleFrame(&wire.MaxStreamDataFrame{
StreamID: 10,
MaximumStreamData: 1337,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
})
})
Context("handling MAX_STREAM_ID frames", func() {
It("passes the frame to the streamsMap", func() {
f := &wire.MaxStreamsFrame{
Type: protocol.StreamTypeUni,
MaxStreamNum: 10,
}
streamManager.EXPECT().HandleMaxStreamsFrame(f)
conn.handleMaxStreamsFrame(f)
})
})
Context("handling STOP_SENDING frames", func() {
It("passes the frame to the stream", func() {
f := &wire.StopSendingFrame{
StreamID: 5,
ErrorCode: 10,
}
str := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(5)).Return(str, nil)
str.EXPECT().handleStopSendingFrame(f)
err := conn.handleStopSendingFrame(f)
Expect(err).ToNot(HaveOccurred())
})
It("ignores STOP_SENDING frames for a closed stream", func() {
streamManager.EXPECT().GetOrOpenSendStream(protocol.StreamID(3)).Return(nil, nil)
Expect(conn.handleFrame(&wire.StopSendingFrame{
StreamID: 3,
ErrorCode: 1337,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
})
})
It("handles NEW_CONNECTION_ID frames", func() {
connID := protocol.ParseConnectionID([]byte{1, 2, 3, 4})
Expect(conn.handleFrame(&wire.NewConnectionIDFrame{
SequenceNumber: 10,
ConnectionID: connID,
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
Expect(conn.connIDManager.queue.Back().Value.ConnectionID).To(Equal(connID))
})
It("handles PING frames", func() {
err := conn.handleFrame(&wire.PingFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("rejects PATH_RESPONSE frames", func() {
err := conn.handleFrame(&wire.PathResponseFrame{Data: [8]byte{1, 2, 3, 4, 5, 6, 7, 8}}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).To(MatchError("unexpected PATH_RESPONSE frame"))
})
It("handles PATH_CHALLENGE frames", func() {
data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
err := conn.handleFrame(&wire.PathChallengeFrame{Data: data}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).ToNot(HaveOccurred())
frames, _ := conn.framer.AppendControlFrames(nil, 1000, protocol.Version1)
Expect(frames).To(Equal([]ackhandler.Frame{{Frame: &wire.PathResponseFrame{Data: data}}}))
})
It("rejects NEW_TOKEN frames", func() {
err := conn.handleNewTokenFrame(&wire.NewTokenFrame{})
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&qerr.TransportError{}))
Expect(err.(*qerr.TransportError).ErrorCode).To(Equal(qerr.ProtocolViolation))
})
It("handles BLOCKED frames", func() {
err := conn.handleFrame(&wire.DataBlockedFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("handles STREAM_BLOCKED frames", func() {
err := conn.handleFrame(&wire.StreamDataBlockedFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("handles STREAMS_BLOCKED frames", func() {
err := conn.handleFrame(&wire.StreamsBlockedFrame{}, protocol.Encryption1RTT, protocol.ConnectionID{})
Expect(err).NotTo(HaveOccurred())
})
It("handles CONNECTION_CLOSE frames, with a transport error code", func() {
expectedErr := &qerr.TransportError{
Remote: true,
ErrorCode: qerr.StreamLimitError,
ErrorMessage: "foobar",
}
streamManager.EXPECT().CloseWithError(expectedErr)
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any()).Do(func(connIDs []protocol.ConnectionID, _ []byte) {
Expect(connIDs).To(ConsistOf(clientDestConnID, srcConnID))
})
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(expectedErr),
tracer.EXPECT().Close(),
)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
Expect(conn.run()).To(MatchError(expectedErr))
}()
Expect(conn.handleFrame(&wire.ConnectionCloseFrame{
ErrorCode: uint64(qerr.StreamLimitError),
ReasonPhrase: "foobar",
}, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("handles CONNECTION_CLOSE frames, with an application error code", func() {
testErr := &qerr.ApplicationError{
Remote: true,
ErrorCode: 0x1337,
ErrorMessage: "foobar",
}
streamManager.EXPECT().CloseWithError(testErr)
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any()).Do(func(connIDs []protocol.ConnectionID, _ []byte) {
Expect(connIDs).To(ConsistOf(clientDestConnID, srcConnID))
})
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(testErr),
tracer.EXPECT().Close(),
)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
Expect(conn.run()).To(MatchError(testErr))
}()
ccf := &wire.ConnectionCloseFrame{
ErrorCode: 0x1337,
ReasonPhrase: "foobar",
IsApplicationError: true,
}
Expect(conn.handleFrame(ccf, protocol.Encryption1RTT, protocol.ConnectionID{})).To(Succeed())
Eventually(conn.Context().Done()).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(testErr))
})
It("errors on HANDSHAKE_DONE frames", func() {
Expect(conn.handleHandshakeDoneFrame()).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received a HANDSHAKE_DONE frame",
}))
})
})
It("tells its versions", func() {
conn.version = 4242
Expect(conn.GetVersion()).To(Equal(protocol.Version(4242)))
})
Context("closing", func() {
var (
runErr chan error
expectedRunErr error
)
BeforeEach(func() {
runErr = make(chan error, 1)
expectedRunErr = nil
})
AfterEach(func() {
if expectedRunErr != nil {
Eventually(runErr).Should(Receive(MatchError(expectedRunErr)))
} else {
Eventually(runErr).Should(Receive())
}
})
runConn := func() {
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
runErr <- conn.run()
}()
Eventually(areConnsRunning).Should(BeTrue())
}
It("shuts down without error", func() {
conn.handshakeComplete = true
runConn()
streamManager.EXPECT().CloseWithError(&qerr.ApplicationError{})
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
buffer := getPacketBuffer()
buffer.Data = append(buffer.Data, []byte("connection close")...)
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(e *qerr.ApplicationError, _ protocol.ByteCount, _ protocol.Version) (*coalescedPacket, error) {
Expect(e.ErrorCode).To(BeEquivalentTo(qerr.NoError))
Expect(e.ErrorMessage).To(BeEmpty())
return &coalescedPacket{buffer: buffer}, nil
})
mconn.EXPECT().Write([]byte("connection close"), gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var appErr *ApplicationError
Expect(errors.As(e, &appErr)).To(BeTrue())
Expect(appErr.Remote).To(BeFalse())
Expect(appErr.ErrorCode).To(BeZero())
}),
tracer.EXPECT().Close(),
)
conn.CloseWithError(0, "")
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
})
It("only closes once", func() {
runConn()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
conn.CloseWithError(0, "")
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
})
It("closes with an error", func() {
runConn()
expectedErr := &qerr.ApplicationError{
ErrorCode: 0x1337,
ErrorMessage: "test error",
}
streamManager.EXPECT().CloseWithError(expectedErr)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackApplicationClose(expectedErr, gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(expectedErr),
tracer.EXPECT().Close(),
)
conn.CloseWithError(0x1337, "test error")
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(expectedErr))
})
It("includes the frame type in transport-level close frames", func() {
runConn()
expectedErr := &qerr.TransportError{
ErrorCode: 0x1337,
FrameType: 0x42,
ErrorMessage: "test error",
}
streamManager.EXPECT().CloseWithError(expectedErr)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(expectedErr, gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(expectedErr),
tracer.EXPECT().Close(),
)
conn.closeLocal(expectedErr)
Eventually(areConnsRunning).Should(BeFalse())
Expect(conn.Context().Done()).To(BeClosed())
})
It("destroys the connection", func() {
runConn()
testErr := errors.New("close")
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
// don't EXPECT any calls to mconn.Write()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var transportErr *TransportError
Expect(errors.As(e, &transportErr)).To(BeTrue())
Expect(transportErr.Remote).To(BeFalse())
Expect(transportErr.ErrorCode).To(Equal(qerr.InternalError))
}),
tracer.EXPECT().Close(),
)
conn.destroy(testErr)
Eventually(areConnsRunning).Should(BeFalse())
expectedRunErr = &qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: testErr.Error(),
}
})
It("doesn't send any more packets after receiving a CONNECTION_CLOSE", func() {
unpacker := NewMockUnpacker(mockCtrl)
conn.handshakeConfirmed = true
conn.unpacker = unpacker
runConn()
cryptoSetup.EXPECT().Close()
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any()).AnyTimes()
b, err := wire.AppendShortHeader(nil, srcConnID, 42, protocol.PacketNumberLen2, protocol.KeyPhaseOne)
Expect(err).ToNot(HaveOccurred())
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(time.Time, []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
b, err := (&wire.ConnectionCloseFrame{ErrorCode: uint64(qerr.StreamLimitError)}).Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return 3, protocol.PacketNumberLen2, protocol.KeyPhaseOne, b, nil
})
gomock.InOrder(
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),
tracer.EXPECT().ClosedConnection(gomock.Any()),
tracer.EXPECT().Close(),
)
// don't EXPECT any calls to packer.PackPacket()
conn.handlePacket(receivedPacket{
rcvTime: time.Now(),
remoteAddr: &net.UDPAddr{},
buffer: getPacketBuffer(),
data: b,
})
// Consistently(pack).ShouldNot(Receive())
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes when the sendQueue encounters an error", func() {
conn.handshakeConfirmed = true
sconn := NewMockSendConn(mockCtrl)
sconn.EXPECT().capabilities().AnyTimes()
sconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any()).Return(io.ErrClosedPipe).AnyTimes()
conn.sendQueue = newSendQueue(sconn)
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().Return(time.Now().Add(time.Hour)).AnyTimes()
sph.EXPECT().ECNMode(true).Return(protocol.ECT1).AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
// only expect a single SentPacket() call
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
conn.sentPacketHandler = sph
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1}, []byte("foobar"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
runConn()
conn.queueControlFrame(&wire.PingFrame{})
conn.scheduleSending()
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes due to a stateless reset", func() {
token := protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
runConn()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var srErr *StatelessResetError
Expect(errors.As(e, &srErr)).To(BeTrue())
Expect(srErr.Token).To(Equal(token))
}),
tracer.EXPECT().Close(),
)
streamManager.EXPECT().CloseWithError(gomock.Any())
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
conn.destroy(&StatelessResetError{Token: token})
})
})
Context("receiving packets", func() {
var unpacker *MockUnpacker
BeforeEach(func() {
unpacker = NewMockUnpacker(mockCtrl)
conn.unpacker = unpacker
})
getShortHeaderPacket := func(connID protocol.ConnectionID, pn protocol.PacketNumber, data []byte) receivedPacket {
b, err := wire.AppendShortHeader(nil, connID, pn, protocol.PacketNumberLen2, protocol.KeyPhaseOne)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
data: append(b, data...),
buffer: getPacketBuffer(),
rcvTime: time.Now(),
}
}
getLongHeaderPacket := func(extHdr *wire.ExtendedHeader, data []byte) receivedPacket {
b, err := extHdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
data: append(b, data...),
buffer: getPacketBuffer(),
rcvTime: time.Now(),
}
}
It("drops Retry packets", func() {
p := getLongHeaderPacket(&wire.ExtendedHeader{Header: wire.Header{
Type: protocol.PacketTypeRetry,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Version: conn.version,
Token: []byte("foobar"),
}}, make([]byte, 16) /* Retry integrity tag */)
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("drops Version Negotiation packets", func() {
b := wire.ComposeVersionNegotiation(
protocol.ArbitraryLenConnectionID(srcConnID.Bytes()),
protocol.ArbitraryLenConnectionID(destConnID.Bytes()),
conn.config.Versions,
)
tracer.EXPECT().DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, protocol.ByteCount(len(b)), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(receivedPacket{
data: b,
buffer: getPacketBuffer(),
})).To(BeFalse())
})
It("drops packets for which header decryption fails", func() {
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, nil)
p.data[0] ^= 0x40 // unset the QUIC bit
tracer.EXPECT().DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("drops packets for which the version is unsupported", func() {
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
Version: conn.version + 1,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, nil)
tracer.EXPECT().DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnsupportedVersion)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("drops packets with an unsupported version", func() {
origSupportedVersions := make([]protocol.Version, len(protocol.SupportedVersions))
copy(origSupportedVersions, protocol.SupportedVersions)
defer func() {
protocol.SupportedVersions = origSupportedVersions
}()
protocol.SupportedVersions = append(protocol.SupportedVersions, conn.version+1)
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Version: conn.version + 1,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, nil)
tracer.EXPECT().DroppedPacket(logging.PacketTypeHandshake, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedVersion)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("informs the ReceivedPacketHandler about non-ack-eliciting packets", func() {
hdr := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: srcConnID,
Version: protocol.Version1,
Length: 1,
},
PacketNumber: 0x37,
PacketNumberLen: protocol.PacketNumberLen1,
}
unpackedHdr := *hdr
unpackedHdr.PacketNumber = 0x1337
packet := getLongHeaderPacket(hdr, nil)
packet.ecn = protocol.ECNCE
rcvTime := time.Now().Add(-10 * time.Second)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).Return(&unpackedPacket{
encryptionLevel: protocol.EncryptionInitial,
hdr: &unpackedHdr,
data: []byte{0}, // one PADDING frame
}, nil)
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
gomock.InOrder(
rph.EXPECT().IsPotentiallyDuplicate(protocol.PacketNumber(0x1337), protocol.EncryptionInitial),
rph.EXPECT().ReceivedPacket(protocol.PacketNumber(0x1337), protocol.ECNCE, protocol.EncryptionInitial, rcvTime, false),
)
conn.receivedPacketHandler = rph
packet.rcvTime = rcvTime
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), logging.ECNCE, []logging.Frame{})
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
It("informs the ReceivedPacketHandler about ack-eliciting packets", func() {
rcvTime := time.Now().Add(-10 * time.Second)
b, err := (&wire.PingFrame{}).Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
packet := getShortHeaderPacket(srcConnID, 0x37, nil)
packet.ecn = protocol.ECT1
unpacker.EXPECT().UnpackShortHeader(rcvTime, gomock.Any()).Return(protocol.PacketNumber(0x1337), protocol.PacketNumberLen2, protocol.KeyPhaseZero, b, nil)
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
gomock.InOrder(
rph.EXPECT().IsPotentiallyDuplicate(protocol.PacketNumber(0x1337), protocol.Encryption1RTT),
rph.EXPECT().ReceivedPacket(protocol.PacketNumber(0x1337), protocol.ECT1, protocol.Encryption1RTT, rcvTime, true),
)
conn.receivedPacketHandler = rph
packet.rcvTime = rcvTime
tracer.EXPECT().ReceivedShortHeaderPacket(
&logging.ShortHeader{DestConnectionID: srcConnID, PacketNumber: 0x1337, PacketNumberLen: 2, KeyPhase: protocol.KeyPhaseZero},
protocol.ByteCount(len(packet.data)),
logging.ECT1,
[]logging.Frame{&logging.PingFrame{}},
)
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
It("drops duplicate packets", func() {
packet := getShortHeaderPacket(srcConnID, 0x37, nil)
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0x1337), protocol.PacketNumberLen2, protocol.KeyPhaseOne, []byte("foobar"), nil)
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
rph.EXPECT().IsPotentiallyDuplicate(protocol.PacketNumber(0x1337), protocol.Encryption1RTT).Return(true)
conn.receivedPacketHandler = rph
tracer.EXPECT().DroppedPacket(logging.PacketType1RTT, protocol.PacketNumber(0x1337), protocol.ByteCount(len(packet.data)), logging.PacketDropDuplicate)
Expect(conn.handlePacketImpl(packet)).To(BeFalse())
})
It("drops a packet when unpacking fails", func() {
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).Return(nil, handshake.ErrDecryptionFailed)
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
expectReplaceWithClosed()
p := getLongHeaderPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: srcConnID,
Version: conn.version,
Length: 2 + 6,
},
PacketNumber: 0x1337,
PacketNumberLen: protocol.PacketNumberLen2,
}, []byte("foobar"))
tracer.EXPECT().DroppedPacket(logging.PacketTypeHandshake, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropPayloadDecryptError)
conn.handlePacket(p)
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the go routine return
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("close"))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("processes multiple received packets before sending one", func() {
conn.creationTime = time.Now()
var pn protocol.PacketNumber
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
pn++
return pn, protocol.PacketNumberLen2, protocol.KeyPhaseZero, []byte{0} /* PADDING frame */, nil
}).Times(3)
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version) // only expect a single call
for i := 0; i < 3; i++ {
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x1337+protocol.PacketNumber(i), []byte("foobar")))
}
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the go routine return
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("close"))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("doesn't processes multiple received packets before sending one before handshake completion", func() {
conn.handshakeComplete = false
conn.creationTime = time.Now()
var pn protocol.PacketNumber
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
pn++
return pn, protocol.PacketNumberLen4, protocol.KeyPhaseZero, []byte{0} /* PADDING frame */, nil
}).Times(3)
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Times(3)
for i := 0; i < 3; i++ {
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x1337+protocol.PacketNumber(i), []byte("foobar")))
}
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the go routine return
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("close"))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes the connection when unpacking fails because the reserved bits were incorrect", func() {
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0), protocol.PacketNumberLen(0), protocol.KeyPhaseBit(0), nil, wire.ErrInvalidReservedBits)
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&qerr.TransportError{}))
Expect(err.(*qerr.TransportError).ErrorCode).To(Equal(qerr.ProtocolViolation))
close(done)
}()
expectReplaceWithClosed()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
packet := getShortHeaderPacket(srcConnID, 0x42, nil)
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.handlePacket(packet)
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("ignores packets when unpacking the header fails", func() {
testErr := &headerParseError{errors.New("test error")}
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0), protocol.PacketNumberLen(0), protocol.KeyPhaseBit(0), nil, testErr)
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
runErr := make(chan error)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
runErr <- conn.run()
}()
expectReplaceWithClosed()
tracer.EXPECT().DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, gomock.Any(), logging.PacketDropHeaderParseError)
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x42, nil))
Consistently(runErr).ShouldNot(Receive())
// make the go routine return
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes the connection when unpacking fails because of an error other than a decryption error", func() {
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(0), protocol.PacketNumberLen(0), protocol.KeyPhaseBit(0), nil, &qerr.TransportError{ErrorCode: qerr.ConnectionIDLimitError})
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&qerr.TransportError{}))
Expect(err.(*qerr.TransportError).ErrorCode).To(Equal(qerr.ConnectionIDLimitError))
close(done)
}()
expectReplaceWithClosed()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.handlePacket(getShortHeaderPacket(srcConnID, 0x42, nil))
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("ignores packets with a different source connection ID", func() {
hdr1 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 1,
}
hdr2 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 2,
}
Expect(srcConnID).ToNot(Equal(hdr2.SrcConnectionID))
// Send one packet, which might change the connection ID.
// only EXPECT one call to the unpacker
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).Return(&unpackedPacket{
encryptionLevel: protocol.Encryption1RTT,
hdr: hdr1,
data: []byte{0}, // one PADDING frame
}, nil)
p1 := getLongHeaderPacket(hdr1, nil)
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(p1.data)), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(p1)).To(BeTrue())
// The next packet has to be ignored, since the source connection ID doesn't match.
p2 := getLongHeaderPacket(hdr2, nil)
tracer.EXPECT().DroppedPacket(logging.PacketTypeInitial, protocol.InvalidPacketNumber, protocol.ByteCount(len(p2.data)), logging.PacketDropUnknownConnectionID)
Expect(conn.handlePacketImpl(p2)).To(BeFalse())
})
It("queues undecryptable packets", func() {
conn.handshakeComplete = false
hdr := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 1,
}
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).Return(nil, handshake.ErrKeysNotYetAvailable)
packet := getLongHeaderPacket(hdr, nil)
tracer.EXPECT().BufferedPacket(logging.PacketTypeHandshake, packet.Size())
Expect(conn.handlePacketImpl(packet)).To(BeFalse())
Expect(conn.undecryptablePackets).To(Equal([]receivedPacket{packet}))
})
Context("updating the remote address", func() {
It("doesn't support connection migration", func() {
unpacker.EXPECT().UnpackShortHeader(gomock.Any(), gomock.Any()).Return(protocol.PacketNumber(10), protocol.PacketNumberLen2, protocol.KeyPhaseZero, []byte{0} /* one PADDING frame */, nil)
packet := getShortHeaderPacket(srcConnID, 0x42, nil)
packet.remoteAddr = &net.IPAddr{IP: net.IPv4(192, 168, 0, 100)}
tracer.EXPECT().ReceivedShortHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet.data)), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
})
Context("coalesced packets", func() {
BeforeEach(func() {
tracer.EXPECT().StartedConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
})
getPacketWithLength := func(connID protocol.ConnectionID, length protocol.ByteCount) (int /* header length */, receivedPacket) {
hdr := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: connID,
SrcConnectionID: destConnID,
Version: protocol.Version1,
Length: length,
},
PacketNumberLen: protocol.PacketNumberLen3,
}
hdrLen := hdr.GetLength(conn.version)
b := make([]byte, 1)
rand.Read(b)
packet := getLongHeaderPacket(hdr, bytes.Repeat(b, int(length)-3))
return int(hdrLen), packet
}
It("cuts packets to the right length", func() {
hdrLen, packet := getPacketWithLength(srcConnID, 456)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(_ *wire.Header, data []byte) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen + 456 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{Header: wire.Header{}},
}, nil
})
cryptoSetup.EXPECT().DiscardInitialKeys()
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial)
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet.data)), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(packet)).To(BeTrue())
})
It("handles coalesced packets", func() {
hdrLen1, packet1 := getPacketWithLength(srcConnID, 456)
packet1.ecn = protocol.ECT1
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(_ *wire.Header, data []byte) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen1 + 456 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{
PacketNumber: 1,
Header: wire.Header{SrcConnectionID: destConnID},
},
}, nil
})
hdrLen2, packet2 := getPacketWithLength(srcConnID, 123)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(_ *wire.Header, data []byte) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen2 + 123 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{
PacketNumber: 2,
Header: wire.Header{SrcConnectionID: destConnID},
},
}, nil
})
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial).AnyTimes()
cryptoSetup.EXPECT().DiscardInitialKeys().AnyTimes()
gomock.InOrder(
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet1.data)), logging.ECT1, gomock.Any()),
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet2.data)), logging.ECT1, gomock.Any()),
)
packet1.data = append(packet1.data, packet2.data...)
Expect(conn.handlePacketImpl(packet1)).To(BeTrue())
})
It("works with undecryptable packets", func() {
conn.handshakeComplete = false
hdrLen1, packet1 := getPacketWithLength(srcConnID, 456)
hdrLen2, packet2 := getPacketWithLength(srcConnID, 123)
gomock.InOrder(
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).Return(nil, handshake.ErrKeysNotYetAvailable),
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(_ *wire.Header, data []byte) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen2 + 123 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{Header: wire.Header{}},
}, nil
}),
)
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial).AnyTimes()
cryptoSetup.EXPECT().DiscardInitialKeys().AnyTimes()
gomock.InOrder(
tracer.EXPECT().BufferedPacket(gomock.Any(), protocol.ByteCount(len(packet1.data))),
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet2.data)), gomock.Any(), gomock.Any()),
)
packet1.data = append(packet1.data, packet2.data...)
Expect(conn.handlePacketImpl(packet1)).To(BeTrue())
Expect(conn.undecryptablePackets).To(HaveLen(1))
Expect(conn.undecryptablePackets[0].data).To(HaveLen(hdrLen1 + 456 - 3))
})
It("ignores coalesced packet parts if the destination connection IDs don't match", func() {
wrongConnID := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
Expect(srcConnID).ToNot(Equal(wrongConnID))
hdrLen1, packet1 := getPacketWithLength(srcConnID, 456)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(_ *wire.Header, data []byte) (*unpackedPacket, error) {
Expect(data).To(HaveLen(hdrLen1 + 456 - 3))
return &unpackedPacket{
encryptionLevel: protocol.EncryptionHandshake,
data: []byte{0},
hdr: &wire.ExtendedHeader{Header: wire.Header{}},
}, nil
})
_, packet2 := getPacketWithLength(wrongConnID, 123)
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionInitial).AnyTimes()
cryptoSetup.EXPECT().DiscardInitialKeys().AnyTimes()
// don't EXPECT any more calls to unpacker.UnpackLongHeader()
gomock.InOrder(
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), protocol.ByteCount(len(packet1.data)), gomock.Any(), gomock.Any()),
tracer.EXPECT().DroppedPacket(gomock.Any(), protocol.InvalidPacketNumber, protocol.ByteCount(len(packet2.data)), logging.PacketDropUnknownConnectionID),
)
packet1.data = append(packet1.data, packet2.data...)
Expect(conn.handlePacketImpl(packet1)).To(BeTrue())
})
})
})
Context("sending packets", func() {
var (
connDone chan struct{}
sender *MockSender
sph *mockackhandler.MockSentPacketHandler
)
BeforeEach(func() {
sender = NewMockSender(mockCtrl)
sender.EXPECT().Run()
sender.EXPECT().WouldBlock().AnyTimes()
conn.sendQueue = sender
connDone = make(chan struct{})
sph = mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
})
AfterEach(func() {
streamManager.EXPECT().CloseWithError(gomock.Any())
sph.EXPECT().ECNMode(gomock.Any()).Return(protocol.ECNCE).MaxTimes(1)
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
sender.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
Eventually(connDone).Should(BeClosed())
})
runConn := func() {
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
close(connDone)
}()
}
It("sends packets", func() {
conn.handshakeConfirmed = true
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(true).Return(protocol.ECNNon).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
runConn()
p := shortHeaderPacket{
DestConnID: protocol.ParseConnectionID([]byte{1, 2, 3}),
PacketNumber: 1337,
PacketNumberLen: protocol.PacketNumberLen3,
KeyPhase: protocol.KeyPhaseOne,
}
expectAppendPacket(packer, p, []byte("foobar"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
sent := make(chan struct{})
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(*packetBuffer, uint16, protocol.ECN) { close(sent) })
tracer.EXPECT().SentShortHeaderPacket(&logging.ShortHeader{
DestConnectionID: p.DestConnID,
PacketNumber: p.PacketNumber,
PacketNumberLen: p.PacketNumberLen,
KeyPhase: p.KeyPhase,
}, gomock.Any(), gomock.Any(), nil, []logging.Frame{})
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
})
It("doesn't send packets if there's nothing to send", func() {
conn.handshakeConfirmed = true
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(true).AnyTimes()
runConn()
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure there are no calls to mconn.Write()
})
It("sends ACK only packets", func() {
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAck)
sph.EXPECT().ECNMode(gomock.Any()).Return(protocol.ECT1).AnyTimes()
done := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(true, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) {
close(done)
return nil, nil
})
runConn()
conn.scheduleSending()
Eventually(done).Should(BeClosed())
})
It("adds a BLOCKED frame when it is connection-level flow control blocked", func() {
conn.handshakeConfirmed = true
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
fc := mocks.NewMockConnectionFlowController(mockCtrl)
fc.EXPECT().IsNewlyBlocked().Return(true, protocol.ByteCount(1337))
fc.EXPECT().GetWindowUpdate()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 13}, []byte("foobar"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
conn.connFlowController = fc
runConn()
sent := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(*packetBuffer, uint16, protocol.ECN) { close(sent) })
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), nil, []logging.Frame{})
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
frames, _ := conn.framer.AppendControlFrames(nil, 1000, protocol.Version1)
Expect(frames).To(Equal([]ackhandler.Frame{{Frame: &logging.DataBlockedFrame{MaximumData: 1337}}}))
})
It("doesn't send when the SentPacketHandler doesn't allow it", func() {
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone).AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
runConn()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond)
})
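// Probe packets (sent when the PTO timer fires) are tested for all three encryption levels:
// Initial, Handshake and 1-RTT (application data).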
for _, enc := range []protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption1RTT} {
encLevel := enc
Context(fmt.Sprintf("sending %s probe packets", encLevel), func() {
var sendMode ackhandler.SendMode
var getFrame func(protocol.ByteCount, protocol.Version) wire.Frame
BeforeEach(func() {
switch encLevel {
case protocol.EncryptionInitial:
sendMode = ackhandler.SendPTOInitial
getFrame = conn.retransmissionQueue.GetInitialFrame
case protocol.EncryptionHandshake:
sendMode = ackhandler.SendPTOHandshake
getFrame = conn.retransmissionQueue.GetHandshakeFrame
case protocol.Encryption1RTT:
sendMode = ackhandler.SendPTOAppData
getFrame = conn.retransmissionQueue.GetAppDataFrame
}
})
It("sends a probe packet", func() {
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(sendMode)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
sph.EXPECT().QueueProbePacket(encLevel)
sph.EXPECT().ECNMode(gomock.Any())
p := getCoalescedPacket(123, encLevel)
packer.EXPECT().MaybePackProbePacket(encLevel, gomock.Any(), conn.version).Return(p, nil)
sph.EXPECT().SentPacket(gomock.Any(), protocol.PacketNumber(123), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
runConn()
sent := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(*packetBuffer, uint16, protocol.ECN) { close(sent) })
if encLevel == protocol.Encryption1RTT {
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), p.shortHdrPacket.Length, gomock.Any(), gomock.Any(), gomock.Any())
} else {
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), p.longHdrPackets[0].length, gomock.Any(), gomock.Any(), gomock.Any())
}
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
})
It("sends a PING as a probe packet", func() {
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(sendMode)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
sph.EXPECT().ECNMode(gomock.Any()).Return(protocol.ECT0)
sph.EXPECT().QueueProbePacket(encLevel).Return(false)
p := getCoalescedPacket(123, encLevel)
packer.EXPECT().MaybePackProbePacket(encLevel, gomock.Any(), conn.version).Return(p, nil)
sph.EXPECT().SentPacket(gomock.Any(), protocol.PacketNumber(123), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
runConn()
sent := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(*packetBuffer, uint16, protocol.ECN) { close(sent) })
if encLevel == protocol.Encryption1RTT {
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), p.shortHdrPacket.Length, logging.ECT0, gomock.Any(), gomock.Any())
} else {
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), p.longHdrPackets[0].length, logging.ECT0, gomock.Any(), gomock.Any())
}
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
// We're using a mock packet packer in this test.
// We therefore need to test separately that the PING was actually queued.
Expect(getFrame(1000, protocol.Version1)).To(BeAssignableToTypeOf(&wire.PingFrame{}))
})
})
}
})
Context("packet pacing", func() {
var (
sph *mockackhandler.MockSentPacketHandler
sender *MockSender
)
BeforeEach(func() {
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
sph = mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
conn.handshakeConfirmed = true
conn.handshakeComplete = true
conn.sentPacketHandler = sph
sender = NewMockSender(mockCtrl)
sender.EXPECT().Run()
conn.sendQueue = sender
streamManager.EXPECT().CloseWithError(gomock.Any())
})
AfterEach(func() {
// make the goroutine return
sph.EXPECT().ECNMode(gomock.Any()).MaxTimes(1)
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
sender.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends multiple packets one by one immediately", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(2)
sph.EXPECT().ECNMode(gomock.Any()).Times(2)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited)
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour))
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, []byte("packet10"))
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, []byte("packet11"))
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal([]byte("packet10")))
})
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal([]byte("packet11")))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 2 packets are sent
})
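// With GSO enabled, consecutive full-size packets are appended into a single buffer
// and handed to the sender in one Send call.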
It("sends multiple packets one by one immediately, with GSO", func() {
enableGSO()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
sph.EXPECT().ECNMode(true).Return(protocol.ECT1).Times(4)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(3)
payload1 := make([]byte, conn.maxPacketSize())
rand.Read(payload1)
payload2 := make([]byte, conn.maxPacketSize())
rand.Read(payload2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, payload1)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, payload2)
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), gomock.Any()).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), uint16(conn.maxPacketSize()), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal(append(payload1, payload2...)))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 2 packets are sent
})
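// Packing a packet that is smaller than the maximum packet size ends the GSO batch:
// it is flushed together with the preceding packet, and the next packet starts a new batch.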
It("stops appending packets when a smaller packet is packed, with GSO", func() {
enableGSO()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
sph.EXPECT().ECNMode(true).Times(4)
payload1 := make([]byte, conn.maxPacketSize())
rand.Read(payload1)
payload2 := make([]byte, conn.maxPacketSize()-1)
rand.Read(payload2)
payload3 := make([]byte, conn.maxPacketSize())
rand.Read(payload3)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, payload1)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, payload2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 12}, payload3)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), uint16(conn.maxPacketSize()), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal(append(payload1, payload2...)))
})
sender.EXPECT().Send(gomock.Any(), uint16(conn.maxPacketSize()), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal(payload3))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure no further packets are sent
})
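// A change of the ECN marking also ends the GSO batch: packets with the old marking are
// flushed first, and the packet with the new marking is sent separately.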
It("stops appending packets when the ECN marking changes, with GSO", func() {
enableGSO()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
sph.EXPECT().ECNMode(true).Return(protocol.ECT1).Times(2)
sph.EXPECT().ECNMode(true).Return(protocol.ECT0).Times(2)
payload1 := make([]byte, conn.maxPacketSize())
rand.Read(payload1)
payload2 := make([]byte, conn.maxPacketSize())
rand.Read(payload2)
payload3 := make([]byte, conn.maxPacketSize())
rand.Read(payload3)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, payload1)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, payload2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 11}, payload3)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), uint16(conn.maxPacketSize()), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal(append(payload1, payload2...)))
})
sender.EXPECT().Send(gomock.Any(), uint16(conn.maxPacketSize()), gomock.Any()).Do(func(b *packetBuffer, _ uint16, _ protocol.ECN) {
Expect(b.Data).To(Equal(payload3))
})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure no further packets are sent
})
It("sends multiple packets, when the pacer allows immediate sending", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(2)
sph.EXPECT().ECNMode(gomock.Any()).Times(2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, []byte("packet10"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 1 packet is sent
})
It("allows an ACK to be sent when pacing limited", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour))
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited)
sph.EXPECT().ECNMode(gomock.Any())
packer.EXPECT().PackAckOnlyPacket(gomock.Any(), conn.version).Return(shortHeaderPacket{PacketNumber: 123}, getPacketBuffer(), nil)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 1 packet is sent
})
// When the connection becomes congestion limited, the SendMode will at some point change from SendAny to SendAck.
// The ACK shouldn't be sent in the same run of the send loop.
It("doesn't send an ACK right after becoming congestion limited", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAck)
sph.EXPECT().ECNMode(gomock.Any()).Times(2)
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 100}, []byte("packet100"))
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(50 * time.Millisecond) // make sure that only 1 packet is sent
})
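// When the sent packet handler reports SendPacingLimited, the send loop waits until the
// pacing deadline (TimeUntilSend) before packing and sending the next packet.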
It("paces packets", func() {
pacingDelay := scaleDuration(100 * time.Millisecond)
gomock.InOrder(
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny),
sph.EXPECT().ECNMode(gomock.Any()),
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 100}, []byte("packet100")),
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited),
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(pacingDelay)),
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny),
sph.EXPECT().ECNMode(gomock.Any()),
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 101}, []byte("packet101")),
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited),
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour)),
)
written := make(chan struct{}, 2)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { written <- struct{}{} }).Times(2)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(written).Should(HaveLen(1))
Consistently(written, pacingDelay/2).Should(HaveLen(1))
Eventually(written, 2*pacingDelay).Should(HaveLen(2))
})
It("sends multiple packets at once", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).Times(3)
sph.EXPECT().ECNMode(gomock.Any()).Times(3)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendPacingLimited)
sph.EXPECT().TimeUntilSend().Return(time.Now().Add(time.Hour))
for pn := protocol.PacketNumber(1000); pn < 1003; pn++ {
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: pn}, []byte("packet"))
}
written := make(chan struct{}, 3)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { written <- struct{}{} }).Times(3)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(written).Should(HaveLen(3))
})
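// The send-queue-full behavior is tested both with and without GSO.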
for _, withGSO := range []bool{false, true} {
withGSO := withGSO
It(fmt.Sprintf("doesn't try to send if the send queue is full: %t", withGSO), func() {
if withGSO {
enableGSO()
}
available := make(chan struct{}, 1)
sender.EXPECT().WouldBlock().Return(true)
sender.EXPECT().Available().Return(available)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
time.Sleep(scaleDuration(50 * time.Millisecond))
written := make(chan struct{})
sender.EXPECT().WouldBlock().AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1000}, []byte("packet1000"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { close(written) })
available <- struct{}{}
Eventually(written).Should(BeClosed())
})
}
It("stops sending when there are new packets to receive", func() {
sender.EXPECT().WouldBlock().AnyTimes()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
written := make(chan struct{})
sender.EXPECT().WouldBlock().AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(time.Time, protocol.PacketNumber, protocol.PacketNumber, []ackhandler.StreamFrame, []ackhandler.Frame, protocol.EncryptionLevel, protocol.ECN, protocol.ByteCount, bool) {
sph.EXPECT().ReceivedBytes(gomock.Any())
conn.handlePacket(receivedPacket{buffer: getPacketBuffer()})
})
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 10}, []byte("packet10"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { close(written) })
conn.scheduleSending()
time.Sleep(scaleDuration(50 * time.Millisecond))
Eventually(written).Should(BeClosed())
})
It("stops sending when the send queue is full", func() {
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny)
sph.EXPECT().ECNMode(gomock.Any())
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1000}, []byte("packet1000"))
written := make(chan struct{}, 1)
sender.EXPECT().WouldBlock()
sender.EXPECT().WouldBlock().Return(true).Times(2)
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { written <- struct{}{} })
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
available := make(chan struct{}, 1)
sender.EXPECT().Available().Return(available)
conn.scheduleSending()
Eventually(written).Should(Receive())
time.Sleep(scaleDuration(50 * time.Millisecond))
// now make room in the send queue
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
sender.EXPECT().WouldBlock().AnyTimes()
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1001}, []byte("packet1001"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { written <- struct{}{} })
available <- struct{}{}
Eventually(written).Should(Receive())
// The send queue is not full any more. Sending on the available channel should have no effect.
available <- struct{}{}
time.Sleep(scaleDuration(50 * time.Millisecond))
})
It("doesn't set a pacing timer when there is no data to send", func() {
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
sender.EXPECT().WouldBlock().AnyTimes()
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
// don't EXPECT any calls to mconn.Write()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending() // no packet will get sent
time.Sleep(50 * time.Millisecond)
})
It("sends a Path MTU probe packet", func() {
mtuDiscoverer := NewMockMTUDiscoverer(mockCtrl)
conn.mtuDiscoverer = mtuDiscoverer
conn.config.DisablePathMTUDiscovery = false
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny)
sph.EXPECT().ECNMode(true)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendNone)
written := make(chan struct{}, 1)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(*packetBuffer, uint16, protocol.ECN) { written <- struct{}{} })
mtuDiscoverer.EXPECT().ShouldSendProbe(gomock.Any()).Return(true)
ping := ackhandler.Frame{Frame: &wire.PingFrame{}}
mtuDiscoverer.EXPECT().GetPing().Return(ping, protocol.ByteCount(1234))
packer.EXPECT().PackMTUProbePacket(ping, protocol.ByteCount(1234), conn.version).Return(shortHeaderPacket{PacketNumber: 1}, getPacketBuffer(), nil)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(written).Should(Receive())
mtuDiscoverer.EXPECT().CurrentSize().Return(protocol.ByteCount(1234))
})
})
Context("scheduling sending", func() {
var sender *MockSender
BeforeEach(func() {
sender = NewMockSender(mockCtrl)
sender.EXPECT().WouldBlock().AnyTimes()
sender.EXPECT().Run()
conn.sendQueue = sender
conn.handshakeConfirmed = true
})
AfterEach(func() {
// make the goroutine return
expectReplaceWithClosed()
streamManager.EXPECT().CloseWithError(gomock.Any())
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
sender.EXPECT().Close()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends when scheduleSending is called", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1}, []byte("packet1"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
// don't EXPECT any calls to mconn.Write()
time.Sleep(50 * time.Millisecond)
// only EXPECT calls after scheduleSending is called
written := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(*packetBuffer, uint16, protocol.ECN) { close(written) })
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
conn.scheduleSending()
Eventually(written).Should(BeClosed())
})
It("sets the timer to the ack timer", func() {
expectAppendPacket(packer, shortHeaderPacket{PacketNumber: 1234}, []byte("packet1234"))
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack)
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
sph.EXPECT().SentPacket(gomock.Any(), protocol.PacketNumber(1234), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
conn.sentPacketHandler = sph
rph := mockackhandler.NewMockReceivedPacketHandler(mockCtrl)
rph.EXPECT().GetAlarmTimeout().Return(time.Now().Add(10 * time.Millisecond))
// make the run loop wait
rph.EXPECT().GetAlarmTimeout().Return(time.Now().Add(time.Hour)).MaxTimes(1)
conn.receivedPacketHandler = rph
written := make(chan struct{})
sender.EXPECT().Send(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(*packetBuffer, uint16, protocol.ECN) { close(written) })
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Eventually(written).Should(BeClosed())
})
})
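// Before the handshake is confirmed, Initial and Handshake packets are coalesced into a
// single datagram: SentPacket and the tracer are called once per QUIC packet, but the
// whole buffer is written with a single mconn.Write call.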
It("sends coalesced packets before the handshake is confirmed", func() {
conn.handshakeComplete = false
conn.handshakeConfirmed = false
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
buffer := getPacketBuffer()
buffer.Data = append(buffer.Data, []byte("foobar")...)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Return(&coalescedPacket{
buffer: buffer,
longHdrPackets: []*longHeaderPacket{
{
header: &wire.ExtendedHeader{
Header: wire.Header{Type: protocol.PacketTypeInitial},
PacketNumber: 13,
},
length: 123,
},
{
header: &wire.ExtendedHeader{
Header: wire.Header{Type: protocol.PacketTypeHandshake},
PacketNumber: 37,
},
length: 1234,
},
},
}, nil)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(false).Return(protocol.ECT1).AnyTimes()
sph.EXPECT().TimeUntilSend().Return(time.Now()).AnyTimes()
gomock.InOrder(
sph.EXPECT().SentPacket(gomock.Any(), protocol.PacketNumber(13), gomock.Any(), gomock.Any(), gomock.Any(), protocol.EncryptionInitial, protocol.ECT1, protocol.ByteCount(123), gomock.Any()),
sph.EXPECT().SentPacket(gomock.Any(), protocol.PacketNumber(37), gomock.Any(), gomock.Any(), gomock.Any(), protocol.EncryptionHandshake, protocol.ECT1, protocol.ByteCount(1234), gomock.Any()),
)
gomock.InOrder(
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(hdr *wire.ExtendedHeader, _ protocol.ByteCount, _ logging.ECN, _ *wire.AckFrame, _ []logging.Frame) {
Expect(hdr.Type).To(Equal(protocol.PacketTypeInitial))
}),
tracer.EXPECT().SentLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(func(hdr *wire.ExtendedHeader, _ protocol.ByteCount, _ logging.ECN, _ *wire.AckFrame, _ []logging.Frame) {
Expect(hdr.Type).To(Equal(protocol.PacketTypeHandshake))
}),
)
sent := make(chan struct{})
mconn.EXPECT().Write([]byte("foobar"), uint16(0), protocol.ECT1).Do(func([]byte, uint16, protocol.ECN) error { close(sent); return nil })
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
conn.scheduleSending()
Eventually(sent).Should(BeClosed())
// make sure the goroutine returns
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("cancels the HandshakeComplete context when the handshake completes", func() {
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
tracer.EXPECT().ChoseALPN(gomock.Any())
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SendMode(gomock.Any()).AnyTimes()
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
sph.EXPECT().SetHandshakeConfirmed()
connRunner.EXPECT().Retire(clientDestConnID)
cryptoSetup.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().GetSessionTicket()
cryptoSetup.EXPECT().ConnectionState()
handshakeCtx := conn.HandshakeComplete()
Consistently(handshakeCtx).ShouldNot(BeClosed())
Expect(conn.handleHandshakeComplete()).To(Succeed())
Eventually(handshakeCtx).Should(BeClosed())
})
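// A session ticket larger than protocol.MaxPostHandshakeCryptoFrameSize is split across
// multiple CRYPTO frames, none of which exceeds that limit.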
It("sends a session ticket when the handshake completes", func() {
const size = protocol.MaxPostHandshakeCryptoFrameSize * 3 / 2
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
connRunner.EXPECT().Retire(clientDestConnID)
conn.sentPacketHandler.DropPackets(protocol.EncryptionInitial)
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
tracer.EXPECT().ChoseALPN(gomock.Any())
cryptoSetup.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().GetSessionTicket().Return(make([]byte, size), nil)
cryptoSetup.EXPECT().ConnectionState()
handshakeCtx := conn.HandshakeComplete()
Consistently(handshakeCtx).ShouldNot(BeClosed())
Expect(conn.handleHandshakeComplete()).To(Succeed())
var frames []ackhandler.Frame
Eventually(func() []ackhandler.Frame {
frames, _ = conn.framer.AppendControlFrames(nil, protocol.MaxByteCount, protocol.Version1)
return frames
}).ShouldNot(BeEmpty())
var count int
var s int
for _, f := range frames {
if cf, ok := f.Frame.(*wire.CryptoFrame); ok {
count++
s += len(cf.Data)
Expect(f.Frame.Length(conn.version)).To(BeNumerically("<=", protocol.MaxPostHandshakeCryptoFrameSize))
}
}
Expect(size).To(BeEquivalentTo(s))
})
It("doesn't cancel the HandshakeComplete context when the handshake fails", func() {
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any())
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
handshakeCtx := conn.HandshakeComplete()
Consistently(handshakeCtx).ShouldNot(BeClosed())
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
conn.closeLocal(errors.New("handshake error"))
Consistently(handshakeCtx).ShouldNot(BeClosed())
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends a HANDSHAKE_DONE frame when the handshake completes", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
sph.EXPECT().SendMode(gomock.Any()).Return(ackhandler.SendAny).AnyTimes()
sph.EXPECT().ECNMode(gomock.Any()).AnyTimes()
sph.EXPECT().GetLossDetectionTimeout().AnyTimes()
sph.EXPECT().TimeUntilSend().AnyTimes()
sph.EXPECT().SetHandshakeConfirmed()
sph.EXPECT().SentPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().SentShortHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ChoseALPN(gomock.Any())
conn.sentPacketHandler = sph
done := make(chan struct{})
connRunner.EXPECT().Retire(clientDestConnID)
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(_ *packetBuffer, _ protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) {
frames, _ := conn.framer.AppendControlFrames(nil, protocol.MaxByteCount, v)
Expect(frames).ToNot(BeEmpty())
Expect(frames[0].Frame).To(BeEquivalentTo(&wire.HandshakeDoneFrame{}))
defer close(done)
return shortHeaderPacket{}, nil
})
packer.EXPECT().AppendPacket(gomock.Any(), gomock.Any(), conn.version).Return(shortHeaderPacket{}, errNothingToPack).AnyTimes()
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any())
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventHandshakeComplete})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().GetSessionTicket()
cryptoSetup.EXPECT().ConnectionState()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handleHandshakeComplete()).To(Succeed())
conn.run()
}()
Eventually(done).Should(BeClosed())
// make sure the goroutine returns
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("doesn't return a run error when closing", func() {
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
Expect(conn.run()).To(Succeed())
close(done)
}()
streamManager.EXPECT().CloseWithError(gomock.Any())
cryptoSetup.EXPECT().Close()
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.destroy(nil)
Eventually(done).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(context.Canceled))
})
It("passes errors to the connection runner", func() {
testErr := errors.New("handshake error")
expectedErr := &qerr.ApplicationError{
ErrorCode: 0x1337,
ErrorMessage: testErr.Error(),
}
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
Expect(err).To(MatchError(expectedErr))
close(done)
}()
streamManager.EXPECT().CloseWithError(gomock.Any())
expectReplaceWithClosed()
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
Expect(conn.CloseWithError(0x1337, testErr.Error())).To(Succeed())
Eventually(done).Should(BeClosed())
Expect(context.Cause(conn.Context())).To(MatchError(expectedErr))
})
Context("transport parameters", func() {
It("processes transport parameters received from the client", func() {
params := &wire.TransportParameters{
MaxIdleTimeout: 90 * time.Second,
InitialMaxStreamDataBidiLocal: 0x5000,
InitialMaxData: 0x5000,
ActiveConnectionIDLimit: 3,
// marshaling always sets it to this value
MaxUDPPayloadSize: protocol.MaxPacketBufferSize,
InitialSourceConnectionID: destConnID,
}
streamManager.EXPECT().UpdateLimits(params)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).MaxTimes(3)
Expect(conn.earlyConnReady()).ToNot(BeClosed())
tracer.EXPECT().ReceivedTransportParameters(params)
conn.handleTransportParameters(params)
Expect(conn.earlyConnReady()).To(BeClosed())
})
})
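// Keep-alive PINGs are sent after roughly half the effective idle timeout, but no earlier
// than 1.5 times the PTO and no later than protocol.MaxKeepAliveInterval, and only while
// keep-alive is enabled and the handshake has completed.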
Context("keep-alives", func() {
setRemoteIdleTimeout := func(t time.Duration) {
streamManager.EXPECT().UpdateLimits(gomock.Any())
tracer.EXPECT().ReceivedTransportParameters(gomock.Any())
conn.handleTransportParameters(&wire.TransportParameters{
MaxIdleTimeout: t,
InitialSourceConnectionID: destConnID,
})
}
runConn := func() {
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
}
BeforeEach(func() {
conn.config.MaxIdleTimeout = 30 * time.Second
conn.config.KeepAlivePeriod = 15 * time.Second
conn.receivedPacketHandler.ReceivedPacket(0, protocol.ECNNon, protocol.EncryptionHandshake, time.Now(), true)
})
AfterEach(func() {
// make the goroutine return
expectReplaceWithClosed()
streamManager.EXPECT().CloseWithError(gomock.Any())
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("sends a PING as a keep-alive after half the idle timeout", func() {
setRemoteIdleTimeout(5 * time.Second)
conn.lastPacketReceivedTime = time.Now().Add(-5 * time.Second / 2)
sent := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) {
close(sent)
return nil, nil
})
runConn()
Eventually(sent).Should(BeClosed())
})
It("sends a PING after a maximum of protocol.MaxKeepAliveInterval", func() {
conn.config.MaxIdleTimeout = time.Hour
setRemoteIdleTimeout(time.Hour)
conn.lastPacketReceivedTime = time.Now().Add(-protocol.MaxKeepAliveInterval).Add(-time.Millisecond)
sent := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) {
close(sent)
return nil, nil
})
runConn()
Eventually(sent).Should(BeClosed())
})
It("doesn't send a PING packet if keep-alive is disabled", func() {
setRemoteIdleTimeout(5 * time.Second)
conn.config.KeepAlivePeriod = 0
conn.lastPacketReceivedTime = time.Now().Add(-time.Second * 5 / 2)
runConn()
// don't EXPECT() any calls to mconn.Write()
time.Sleep(50 * time.Millisecond)
})
It("doesn't send a PING if the handshake isn't completed yet", func() {
conn.config.HandshakeIdleTimeout = time.Hour
conn.handshakeComplete = false
// Needs to be shorter than our idle timeout.
// Otherwise we'll try to send a CONNECTION_CLOSE.
conn.lastPacketReceivedTime = time.Now().Add(-20 * time.Second)
runConn()
// don't EXPECT() any calls to mconn.Write()
time.Sleep(50 * time.Millisecond)
})
It("send PING as keep-alive earliest after 1.5 times the PTO", func() {
conn.config.KeepAlivePeriod = time.Microsecond
pto := conn.rttStats.PTO(true)
conn.lastPacketReceivedTime = time.Now()
sentPingTimeChan := make(chan time.Time)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).Do(func(bool, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) {
sentPingTimeChan <- time.Now()
return nil, nil
})
runConn()
sentPingTime := <-sentPingTimeChan
Expect(sentPingTime.Sub(conn.lastPacketReceivedTime)).To(BeNumerically(">", pto*3/2))
})
})
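// Idle and handshake timeouts tear down the connection with qerr.ErrIdleTimeout and
// qerr.ErrHandshakeTimeout respectively; both satisfy net.Error with Timeout() == true.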
Context("timeouts", func() {
BeforeEach(func() {
streamManager.EXPECT().CloseWithError(gomock.Any())
})
It("times out due to no network activity", func() {
connRunner.EXPECT().Remove(gomock.Any()).Times(2)
conn.lastPacketReceivedTime = time.Now().Add(-time.Hour)
done := make(chan struct{})
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&qerr.IdleTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrIdleTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("times out due to non-completed handshake", func() {
conn.handshakeComplete = false
conn.creationTime = time.Now().Add(-2 * protocol.DefaultHandshakeIdleTimeout).Add(-time.Second)
connRunner.EXPECT().Remove(gomock.Any()).Times(2)
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&HandshakeTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrHandshakeTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("does not use the idle timeout before the handshake complete", func() {
conn.handshakeComplete = false
conn.config.HandshakeIdleTimeout = 9999 * time.Second
conn.config.MaxIdleTimeout = 9999 * time.Second
conn.lastPacketReceivedTime = time.Now().Add(-time.Minute)
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).DoAndReturn(func(e *qerr.ApplicationError, _ protocol.ByteCount, _ protocol.Version) (*coalescedPacket, error) {
Expect(e.ErrorCode).To(BeZero())
return &coalescedPacket{buffer: getPacketBuffer()}, nil
})
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
idleTimeout := &IdleTimeoutError{}
handshakeTimeout := &HandshakeTimeoutError{}
Expect(errors.As(e, &idleTimeout)).To(BeFalse())
Expect(errors.As(e, &handshakeTimeout)).To(BeFalse())
}),
tracer.EXPECT().Close(),
)
// the handshake timeout is irrelevant here, since it depends on the time the connection was created,
// and not on the last network activity
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the goroutine return
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("closes the connection due to the idle timeout before handshake", func() {
conn.config.HandshakeIdleTimeout = scaleDuration(25 * time.Millisecond)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
connRunner.EXPECT().Remove(gomock.Any()).AnyTimes()
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&IdleTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
done := make(chan struct{})
conn.handshakeComplete = false
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().GetSessionTicket().MaxTimes(1)
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrIdleTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("closes the connection due to the idle timeout after handshake", func() {
conn.sentPacketHandler.DropPackets(protocol.EncryptionInitial)
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
gomock.InOrder(
connRunner.EXPECT().Retire(clientDestConnID),
connRunner.EXPECT().Remove(gomock.Any()),
)
cryptoSetup.EXPECT().Close()
gomock.InOrder(
tracer.EXPECT().ChoseALPN(gomock.Any()),
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake),
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&IdleTimeoutError{}))
}),
tracer.EXPECT().Close(),
)
conn.idleTimeout = 0
done := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventHandshakeComplete})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().GetSessionTicket().MaxTimes(1)
cryptoSetup.EXPECT().SetHandshakeConfirmed().MaxTimes(1)
cryptoSetup.EXPECT().ConnectionState()
Expect(conn.handleHandshakeComplete()).To(Succeed())
err := conn.run()
nerr, ok := err.(net.Error)
Expect(ok).To(BeTrue())
Expect(nerr.Timeout()).To(BeTrue())
Expect(err).To(MatchError(qerr.ErrIdleTimeout))
close(done)
}()
Eventually(done).Should(BeClosed())
})
It("doesn't time out when it just sent a packet", func() {
conn.lastPacketReceivedTime = time.Now().Add(-time.Hour)
conn.firstAckElicitingPacketAfterIdleSentTime = time.Now().Add(-time.Second)
conn.idleTimeout = 30 * time.Second
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Consistently(conn.Context().Done()).ShouldNot(BeClosed())
// make the goroutine return
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
expectReplaceWithClosed()
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
})
It("times out earliest after 3 times the PTO", func() {
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).AnyTimes()
connRunner.EXPECT().Retire(gomock.Any()).AnyTimes()
connRunner.EXPECT().Remove(gomock.Any()).Times(2)
cryptoSetup.EXPECT().Close()
closeTimeChan := make(chan time.Time)
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
Expect(e).To(MatchError(&IdleTimeoutError{}))
closeTimeChan <- time.Now()
})
tracer.EXPECT().Close()
conn.idleTimeout = time.Millisecond
done := make(chan struct{})
pto := conn.rttStats.PTO(true)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().GetSessionTicket().MaxTimes(1)
cryptoSetup.EXPECT().SetHandshakeConfirmed().MaxTimes(1)
conn.run()
close(done)
}()
closeTime := <-closeTimeChan
Expect(closeTime.Sub(conn.lastPacketReceivedTime)).To(BeNumerically(">", pto*3))
Eventually(done).Should(BeClosed())
})
})
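// Packets that arrive faster than the run loop can process them are buffered up to
// protocol.MaxConnUnprocessedPackets; any further packets are dropped for DoS prevention.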
It("stores up to MaxConnUnprocessedPackets packets", func() {
done := make(chan struct{})
tracer.EXPECT().DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, logging.ByteCount(6), logging.PacketDropDOSPrevention).Do(func(logging.PacketType, logging.PacketNumber, logging.ByteCount, logging.PacketDropReason) {
close(done)
})
// Nothing here should block
for i := protocol.PacketNumber(0); i < protocol.MaxConnUnprocessedPackets+1; i++ {
conn.handlePacket(receivedPacket{data: []byte("foobar")})
}
Eventually(done).Should(BeClosed())
})
Context("getting streams", func() {
It("opens streams", func() {
mstr := NewMockStreamI(mockCtrl)
streamManager.EXPECT().OpenStream().Return(mstr, nil)
str, err := conn.OpenStream()
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("opens streams synchronously", func() {
mstr := NewMockStreamI(mockCtrl)
streamManager.EXPECT().OpenStreamSync(context.Background()).Return(mstr, nil)
str, err := conn.OpenStreamSync(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("opens unidirectional streams", func() {
mstr := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().OpenUniStream().Return(mstr, nil)
str, err := conn.OpenUniStream()
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("opens unidirectional streams synchronously", func() {
mstr := NewMockSendStreamI(mockCtrl)
streamManager.EXPECT().OpenUniStreamSync(context.Background()).Return(mstr, nil)
str, err := conn.OpenUniStreamSync(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("accepts streams", func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
mstr := NewMockStreamI(mockCtrl)
streamManager.EXPECT().AcceptStream(ctx).Return(mstr, nil)
str, err := conn.AcceptStream(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
It("accepts unidirectional streams", func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
mstr := NewMockReceiveStreamI(mockCtrl)
streamManager.EXPECT().AcceptUniStream(ctx).Return(mstr, nil)
str, err := conn.AcceptUniStream(ctx)
Expect(err).ToNot(HaveOccurred())
Expect(str).To(Equal(mstr))
})
})
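// Datagram tests: sending a datagram is only allowed if the peer advertised a non-zero
// max_datagram_frame_size, and the payload must fit into a single DATAGRAM frame.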
Context("datagrams", func() {
It("doesn't allow datagrams if the peer didn't enable support", func() {
conn.peerParams = &wire.TransportParameters{MaxDatagramFrameSize: 0}
Expect(conn.SendDatagram(make([]byte, 200))).To(MatchError("datagram support disabled"))
})
It("sends a datagram", func() {
conn.peerParams = &wire.TransportParameters{MaxDatagramFrameSize: 1000}
Expect(conn.SendDatagram([]byte("foobar"))).To(Succeed())
f := conn.datagramQueue.Peek()
Expect(f).ToNot(BeNil())
Expect(f.Data).To(Equal([]byte("foobar")))
})
It("says when a datagram is too big", func() {
conn.peerParams = &wire.TransportParameters{MaxDatagramFrameSize: 1000}
err := conn.SendDatagram(make([]byte, 2000))
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&DatagramTooLargeError{}))
derr := err.(*DatagramTooLargeError)
Expect(derr.MaxDatagramPayloadSize).To(BeNumerically("<", 1000))
Expect(conn.SendDatagram(make([]byte, derr.MaxDatagramPayloadSize))).To(Succeed())
})
It("receives datagrams", func() {
conn.config.EnableDatagrams = true
conn.datagramQueue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("foobar")})
data, err := conn.ReceiveDatagram(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("foobar")))
})
})
It("returns the local address", func() {
Expect(conn.LocalAddr()).To(Equal(localAddr))
})
It("returns the remote address", func() {
Expect(conn.RemoteAddr()).To(Equal(remoteAddr))
})
})
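// Tests specific to the client side of a connection: adopting the server's source connection ID
// from the first packet, confirming the handshake via HANDSHAKE_DONE or a 1-RTT ACK,
// and handling address validation tokens.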
var _ = Describe("Client Connection", func() {
var (
conn *connection
connRunner *MockConnRunner
packer *MockPacker
mconn *MockSendConn
cryptoSetup *mocks.MockCryptoSetup
tracer *mocklogging.MockConnectionTracer
tlsConf *tls.Config
quicConf *Config
)
srcConnID := protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8})
destConnID := protocol.ParseConnectionID([]byte{8, 7, 6, 5, 4, 3, 2, 1})
getPacket := func(hdr *wire.ExtendedHeader, data []byte) receivedPacket {
b, err := hdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
rcvTime: time.Now(),
data: append(b, data...),
buffer: getPacketBuffer(),
}
}
BeforeEach(func() {
quicConf = populateConfig(&Config{})
tlsConf = nil
})
JustBeforeEach(func() {
Eventually(areConnsRunning).Should(BeFalse())
mconn = NewMockSendConn(mockCtrl)
mconn.EXPECT().capabilities().AnyTimes()
mconn.EXPECT().RemoteAddr().Return(&net.UDPAddr{}).AnyTimes()
mconn.EXPECT().LocalAddr().Return(&net.UDPAddr{}).AnyTimes()
mconn.EXPECT().capabilities().AnyTimes()
if tlsConf == nil {
tlsConf = &tls.Config{}
}
connRunner = NewMockConnRunner(mockCtrl)
var tr *logging.ConnectionTracer
tr, tracer = mocklogging.NewMockConnectionTracer(mockCtrl)
tracer.EXPECT().NegotiatedVersion(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
tracer.EXPECT().SentTransportParameters(gomock.Any())
tracer.EXPECT().UpdatedKeyFromTLS(gomock.Any(), gomock.Any()).AnyTimes()
tracer.EXPECT().UpdatedCongestionState(gomock.Any())
conn = newClientConnection(
context.Background(),
mconn,
connRunner,
destConnID,
protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8}),
&protocol.DefaultConnectionIDGenerator{},
quicConf,
tlsConf,
42, // initial packet number
false,
false,
tr,
utils.DefaultLogger,
protocol.Version1,
).(*connection)
packer = NewMockPacker(mockCtrl)
conn.packer = packer
cryptoSetup = mocks.NewMockCryptoSetup(mockCtrl)
conn.cryptoStreamHandler = cryptoSetup
conn.sentFirstPacket = true
})
It("changes the connection ID when receiving the first packet from the server", func() {
unpacker := NewMockUnpacker(mockCtrl)
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(hdr *wire.Header, data []byte) (*unpackedPacket, error) {
return &unpackedPacket{
encryptionLevel: protocol.Encryption1RTT,
hdr: &wire.ExtendedHeader{Header: *hdr},
data: []byte{0}, // one PADDING frame
}, nil
})
conn.unpacker = unpacker
done := make(chan struct{})
packer.EXPECT().PackCoalescedPacket(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) {
close(done)
return nil, nil
})
newConnID := protocol.ParseConnectionID([]byte{1, 3, 3, 7, 1, 3, 3, 7})
p := getPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeHandshake,
SrcConnectionID: newConnID,
DestConnectionID: srcConnID,
Length: 2 + 6,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen2,
}, []byte("foobar"))
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), p.Size(), gomock.Any(), []logging.Frame{})
Expect(conn.handlePacketImpl(p)).To(BeTrue())
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
conn.run()
}()
Eventually(done).Should(BeClosed())
// make sure the goroutine returns
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil)
cryptoSetup.EXPECT().Close()
connRunner.EXPECT().ReplaceWithClosed([]protocol.ConnectionID{srcConnID}, gomock.Any())
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
time.Sleep(200 * time.Millisecond)
})
It("continues accepting Long Header packets after using a new connection ID", func() {
unpacker := NewMockUnpacker(mockCtrl)
conn.unpacker = unpacker
connRunner.EXPECT().AddResetToken(gomock.Any(), gomock.Any())
conn.connIDManager.SetHandshakeComplete()
conn.handleNewConnectionIDFrame(&wire.NewConnectionIDFrame{
SequenceNumber: 1,
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5}),
})
Expect(conn.connIDManager.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5})))
// now receive a packet with the original source connection ID
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).DoAndReturn(func(hdr *wire.Header, _ []byte) (*unpackedPacket, error) {
return &unpackedPacket{
hdr: &wire.ExtendedHeader{Header: *hdr},
data: []byte{0},
encryptionLevel: protocol.EncryptionHandshake,
}, nil
})
hdr := &wire.Header{
Type: protocol.PacketTypeHandshake,
DestConnectionID: srcConnID,
SrcConnectionID: destConnID,
}
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handleLongHeaderPacket(receivedPacket{buffer: getPacketBuffer()}, hdr)).To(BeTrue())
})
It("handles HANDSHAKE_DONE frames", func() {
conn.peerParams = &wire.TransportParameters{}
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
sph.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().SetHandshakeConfirmed()
Expect(conn.handleHandshakeDoneFrame()).To(Succeed())
})
It("interprets an ACK for 1-RTT packets as confirmation of the handshake", func() {
conn.peerParams = &wire.TransportParameters{}
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
ack := &wire.AckFrame{AckRanges: []wire.AckRange{{Smallest: 1, Largest: 3}}}
tracer.EXPECT().DroppedEncryptionLevel(protocol.EncryptionHandshake)
sph.EXPECT().ReceivedAck(ack, protocol.Encryption1RTT, gomock.Any()).Return(true, nil)
sph.EXPECT().DropPackets(protocol.EncryptionHandshake)
sph.EXPECT().SetHandshakeConfirmed()
cryptoSetup.EXPECT().SetLargest1RTTAcked(protocol.PacketNumber(3))
cryptoSetup.EXPECT().SetHandshakeConfirmed()
Expect(conn.handleAckFrame(ack, protocol.Encryption1RTT)).To(Succeed())
})
It("doesn't send a CONNECTION_CLOSE when no packet was sent", func() {
conn.sentFirstPacket = false
tracer.EXPECT().ClosedConnection(gomock.Any())
tracer.EXPECT().Close()
running := make(chan struct{})
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).Do(func(context.Context) error {
close(running)
conn.closeLocal(errors.New("early error"))
return nil
})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
cryptoSetup.EXPECT().Close()
connRunner.EXPECT().Remove(gomock.Any())
go func() {
defer GinkgoRecover()
conn.run()
}()
Eventually(running).Should(BeClosed())
Eventually(areConnsRunning).Should(BeFalse())
})
Context("handling tokens", func() {
var tokenStore TokenStore
BeforeEach(func() {
tlsConf = &tls.Config{ServerName: "server"}
tokenStore = NewLRUTokenStore(10, 10)
quicConf.TokenStore = tokenStore
})
It("handles NEW_TOKEN frames", func() {
Expect(conn.handleNewTokenFrame(&wire.NewTokenFrame{Token: []byte("foobar")})).To(Succeed())
Expect(tokenStore.Pop("server")).To(Equal(&ClientToken{data: []byte("foobar")}))
})
})
Context("handling Version Negotiation", func() {
getVNP := func(versions ...protocol.Version) receivedPacket {
b := wire.ComposeVersionNegotiation(
protocol.ArbitraryLenConnectionID(srcConnID.Bytes()),
protocol.ArbitraryLenConnectionID(destConnID.Bytes()),
versions,
)
return receivedPacket{
rcvTime: time.Now(),
data: b,
buffer: getPacketBuffer(),
}
}
It("closes and returns the right error", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
sph.EXPECT().ReceivedBytes(gomock.Any())
sph.EXPECT().PeekPacketNumber(protocol.EncryptionInitial).Return(protocol.PacketNumber(128), protocol.PacketNumberLen4)
conn.config.Versions = []protocol.Version{1234, 4321}
errChan := make(chan error, 1)
start := make(chan struct{})
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().DoAndReturn(func() handshake.Event {
<-start
return handshake.Event{Kind: handshake.EventNoEvent}
})
errChan <- conn.run()
}()
connRunner.EXPECT().Remove(srcConnID)
tracer.EXPECT().ReceivedVersionNegotiationPacket(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(_, _ protocol.ArbitraryLenConnectionID, versions []logging.Version) {
Expect(versions).To(And(
ContainElement(protocol.Version(4321)),
ContainElement(protocol.Version(1337)),
))
})
cryptoSetup.EXPECT().Close()
Expect(conn.handlePacketImpl(getVNP(4321, 1337))).To(BeFalse())
close(start)
var err error
Eventually(errChan).Should(Receive(&err))
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(&errCloseForRecreating{}))
recreateErr := err.(*errCloseForRecreating)
Expect(recreateErr.nextVersion).To(Equal(protocol.Version(4321)))
Expect(recreateErr.nextPacketNumber).To(Equal(protocol.PacketNumber(128)))
})
It("closes when no matching version is found", func() {
errChan := make(chan error, 1)
packer.EXPECT().PackCoalescedPacket(gomock.Any(), gomock.Any(), gomock.Any()).MaxTimes(1)
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent})
errChan <- conn.run()
}()
connRunner.EXPECT().Remove(srcConnID).MaxTimes(1)
gomock.InOrder(
tracer.EXPECT().ReceivedVersionNegotiationPacket(gomock.Any(), gomock.Any(), gomock.Any()),
tracer.EXPECT().ClosedConnection(gomock.Any()).Do(func(e error) {
var vnErr *VersionNegotiationError
Expect(errors.As(e, &vnErr)).To(BeTrue())
Expect(vnErr.Theirs).To(ContainElement(logging.Version(12345678)))
}),
tracer.EXPECT().Close(),
)
cryptoSetup.EXPECT().Close()
Expect(conn.handlePacketImpl(getVNP(12345678))).To(BeFalse())
var err error
Eventually(errChan).Should(Receive(&err))
Expect(err).To(HaveOccurred())
Expect(err).ToNot(BeAssignableToTypeOf(errCloseForRecreating{}))
Expect(err.Error()).To(ContainSubstring("no compatible QUIC version found"))
})
It("ignores Version Negotiation packets that offer the current version", func() {
p := getVNP(conn.version)
tracer.EXPECT().DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedVersion)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("ignores unparseable Version Negotiation packets", func() {
p := getVNP(conn.version)
p.data = p.data[:len(p.data)-2]
tracer.EXPECT().DroppedPacket(logging.PacketTypeVersionNegotiation, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
})
Context("handling Retry", func() {
origDestConnID := protocol.ParseConnectionID([]byte{8, 7, 6, 5, 4, 3, 2, 1})
var retryHdr *wire.ExtendedHeader
JustBeforeEach(func() {
retryHdr = &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeRetry,
SrcConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
DestConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4, 5, 6, 7, 8}),
Token: []byte("foobar"),
Version: conn.version,
},
}
})
getRetryTag := func(hdr *wire.ExtendedHeader) []byte {
b, err := hdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return handshake.GetRetryIntegrityTag(b, origDestConnID, hdr.Version)[:]
}
It("handles Retry packets", func() {
now := time.Now()
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
sph.EXPECT().ResetForRetry(now)
sph.EXPECT().ReceivedBytes(gomock.Any())
cryptoSetup.EXPECT().ChangeConnectionID(protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}))
packer.EXPECT().SetToken([]byte("foobar"))
tracer.EXPECT().ReceivedRetry(gomock.Any()).Do(func(hdr *wire.Header) {
Expect(hdr.DestConnectionID).To(Equal(retryHdr.DestConnectionID))
Expect(hdr.SrcConnectionID).To(Equal(retryHdr.SrcConnectionID))
Expect(hdr.Token).To(Equal(retryHdr.Token))
})
p := getPacket(retryHdr, getRetryTag(retryHdr))
p.rcvTime = now
Expect(conn.handlePacketImpl(p)).To(BeTrue())
})
It("ignores Retry packets after receiving a regular packet", func() {
conn.receivedFirstPacket = true
p := getPacket(retryHdr, getRetryTag(retryHdr))
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("ignores Retry packets if the server didn't change the connection ID", func() {
retryHdr.SrcConnectionID = destConnID
p := getPacket(retryHdr, getRetryTag(retryHdr))
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
It("ignores Retry packets with the a wrong Integrity tag", func() {
tag := getRetryTag(retryHdr)
tag[0]++
p := getPacket(retryHdr, tag)
tracer.EXPECT().DroppedPacket(logging.PacketTypeRetry, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropPayloadDecryptError)
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
})
Context("transport parameters", func() {
var (
closed bool
errChan chan error
paramsChan chan *wire.TransportParameters
)
JustBeforeEach(func() {
errChan = make(chan error, 1)
paramsChan = make(chan *wire.TransportParameters, 1)
closed = false
packer.EXPECT().PackCoalescedPacket(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
go func() {
defer GinkgoRecover()
cryptoSetup.EXPECT().StartHandshake(gomock.Any()).MaxTimes(1)
// This is not 100% what would happen in reality.
// The run loop calls NextEvent once when it starts up (to send out the ClientHello),
// and then again every time a CRYPTO frame is handled.
// Injecting a CRYPTO frame is not straightforward though,
// so we inject the transport parameters on the first call to NextEvent.
params := <-paramsChan
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{
Kind: handshake.EventReceivedTransportParameters,
TransportParameters: params,
})
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventHandshakeComplete}).MaxTimes(1)
cryptoSetup.EXPECT().NextEvent().Return(handshake.Event{Kind: handshake.EventNoEvent}).MaxTimes(1).Do(func() {
defer GinkgoRecover()
Expect(conn.handleHandshakeComplete()).To(Succeed())
})
tracer.EXPECT().ChoseALPN(gomock.Any()).MaxTimes(1)
cryptoSetup.EXPECT().ConnectionState().MaxTimes(1)
errChan <- conn.run()
close(errChan)
}()
})
expectClose := func(applicationClose, errored bool) {
if !closed && !errored {
connRunner.EXPECT().ReplaceWithClosed(gomock.Any(), gomock.Any())
if applicationClose {
packer.EXPECT().PackApplicationClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil).MaxTimes(1)
} else {
packer.EXPECT().PackConnectionClose(gomock.Any(), gomock.Any(), conn.version).Return(&coalescedPacket{buffer: getPacketBuffer()}, nil).MaxTimes(1)
}
cryptoSetup.EXPECT().Close()
mconn.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any())
gomock.InOrder(
tracer.EXPECT().ClosedConnection(gomock.Any()),
tracer.EXPECT().Close(),
)
}
closed = true
}
AfterEach(func() {
conn.CloseWithError(0, "")
Eventually(conn.Context().Done()).Should(BeClosed())
Eventually(errChan).Should(BeClosed())
})
It("uses the preferred_address connection ID", func() {
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
PreferredAddress: &wire.PreferredAddress{
IPv4: netip.AddrPortFrom(netip.AddrFrom4([4]byte{127, 0, 0, 1}), 42),
IPv6: netip.AddrPortFrom(netip.AddrFrom16([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 13),
ConnectionID: protocol.ParseConnectionID([]byte{1, 2, 3, 4}),
StatelessResetToken: protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
},
}
packer.EXPECT().PackCoalescedPacket(false, gomock.Any(), conn.version).MaxTimes(1)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
// make sure the connection ID is not retired
cf, _ := conn.framer.AppendControlFrames(nil, protocol.MaxByteCount, protocol.Version1)
Expect(cf).To(BeEmpty())
connRunner.EXPECT().AddResetToken(protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}, conn)
Expect(conn.connIDManager.Get()).To(Equal(protocol.ParseConnectionID([]byte{1, 2, 3, 4})))
// shut down
connRunner.EXPECT().RemoveResetToken(protocol.StatelessResetToken{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
expectClose(true, false)
})
It("uses the minimum of the peers' idle timeouts", func() {
conn.config.MaxIdleTimeout = 19 * time.Second
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
MaxIdleTimeout: 18 * time.Second,
}
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
// close first
expectClose(true, false)
conn.CloseWithError(0, "")
// then check; this avoids a race condition when accessing idleTimeout
Expect(conn.idleTimeout).To(Equal(18 * time.Second))
})
It("errors if the transport parameters contain a wrong initial_source_connection_id", func() {
conn.handshakeDestConnID = protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xca, 0xfb, 0xad}),
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "expected initial_source_connection_id to equal deadbeef, is decafbad",
})))
})
It("errors if the transport parameters don't contain the retry_source_connection_id, if a Retry was performed", func() {
rcid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
conn.retrySrcConnID = &rcid
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "missing retry_source_connection_id",
})))
})
It("errors if the transport parameters contain the wrong retry_source_connection_id, if a Retry was performed", func() {
rcid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
rcid2 := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xc0, 0xde})
conn.retrySrcConnID = &rcid
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
RetrySourceConnectionID: &rcid2,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "expected retry_source_connection_id to equal deadbeef, is deadc0de",
})))
})
It("errors if the transport parameters contain the retry_source_connection_id, if no Retry was performed", func() {
rcid := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xc0, 0xde})
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
RetrySourceConnectionID: &rcid,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "received retry_source_connection_id, although no Retry was performed",
})))
})
It("errors if the transport parameters contain a wrong original_destination_connection_id", func() {
conn.origDestConnID = protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
params := &wire.TransportParameters{
OriginalDestinationConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xca, 0xfb, 0xad}),
InitialSourceConnectionID: conn.handshakeDestConnID,
StatelessResetToken: &protocol.StatelessResetToken{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.TransportParameterError,
ErrorMessage: "expected original_destination_connection_id to equal deadbeef, is decafbad",
})))
})
It("errors if the transport parameters contain reduced limits after knowing 0-RTT data is accepted by the server", func() {
conn.perspective = protocol.PerspectiveClient
conn.peerParams = &wire.TransportParameters{
ActiveConnectionIDLimit: 3,
InitialMaxData: 0x5000,
InitialMaxStreamDataBidiLocal: 0x5000,
InitialMaxStreamDataBidiRemote: 1000,
InitialMaxStreamDataUni: 1000,
MaxBidiStreamNum: 500,
MaxUniStreamNum: 500,
}
params := &wire.TransportParameters{
OriginalDestinationConnectionID: destConnID,
InitialSourceConnectionID: destConnID,
ActiveConnectionIDLimit: 3,
InitialMaxData: 0x5000,
InitialMaxStreamDataBidiLocal: 0x5000,
InitialMaxStreamDataBidiRemote: 1000,
InitialMaxStreamDataUni: 1000,
MaxBidiStreamNum: 300,
MaxUniStreamNum: 300,
}
expectClose(false, true)
processed := make(chan struct{})
tracer.EXPECT().ReceivedTransportParameters(params).Do(func(*wire.TransportParameters) { close(processed) })
cryptoSetup.EXPECT().ConnectionState().Return(handshake.ConnectionState{Used0RTT: true})
paramsChan <- params
Eventually(processed).Should(BeClosed())
Eventually(errChan).Should(Receive(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "server sent reduced limits after accepting 0-RTT data",
})))
})
})
Context("handling potentially injected packets", func() {
var unpacker *MockUnpacker
getPacket := func(extHdr *wire.ExtendedHeader, data []byte) receivedPacket {
b, err := extHdr.Append(nil, conn.version)
Expect(err).ToNot(HaveOccurred())
return receivedPacket{
data: append(b, data...),
buffer: getPacketBuffer(),
}
}
// Convert an already packed raw packet into a receivedPacket
wrapPacket := func(packet []byte) receivedPacket {
return receivedPacket{
data: packet,
buffer: getPacketBuffer(),
}
}
// Illustrates that an attacker may inject an Initial packet with a different
// source connection ID, causing the endpoint to ignore subsequent real Initial packets.
It("ignores Initial packets with a different source connection ID", func() {
// Modified from test "ignores packets with a different source connection ID"
unpacker = NewMockUnpacker(mockCtrl)
conn.unpacker = unpacker
hdr1 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: srcConnID,
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 1,
}
hdr2 := &wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketTypeInitial,
DestConnectionID: destConnID,
SrcConnectionID: protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef}),
Length: 1,
Version: conn.version,
},
PacketNumberLen: protocol.PacketNumberLen1,
PacketNumber: 2,
}
Expect(hdr2.SrcConnectionID).ToNot(Equal(srcConnID))
// Send one packet, which might change the connection ID.
// Only EXPECT one call to the unpacker.
unpacker.EXPECT().UnpackLongHeader(gomock.Any(), gomock.Any()).Return(&unpackedPacket{
encryptionLevel: protocol.EncryptionInitial,
hdr: hdr1,
data: []byte{0}, // one PADDING frame
}, nil)
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(getPacket(hdr1, nil))).To(BeTrue())
// The next packet has to be ignored, since the source connection ID doesn't match.
tracer.EXPECT().DroppedPacket(gomock.Any(), protocol.InvalidPacketNumber, gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(getPacket(hdr2, nil))).To(BeFalse())
})
It("ignores 0-RTT packets", func() {
p := getPacket(&wire.ExtendedHeader{
Header: wire.Header{
Type: protocol.PacketType0RTT,
DestConnectionID: srcConnID,
Length: 2 + 6,
Version: conn.version,
},
PacketNumber: 0x42,
PacketNumberLen: protocol.PacketNumberLen2,
}, []byte("foobar"))
tracer.EXPECT().DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), gomock.Any())
Expect(conn.handlePacketImpl(p)).To(BeFalse())
})
// Illustrates that an injected Initial with an ACK frame for an unsent packet causes
// the connection to immediately break down
It("fails on Initial-level ACK for unsent packet", func() {
ack := &wire.AckFrame{AckRanges: []wire.AckRange{{Smallest: 2, Largest: 2}}}
initialPacket := testutils.ComposeInitialPacket(destConnID, srcConnID, destConnID, nil, []wire.Frame{ack}, protocol.PerspectiveServer, conn.version)
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(wrapPacket(initialPacket))).To(BeFalse())
})
// Illustrates that an injected Initial with a CONNECTION_CLOSE frame causes
// the connection to immediately break down
It("fails on Initial-level CONNECTION_CLOSE frame", func() {
connCloseFrame := &wire.ConnectionCloseFrame{
IsApplicationError: true,
ReasonPhrase: "mitm attacker",
}
initialPacket := testutils.ComposeInitialPacket(destConnID, srcConnID, destConnID, nil, []wire.Frame{connCloseFrame}, protocol.PerspectiveServer, conn.version)
tracer.EXPECT().ReceivedLongHeaderPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(wrapPacket(initialPacket))).To(BeTrue())
})
// Illustrates that an attacker who injects a Retry packet and changes the connection ID
// can cause subsequent real Initial packets to be ignored
It("ignores Initial packets which use original source id, after accepting a Retry", func() {
sph := mockackhandler.NewMockSentPacketHandler(mockCtrl)
conn.sentPacketHandler = sph
sph.EXPECT().ReceivedBytes(gomock.Any()).Times(2)
sph.EXPECT().ResetForRetry(gomock.Any())
newSrcConnID := protocol.ParseConnectionID([]byte{0xde, 0xad, 0xbe, 0xef})
cryptoSetup.EXPECT().ChangeConnectionID(newSrcConnID)
packer.EXPECT().SetToken([]byte("foobar"))
tracer.EXPECT().ReceivedRetry(gomock.Any())
conn.handlePacketImpl(wrapPacket(testutils.ComposeRetryPacket(newSrcConnID, destConnID, destConnID, []byte("foobar"), conn.version)))
initialPacket := testutils.ComposeInitialPacket(conn.connIDManager.Get(), srcConnID, conn.connIDManager.Get(), nil, nil, protocol.PerspectiveServer, conn.version)
tracer.EXPECT().DroppedPacket(gomock.Any(), protocol.InvalidPacketNumber, gomock.Any(), gomock.Any())
Expect(conn.handlePacketImpl(wrapPacket(initialPacket))).To(BeFalse())
})
})
})
golang-github-lucas-clemente-quic-go-0.46.0/connection_timer.go 0000664 0000000 0000000 00000002565 14656644531 0024505 0 ustar 00root root 0000000 0000000 package quic
import (
"time"
"github.com/quic-go/quic-go/internal/utils"
)
var deadlineSendImmediately = time.Time{}.Add(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine
type connectionTimer struct {
timer *utils.Timer
last time.Time
}
func newTimer() *connectionTimer {
return &connectionTimer{timer: utils.NewTimer()}
}
func (t *connectionTimer) SetRead() {
if deadline := t.timer.Deadline(); deadline != deadlineSendImmediately {
t.last = deadline
}
t.timer.SetRead()
}
func (t *connectionTimer) Chan() <-chan time.Time {
return t.timer.Chan()
}
// SetTimer resets the timer.
// It makes sure that the deadline is strictly increasing.
// This prevents busy-looping in cases where the timer fires, but we can't actually send out a packet.
// This doesn't apply to the pacing deadline, which can be set multiple times to deadlineSendImmediately.
func (t *connectionTimer) SetTimer(idleTimeoutOrKeepAlive, ackAlarm, lossTime, pacing time.Time) {
deadline := idleTimeoutOrKeepAlive
if !ackAlarm.IsZero() && ackAlarm.Before(deadline) && ackAlarm.After(t.last) {
deadline = ackAlarm
}
if !lossTime.IsZero() && lossTime.Before(deadline) && lossTime.After(t.last) {
deadline = lossTime
}
if !pacing.IsZero() && pacing.Before(deadline) {
deadline = pacing
}
t.timer.Reset(deadline)
}
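// Illustrative sketch (not part of the original source): how the deadline
// selection above behaves. Assuming now := time.Now(), a deadline that was
// already used once (recorded via SetRead) is skipped on the next call, so
// the timer falls back to the next-later deadline instead of re-arming at
// the same instant:
//
//	t := newTimer()
//	t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
//	t.SetRead() // records now.Add(time.Minute) as the last deadline
//	t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
//	// the deadline is now now.Add(time.Hour), not now.Add(time.Minute) again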
func (t *connectionTimer) Stop() {
t.timer.Stop()
}
golang-github-lucas-clemente-quic-go-0.46.0/connection_timer_test.go 0000664 0000000 0000000 00000003534 14656644531 0025541 0 ustar 00root root 0000000 0000000 package quic
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func (t *connectionTimer) Deadline() time.Time { return t.timer.Deadline() }
var _ = Describe("Timer", func() {
It("sets an idle timeout", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), time.Time{}, time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Hour)))
})
It("sets an ACK timer", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Minute)))
})
It("sets a loss timer", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), now.Add(time.Second), time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Second)))
})
It("sets a pacing timer", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), now.Add(time.Second), now.Add(time.Millisecond))
Expect(t.Deadline()).To(Equal(now.Add(time.Millisecond)))
})
It("doesn't reset to an earlier time", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Minute)))
t.SetRead()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Hour)))
})
It("allows the pacing timer to be set to send immediately", func() {
now := time.Now()
t := newTimer()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, time.Time{})
Expect(t.Deadline()).To(Equal(now.Add(time.Minute)))
t.SetRead()
t.SetTimer(now.Add(time.Hour), now.Add(time.Minute), time.Time{}, deadlineSendImmediately)
Expect(t.Deadline()).To(Equal(deadlineSendImmediately))
})
})
golang-github-lucas-clemente-quic-go-0.46.0/crypto_stream.go 0000664 0000000 0000000 00000004415 14656644531 0024035 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
)
type cryptoStream struct {
queue frameSorter
highestOffset protocol.ByteCount
finished bool
writeOffset protocol.ByteCount
writeBuf []byte
}
func newCryptoStream() *cryptoStream {
return &cryptoStream{queue: *newFrameSorter()}
}
func (s *cryptoStream) HandleCryptoFrame(f *wire.CryptoFrame) error {
highestOffset := f.Offset + protocol.ByteCount(len(f.Data))
if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset {
return &qerr.TransportError{
ErrorCode: qerr.CryptoBufferExceeded,
ErrorMessage: fmt.Sprintf("received invalid offset %d on crypto stream, maximum allowed %d", maxOffset, protocol.MaxCryptoStreamOffset),
}
}
if s.finished {
if highestOffset > s.highestOffset {
// reject crypto data received after this stream was already finished
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received crypto data after change of encryption level",
}
}
// ignore data with a smaller offset than the highest received
// could e.g. be a retransmission
return nil
}
s.highestOffset = max(s.highestOffset, highestOffset)
return s.queue.Push(f.Data, f.Offset, nil)
}
// GetCryptoData retrieves data that was received in CRYPTO frames
func (s *cryptoStream) GetCryptoData() []byte {
_, data, _ := s.queue.Pop()
return data
}
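// Illustrative sketch (not part of the original source): out-of-order CRYPTO
// frames are buffered by the frame sorter and handed out in order by
// GetCryptoData above, which returns nil once no contiguous data is queued:
//
//	str := newCryptoStream()
//	_ = str.HandleCryptoFrame(&wire.CryptoFrame{Offset: 3, Data: []byte("bar")})
//	_ = str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})
//	// GetCryptoData now yields "foo", then "bar", then nil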
func (s *cryptoStream) Finish() error {
if s.queue.HasMoreData() {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "encryption level changed, but crypto stream has more data to read",
}
}
s.finished = true
return nil
}
// Write writes data that should be sent out in CRYPTO frames
func (s *cryptoStream) Write(p []byte) (int, error) {
s.writeBuf = append(s.writeBuf, p...)
return len(p), nil
}
func (s *cryptoStream) HasData() bool {
return len(s.writeBuf) > 0
}
func (s *cryptoStream) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame {
f := &wire.CryptoFrame{Offset: s.writeOffset}
n := min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
f.Data = s.writeBuf[:n]
s.writeBuf = s.writeBuf[n:]
s.writeOffset += n
return f
}
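// Illustrative sketch (not part of the original source): the write side
// buffers data and slices it into CRYPTO frames on demand. The 1000-byte
// limit is an arbitrary example value for the space left in the packet:
//
//	str := newCryptoStream()
//	_, _ = str.Write([]byte("ClientHello bytes"))
//	for str.HasData() {
//	    f := str.PopCryptoFrame(1000) // frame header plus payload fit into 1000 bytes
//	    _ = f                         // append f to the packet's frame list
//	}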
golang-github-lucas-clemente-quic-go-0.46.0/crypto_stream_manager.go 0000664 0000000 0000000 00000004210 14656644531 0025520 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/wire"
)
type cryptoStreamManager struct {
initialStream *cryptoStream
handshakeStream *cryptoStream
oneRTTStream *cryptoStream
}
func newCryptoStreamManager(
initialStream *cryptoStream,
handshakeStream *cryptoStream,
oneRTTStream *cryptoStream,
) *cryptoStreamManager {
return &cryptoStreamManager{
initialStream: initialStream,
handshakeStream: handshakeStream,
oneRTTStream: oneRTTStream,
}
}
func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error {
var str *cryptoStream
//nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets.
switch encLevel {
case protocol.EncryptionInitial:
str = m.initialStream
case protocol.EncryptionHandshake:
str = m.handshakeStream
case protocol.Encryption1RTT:
str = m.oneRTTStream
default:
return fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel)
}
return str.HandleCryptoFrame(frame)
}
func (m *cryptoStreamManager) GetCryptoData(encLevel protocol.EncryptionLevel) []byte {
var str *cryptoStream
//nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets.
switch encLevel {
case protocol.EncryptionInitial:
str = m.initialStream
case protocol.EncryptionHandshake:
str = m.handshakeStream
case protocol.Encryption1RTT:
str = m.oneRTTStream
default:
panic(fmt.Sprintf("received CRYPTO frame with unexpected encryption level: %s", encLevel))
}
return str.GetCryptoData()
}
func (m *cryptoStreamManager) GetPostHandshakeData(maxSize protocol.ByteCount) *wire.CryptoFrame {
if !m.oneRTTStream.HasData() {
return nil
}
return m.oneRTTStream.PopCryptoFrame(maxSize)
}
func (m *cryptoStreamManager) Drop(encLevel protocol.EncryptionLevel) error {
//nolint:exhaustive // 1-RTT keys should never get dropped.
switch encLevel {
case protocol.EncryptionInitial:
return m.initialStream.Finish()
case protocol.EncryptionHandshake:
return m.handshakeStream.Finish()
default:
panic(fmt.Sprintf("dropped unexpected encryption level: %s", encLevel))
}
}
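// Illustrative sketch (not part of the original source): the manager routes
// each CRYPTO frame to the stream matching its encryption level, and Drop
// finishes a level's stream once that level's keys are dropped:
//
//	csm := newCryptoStreamManager(newCryptoStream(), newCryptoStream(), newCryptoStream())
//	_ = csm.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("hs data")}, protocol.EncryptionHandshake)
//	data := csm.GetCryptoData(protocol.EncryptionHandshake) // "hs data"
//	_ = data
//	_ = csm.Drop(protocol.EncryptionHandshake) // errors if unread data remains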
golang-github-lucas-clemente-quic-go-0.46.0/crypto_stream_manager_test.go 0000664 0000000 0000000 00000005317 14656644531 0026570 0 ustar 00root root 0000000 0000000 package quic
import (
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Crypto Stream Manager", func() {
var (
csm *cryptoStreamManager
initialStream *cryptoStream
handshakeStream *cryptoStream
oneRTTStream *cryptoStream
)
BeforeEach(func() {
initialStream = newCryptoStream()
handshakeStream = newCryptoStream()
oneRTTStream = newCryptoStream()
csm = newCryptoStreamManager(initialStream, handshakeStream, oneRTTStream)
})
It("passes messages to the initial stream", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
Expect(csm.HandleCryptoFrame(cf, protocol.EncryptionInitial)).To(Succeed())
Expect(csm.GetCryptoData(protocol.EncryptionInitial)).To(Equal([]byte("foobar")))
})
It("passes messages to the handshake stream", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
Expect(csm.HandleCryptoFrame(cf, protocol.EncryptionHandshake)).To(Succeed())
Expect(csm.GetCryptoData(protocol.EncryptionHandshake)).To(Equal([]byte("foobar")))
})
It("passes messages to the 1-RTT stream", func() {
cf := &wire.CryptoFrame{Data: []byte("foobar")}
Expect(csm.HandleCryptoFrame(cf, protocol.Encryption1RTT)).To(Succeed())
Expect(csm.GetCryptoData(protocol.Encryption1RTT)).To(Equal([]byte("foobar")))
})
It("processes all messages", func() {
Expect(csm.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("bar"), Offset: 3}, protocol.EncryptionHandshake)).To(Succeed())
Expect(csm.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")}, protocol.EncryptionHandshake)).To(Succeed())
var data []byte
for {
b := csm.GetCryptoData(protocol.EncryptionHandshake)
if len(b) == 0 {
break
}
data = append(data, b...)
}
Expect(data).To(Equal([]byte("foobar")))
})
It("errors for unknown encryption levels", func() {
err := csm.HandleCryptoFrame(&wire.CryptoFrame{}, 42)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("received CRYPTO frame with unexpected encryption level"))
})
It("drops Initial", func() {
Expect(initialStream.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})).To(Succeed())
err := csm.Drop(protocol.EncryptionInitial)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("encryption level changed, but crypto stream has more data to read"))
})
It("drops Handshake", func() {
Expect(handshakeStream.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})).To(Succeed())
err := csm.Drop(protocol.EncryptionHandshake)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("encryption level changed, but crypto stream has more data to read"))
})
})
golang-github-lucas-clemente-quic-go-0.46.0/crypto_stream_test.go 0000664 0000000 0000000 00000012124 14656644531 0025070 0 ustar 00root root 0000000 0000000 package quic
import (
"fmt"
"github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/qerr"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Crypto Stream", func() {
var str *cryptoStream
BeforeEach(func() { str = newCryptoStream() })
Context("handling incoming data", func() {
It("handles in-order CRYPTO frames", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("foo")))
Expect(str.GetCryptoData()).To(BeNil())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("bar"), Offset: 3})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("bar")))
Expect(str.GetCryptoData()).To(BeNil())
})
It("errors if the frame exceeds the maximum offset", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Offset: protocol.MaxCryptoStreamOffset - 5,
Data: []byte("foobar"),
})).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.CryptoBufferExceeded,
ErrorMessage: fmt.Sprintf("received invalid offset %d on crypto stream, maximum allowed %d", protocol.MaxCryptoStreamOffset+1, protocol.MaxCryptoStreamOffset),
}))
})
It("handles out-of-order CRYPTO frames", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Offset: 3, Data: []byte("bar")})).To(Succeed())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{Data: []byte("foo")})).To(Succeed())
var data []byte
for {
b := str.GetCryptoData()
if b == nil {
break
}
data = append(data, b...)
}
Expect(data).To(Equal([]byte("foobar")))
Expect(str.GetCryptoData()).To(BeNil())
})
Context("finishing", func() {
It("errors if there's still data to read at the current offset after finishing", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foo"),
})).To(Succeed())
Expect(str.Finish()).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "encryption level changed, but crypto stream has more data to read",
}))
})
It("errors if there's still data to read at a higher offset after finishing", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foobar"),
Offset: 10,
})).To(Succeed())
Expect(str.Finish()).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "encryption level changed, but crypto stream has more data to read",
}))
})
It("works with reordered data", func() {
f1 := &wire.CryptoFrame{
Data: []byte("foo"),
}
f2 := &wire.CryptoFrame{
Offset: 3,
Data: []byte("bar"),
}
Expect(str.HandleCryptoFrame(f2)).To(Succeed())
Expect(str.HandleCryptoFrame(f1)).To(Succeed())
Expect(str.GetCryptoData()).To(HaveLen(3))
Expect(str.GetCryptoData()).To(HaveLen(3))
Expect(str.Finish()).To(Succeed())
Expect(str.HandleCryptoFrame(f2)).To(Succeed())
})
It("rejects new crypto data after finishing", func() {
Expect(str.Finish()).To(Succeed())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foo"),
})).To(MatchError(&qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: "received crypto data after change of encryption level",
}))
})
It("ignores crypto data below the maximum offset received before finishing", func() {
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Data: []byte("foobar"),
})).To(Succeed())
Expect(str.GetCryptoData()).To(Equal([]byte("foobar")))
Expect(str.Finish()).To(Succeed())
Expect(str.HandleCryptoFrame(&wire.CryptoFrame{
Offset: 2,
Data: []byte("foo"),
})).To(Succeed())
})
})
})
Context("writing data", func() {
It("says if it has data", func() {
Expect(str.HasData()).To(BeFalse())
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
Expect(str.HasData()).To(BeTrue())
})
It("pops crypto frames", func() {
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
f := str.PopCryptoFrame(1000)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(BeZero())
Expect(f.Data).To(Equal([]byte("foobar")))
})
It("coalesces multiple writes", func() {
_, err := str.Write([]byte("foo"))
Expect(err).ToNot(HaveOccurred())
_, err = str.Write([]byte("bar"))
Expect(err).ToNot(HaveOccurred())
f := str.PopCryptoFrame(1000)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(BeZero())
Expect(f.Data).To(Equal([]byte("foobar")))
})
It("respects the maximum size", func() {
frameHeaderLen := (&wire.CryptoFrame{}).Length(protocol.Version1)
_, err := str.Write([]byte("foobar"))
Expect(err).ToNot(HaveOccurred())
f := str.PopCryptoFrame(frameHeaderLen + 3)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(BeZero())
Expect(f.Data).To(Equal([]byte("foo")))
f = str.PopCryptoFrame(frameHeaderLen + 3)
Expect(f).ToNot(BeNil())
Expect(f.Offset).To(Equal(protocol.ByteCount(3)))
Expect(f.Data).To(Equal([]byte("bar")))
})
})
})
golang-github-lucas-clemente-quic-go-0.46.0/datagram_queue.go 0000664 0000000 0000000 00000005640 14656644531 0024127 0 ustar 00root root 0000000 0000000 package quic
import (
"context"
"sync"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/utils/ringbuffer"
"github.com/quic-go/quic-go/internal/wire"
)
const (
maxDatagramSendQueueLen = 32
maxDatagramRcvQueueLen = 128
)
type datagramQueue struct {
sendMx sync.Mutex
sendQueue ringbuffer.RingBuffer[*wire.DatagramFrame]
sent chan struct{} // used to notify Add that a datagram was dequeued
rcvMx sync.Mutex
rcvQueue [][]byte
rcvd chan struct{} // used to notify Receive that a new datagram was received
closeErr error
closed chan struct{}
hasData func()
logger utils.Logger
}
func newDatagramQueue(hasData func(), logger utils.Logger) *datagramQueue {
return &datagramQueue{
hasData: hasData,
rcvd: make(chan struct{}, 1),
sent: make(chan struct{}, 1),
closed: make(chan struct{}),
logger: logger,
}
}
// Add queues a new DATAGRAM frame for sending.
// Up to 32 DATAGRAM frames will be queued.
// Once that limit is reached, Add blocks until the queue size has reduced.
func (h *datagramQueue) Add(f *wire.DatagramFrame) error {
h.sendMx.Lock()
for {
if h.sendQueue.Len() < maxDatagramSendQueueLen {
h.sendQueue.PushBack(f)
h.sendMx.Unlock()
h.hasData()
return nil
}
select {
case <-h.sent: // drain a stale signal so we don't loop immediately
default:
}
h.sendMx.Unlock()
select {
case <-h.closed:
return h.closeErr
case <-h.sent:
}
h.sendMx.Lock()
}
}
// Peek gets the next DATAGRAM frame for sending.
// If actually sent out, Pop needs to be called before the next call to Peek.
func (h *datagramQueue) Peek() *wire.DatagramFrame {
h.sendMx.Lock()
defer h.sendMx.Unlock()
if h.sendQueue.Empty() {
return nil
}
return h.sendQueue.PeekFront()
}
func (h *datagramQueue) Pop() {
h.sendMx.Lock()
defer h.sendMx.Unlock()
_ = h.sendQueue.PopFront()
select {
case h.sent <- struct{}{}:
default:
}
}
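// Illustrative sketch (not part of the original source): the send-side flow.
// Peek returns the same frame until Pop confirms that it was actually sent;
// the hasData callback passed to newDatagramQueue would normally wake the
// connection's run loop:
//
//	q := newDatagramQueue(func() { /* signal that there's data to send */ }, utils.DefaultLogger)
//	_ = q.Add(&wire.DatagramFrame{Data: []byte("ping")})
//	if f := q.Peek(); f != nil {
//	    // ... write f into an outgoing packet ...
//	    q.Pop()
//	}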
// HandleDatagramFrame handles a received DATAGRAM frame.
func (h *datagramQueue) HandleDatagramFrame(f *wire.DatagramFrame) {
data := make([]byte, len(f.Data))
copy(data, f.Data)
var queued bool
h.rcvMx.Lock()
if len(h.rcvQueue) < maxDatagramRcvQueueLen {
h.rcvQueue = append(h.rcvQueue, data)
queued = true
select {
case h.rcvd <- struct{}{}:
default:
}
}
h.rcvMx.Unlock()
if !queued && h.logger.Debug() {
h.logger.Debugf("Discarding received DATAGRAM frame (%d bytes payload)", len(f.Data))
}
}
// Receive gets a received DATAGRAM frame.
func (h *datagramQueue) Receive(ctx context.Context) ([]byte, error) {
for {
h.rcvMx.Lock()
if len(h.rcvQueue) > 0 {
data := h.rcvQueue[0]
h.rcvQueue = h.rcvQueue[1:]
h.rcvMx.Unlock()
return data, nil
}
h.rcvMx.Unlock()
select {
case <-h.rcvd:
continue
case <-h.closed:
return nil, h.closeErr
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
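// Illustrative sketch (not part of the original source): the receive side.
// HandleDatagramFrame copies the payload into the receive queue, and Receive
// blocks until a datagram arrives, the queue is closed, or the context is
// cancelled:
//
//	q := newDatagramQueue(func() {}, utils.DefaultLogger)
//	q.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("pong")})
//	data, err := q.Receive(context.Background()) // data == []byte("pong"), err == nil
//	_, _ = data, err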
func (h *datagramQueue) CloseWithError(e error) {
h.closeErr = e
close(h.closed)
}
golang-github-lucas-clemente-quic-go-0.46.0/datagram_queue_test.go 0000664 0000000 0000000 00000010272 14656644531 0025163 0 ustar 00root root 0000000 0000000 package quic
import (
"context"
"errors"
"time"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/internal/wire"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Datagram Queue", func() {
var queue *datagramQueue
var queued chan struct{}
BeforeEach(func() {
queued = make(chan struct{}, 100)
queue = newDatagramQueue(func() { queued <- struct{}{} }, utils.DefaultLogger)
})
Context("sending", func() {
It("returns nil when there's no datagram to send", func() {
Expect(queue.Peek()).To(BeNil())
})
It("queues a datagram", func() {
frame := &wire.DatagramFrame{Data: []byte("foobar")}
Expect(queue.Add(frame)).To(Succeed())
Expect(queued).To(HaveLen(1))
f := queue.Peek()
Expect(f.Data).To(Equal([]byte("foobar")))
queue.Pop()
Expect(queue.Peek()).To(BeNil())
})
It("blocks when the maximum number of datagrams have been queued", func() {
for i := 0; i < maxDatagramSendQueueLen; i++ {
Expect(queue.Add(&wire.DatagramFrame{Data: []byte{0}})).To(Succeed())
}
errChan := make(chan error, 1)
go func() {
defer GinkgoRecover()
errChan <- queue.Add(&wire.DatagramFrame{Data: []byte("foobar")})
}()
Consistently(errChan, 50*time.Millisecond).ShouldNot(Receive())
Expect(queue.Peek()).ToNot(BeNil())
Consistently(errChan, 50*time.Millisecond).ShouldNot(Receive())
queue.Pop()
Eventually(errChan).Should(Receive(BeNil()))
for i := 1; i < maxDatagramSendQueueLen; i++ {
queue.Pop()
}
f := queue.Peek()
Expect(f).ToNot(BeNil())
Expect(f.Data).To(Equal([]byte("foobar")))
})
It("returns the same datagram multiple times, when Pop isn't called", func() {
Expect(queue.Add(&wire.DatagramFrame{Data: []byte("foo")})).To(Succeed())
Expect(queue.Add(&wire.DatagramFrame{Data: []byte("bar")})).To(Succeed())
Eventually(queued).Should(HaveLen(2))
f := queue.Peek()
Expect(f.Data).To(Equal([]byte("foo")))
Expect(queue.Peek()).To(Equal(f))
Expect(queue.Peek()).To(Equal(f))
queue.Pop()
f = queue.Peek()
Expect(f).ToNot(BeNil())
Expect(f.Data).To(Equal([]byte("bar")))
})
It("closes", func() {
for i := 0; i < maxDatagramSendQueueLen; i++ {
Expect(queue.Add(&wire.DatagramFrame{Data: []byte("foo")})).To(Succeed())
}
errChan := make(chan error, 1)
go func() {
defer GinkgoRecover()
errChan <- queue.Add(&wire.DatagramFrame{Data: []byte("foo")})
}()
Consistently(errChan, 25*time.Millisecond).ShouldNot(Receive())
testErr := errors.New("test error")
queue.CloseWithError(testErr)
Eventually(errChan).Should(Receive(MatchError(testErr)))
})
})
Context("receiving", func() {
It("receives DATAGRAM frames", func() {
queue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("foo")})
queue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("bar")})
data, err := queue.Receive(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("foo")))
data, err = queue.Receive(context.Background())
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal([]byte("bar")))
})
It("blocks until a frame is received", func() {
c := make(chan []byte, 1)
go func() {
defer GinkgoRecover()
data, err := queue.Receive(context.Background())
Expect(err).ToNot(HaveOccurred())
c <- data
}()
Consistently(c).ShouldNot(Receive())
queue.HandleDatagramFrame(&wire.DatagramFrame{Data: []byte("foobar")})
Eventually(c).Should(Receive(Equal([]byte("foobar"))))
})
It("blocks until context is done", func() {
ctx, cancel := context.WithCancel(context.Background())
errChan := make(chan error)
go func() {
defer GinkgoRecover()
_, err := queue.Receive(ctx)
errChan <- err
}()
Consistently(errChan).ShouldNot(Receive())
cancel()
Eventually(errChan).Should(Receive(Equal(context.Canceled)))
})
It("closes", func() {
errChan := make(chan error, 1)
go func() {
defer GinkgoRecover()
_, err := queue.Receive(context.Background())
errChan <- err
}()
Consistently(errChan).ShouldNot(Receive())
queue.CloseWithError(errors.New("test error"))
Eventually(errChan).Should(Receive(MatchError("test error")))
})
})
})
golang-github-lucas-clemente-quic-go-0.46.0/docs/ 0000775 0000000 0000000 00000000000 14656644531 0021537 5 ustar 00root root 0000000 0000000 golang-github-lucas-clemente-quic-go-0.46.0/docs/quic.png 0000664 0000000 0000000 00000041623 14656644531 0023214 0 ustar 00root root 0000000 0000000 [binary PNG image data omitted]