pax_global_header00006660000000000000000000000064143762034440014521gustar00rootroot0000000000000052 comment=1f4b50ded8c649a83ecc027c770dd47cc1bb177f ice-2.3.1/000077500000000000000000000000001437620344400122645ustar00rootroot00000000000000ice-2.3.1/.github/000077500000000000000000000000001437620344400136245ustar00rootroot00000000000000ice-2.3.1/.github/.gitignore000066400000000000000000000000121437620344400156050ustar00rootroot00000000000000.goassets ice-2.3.1/.github/fetch-scripts.sh000077500000000000000000000014351437620344400167440ustar00rootroot00000000000000#!/bin/sh # # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. # set -eu SCRIPT_PATH="$(realpath "$(dirname "$0")")" GOASSETS_PATH="${SCRIPT_PATH}/.goassets" GOASSETS_REF=${GOASSETS_REF:-master} if [ -d "${GOASSETS_PATH}" ]; then if ! git -C "${GOASSETS_PATH}" diff --exit-code; then echo "${GOASSETS_PATH} has uncommitted changes" >&2 exit 1 fi git -C "${GOASSETS_PATH}" fetch origin git -C "${GOASSETS_PATH}" checkout ${GOASSETS_REF} git -C "${GOASSETS_PATH}" reset --hard origin/${GOASSETS_REF} else git clone -b ${GOASSETS_REF} https://github.com/pion/.goassets.git "${GOASSETS_PATH}" fi ice-2.3.1/.github/install-hooks.sh000077500000000000000000000010771437620344400167570ustar00rootroot00000000000000#!/bin/sh # # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. # SCRIPT_PATH="$(realpath "$(dirname "$0")")" . 
${SCRIPT_PATH}/fetch-scripts.sh cp "${GOASSETS_PATH}/hooks/commit-msg.sh" "${SCRIPT_PATH}/../.git/hooks/commit-msg" cp "${GOASSETS_PATH}/hooks/pre-commit.sh" "${SCRIPT_PATH}/../.git/hooks/pre-commit" cp "${GOASSETS_PATH}/hooks/pre-push.sh" "${SCRIPT_PATH}/../.git/hooks/pre-push" ice-2.3.1/.github/workflows/000077500000000000000000000000001437620344400156615ustar00rootroot00000000000000ice-2.3.1/.github/workflows/codeql-analysis.yml000066400000000000000000000011551437620344400214760ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. # name: CodeQL on: workflow_dispatch: schedule: - cron: '23 5 * * 0' pull_request: branches: - master paths: - '**.go' jobs: analyze: uses: pion/.goassets/.github/workflows/codeql-analysis.reusable.yml@master ice-2.3.1/.github/workflows/generate-authors.yml000066400000000000000000000011041437620344400216550ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. 
# name: Generate Authors on: pull_request: jobs: generate: uses: pion/.goassets/.github/workflows/generate-authors.reusable.yml@master secrets: token: ${{ secrets.PIONBOT_PRIVATE_KEY }} ice-2.3.1/.github/workflows/lint.yaml000066400000000000000000000007521437620344400175170ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. # name: Lint on: pull_request: jobs: lint: uses: pion/.goassets/.github/workflows/lint.reusable.yml@master ice-2.3.1/.github/workflows/release.yml000066400000000000000000000011051437620344400200210ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. # name: Release on: push: tags: - 'v*' jobs: release: uses: pion/.goassets/.github/workflows/release.reusable.yml@master with: go-version: '1.19' # auto-update/latest-go-version ice-2.3.1/.github/workflows/renovate-go-sum-fix.yaml000066400000000000000000000011241437620344400223570ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. 
# name: Fix go.sum on: push: branches: - renovate/* jobs: fix: uses: pion/.goassets/.github/workflows/renovate-go-sum-fix.reusable.yml@master secrets: token: ${{ secrets.PIONBOT_PRIVATE_KEY }} ice-2.3.1/.github/workflows/test.yaml000066400000000000000000000021121437620344400175200ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. # name: Test on: push: branches: - master pull_request: jobs: test: uses: pion/.goassets/.github/workflows/test.reusable.yml@master strategy: matrix: go: ['1.19', '1.18'] # auto-update/supported-go-version-list fail-fast: false with: go-version: ${{ matrix.go }} test-i386: uses: pion/.goassets/.github/workflows/test-i386.reusable.yml@master strategy: matrix: go: ['1.19', '1.18'] # auto-update/supported-go-version-list fail-fast: false with: go-version: ${{ matrix.go }} test-wasm: uses: pion/.goassets/.github/workflows/test-wasm.reusable.yml@master with: go-version: '1.19' # auto-update/latest-go-version ice-2.3.1/.github/workflows/tidy-check.yaml000066400000000000000000000011371437620344400205730ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. # If this repository should have package specific CI config, # remove the repository name from .goassets/.github/workflows/assets-sync.yml. # # If you want to update the shared CI config, send a PR to # https://github.com/pion/.goassets instead of this repository. 
# name: Go mod tidy on: pull_request: push: branches: - master jobs: tidy: uses: pion/.goassets/.github/workflows/tidy-check.reusable.yml@master with: go-version: '1.19' # auto-update/latest-go-version ice-2.3.1/.gitignore000066400000000000000000000004661437620344400142620ustar00rootroot00000000000000### JetBrains IDE ### ##################### .idea/ ### Emacs Temporary Files ### ############################# *~ ### Folders ### ############### bin/ vendor/ node_modules/ ### Files ### ############# *.ivf *.ogg tags cover.out *.sw[poe] *.wasm examples/sfu-ws/cert.pem examples/sfu-ws/key.pem wasm_exec.js ice-2.3.1/.golangci.yml000066400000000000000000000172751437620344400146640ustar00rootroot00000000000000linters-settings: govet: check-shadowing: true misspell: locale: US exhaustive: default-signifies-exhaustive: true gomodguard: blocked: modules: - github.com/pkg/errors: recommendations: - errors linters: enable: - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers - bidichk # Checks for dangerous unicode character sequences - bodyclose # checks whether HTTP response body is closed successfully - contextcheck # check the function whether use a non-inherited context - decorder # check declaration order and count of types, constants, variables and functions - depguard # Go linter that checks if package imports are in a list of acceptable packages - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - dupl # Tool for code clone detection - durationcheck # check for two durations multiplied together - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted. 
- errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - exhaustive # check exhaustiveness of enum switch statements - exportloopref # checks for pointers to enclosing loop variables - forcetypeassert # finds forced type assertions - gci # Gci control golang package import order and make it always deterministic. - gochecknoglobals # Checks that no globals are present in Go code - gochecknoinits # Checks that no init functions are present in Go code - gocognit # Computes and checks the cognitive complexity of functions - goconst # Finds repeated strings that could be replaced by a constant - gocritic # The most opinionated Go source code linter - godox # Tool for detection of FIXME, TODO and other comment keywords - goerr113 # Golang linter to check the errors handling expressions - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification - gofumpt # Gofumpt checks whether code was gofumpt-ed. - goheader # Checks is file header matches to pattern - goimports # Goimports does everything that gofmt does. Additionally it checks unused imports - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. 
- goprintffuncname # Checks that printf-like functions are named with `f` at the end - gosec # Inspects source code for security problems - gosimple # Linter for Go source code that specializes in simplifying a code - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - grouper # An analyzer to analyze expression groups. - importas # Enforces consistent import aliases - ineffassign # Detects when assignments to existing variables are not used - misspell # Finds commonly misspelled English words in comments - nakedret # Finds naked returns in functions greater than a specified function length - nilerr # Finds the code that returns nil even if it checks that the error is not nil. - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. - noctx # noctx finds sending http request without context.Context - predeclared # find code that shadows one of Go's predeclared identifiers - revive # golint replacement, finds style mistakes - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks - stylecheck # Stylecheck is a replacement for golint - tagliatelle # Checks the struct tags. 
- tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code - unconvert # Remove unnecessary type conversions - unparam # Reports unused function parameters - unused # Checks Go code for unused constants, variables, functions and types - wastedassign # wastedassign finds wasted assignment statements - whitespace # Tool for detection of leading and trailing whitespace disable: - containedctx # containedctx is a linter that detects struct contained context.Context field - cyclop # checks function and package cyclomatic complexity - exhaustivestruct # Checks if all struct's fields are initialized - forbidigo # Forbids identifiers - funlen # Tool for detection of long functions - gocyclo # Computes and checks the cyclomatic complexity of functions - godot # Check if comments end in a period - gomnd # An analyzer to detect magic numbers. - ifshort # Checks that your code uses short syntax for if-statements whenever possible - ireturn # Accept Interfaces, Return Concrete Types - lll # Reports long lines - maintidx # maintidx measures the maintainability index of each function. 
- makezero # Finds slice declarations with non-zero initial length - maligned # Tool to detect Go structs that would take less memory if their fields were sorted - nestif # Reports deeply nested if statements - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity - nolintlint # Reports ill-formed or insufficient nolint directives - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test - prealloc # Finds slice declarations that could potentially be preallocated - promlinter # Check Prometheus metrics naming via promlint - rowserrcheck # checks whether Err of rows is checked successfully - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. - testpackage # linter that makes you use a separate _test package - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers - varnamelen # checks that the length of a variable's name matches its scope - wrapcheck # Checks that errors returned from external packages are wrapped - wsl # Whitespace Linter - Forces you to use empty lines! issues: exclude-use-default: false exclude-rules: # Allow complex tests, better to be self contained - path: _test\.go linters: - gocognit # Allow complex main function in examples - path: examples text: "of func `main` is high" linters: - gocognit run: skip-dirs-use-default: false ice-2.3.1/.goreleaser.yml000066400000000000000000000000251437620344400152120ustar00rootroot00000000000000builds: - skip: true ice-2.3.1/AUTHORS.txt000066400000000000000000000044611437620344400141570ustar00rootroot00000000000000# Thank you to everyone that made Pion possible. If you are interested in contributing # we would love to have you https://github.com/pion/webrtc/wiki/Contributing # # This file is auto generated, using git to list all individuals contributors. 
# see https://github.com/pion/.goassets/blob/master/scripts/generate-authors.sh for the scripting Aaron France Adam Kiss adwpc Aleksandr Razumov Antoine Baché Artur Shellunts Assad Obaid Atsushi Watanabe backkem buptczq cgojin Chao Yuan cnderrauber David Hamilton David Zhao David Zhao Eric Daniels Genteure Henry hexiang hn8 <10730886+hn8@users.noreply.github.com> Hugo Arregui Hugo Arregui Jason Maldonis Jerko Steiner JooYoung Juliusz Chroboczek Kacper Bąk <56700396+53jk1@users.noreply.github.com> Kevin Caffrey Konstantin Itskov korymiller1489 Kyle Carberry Lander Noterman Luke Curley Meelap Shah Michael MacDonald Michael MacDonald Mikhail Bragin Miroslav Šedivý Nevio Vesic Ori Bernstein Robert Eperjesi Sam Lancia Sam Lancia Sean DuBois Sean DuBois Sebastian Waisbrot Sidney San Martín Steffen Vogel Will Forcey Woodrow Douglass Yutaka Takeda ZHENK Zizheng Tai # List of contributors not appearing in Git history ice-2.3.1/LICENSE000066400000000000000000000020411437620344400132660ustar00rootroot00000000000000MIT License Copyright (c) 2018 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ice-2.3.1/README.md000066400000000000000000000037551437620344400135550ustar00rootroot00000000000000


Pion ICE

A Go implementation of ICE

Pion transport Slack Widget
Build Status GoDoc Coverage Status Go Report Card License: MIT


### Roadmap The library is used as a part of our WebRTC implementation. Please refer to that [roadmap](https://github.com/pion/webrtc/issues/9) to track our major milestones. ### Community Pion has an active community on the [Golang Slack](https://invite.slack.golangbridge.org/). Sign up and join the **#pion** channel for discussions and support. You can also use [Pion mailing list](https://groups.google.com/forum/#!forum/pion). We are always looking to support **your projects**. Please reach out if you have something to build! If you need commercial support or don't want to use public methods you can contact us at [team@pion.ly](mailto:team@pion.ly) ### Contributing Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible: ### License MIT License - see [LICENSE](LICENSE) for full text ice-2.3.1/addr.go000066400000000000000000000017141437620344400135300ustar00rootroot00000000000000package ice import "net" func parseMulticastAnswerAddr(in net.Addr) (net.IP, bool) { switch addr := in.(type) { case *net.IPAddr: return addr.IP, true case *net.UDPAddr: return addr.IP, true case *net.TCPAddr: return addr.IP, true } return nil, false } func parseAddr(in net.Addr) (net.IP, int, NetworkType, bool) { switch addr := in.(type) { case *net.UDPAddr: return addr.IP, addr.Port, NetworkTypeUDP4, true case *net.TCPAddr: return addr.IP, addr.Port, NetworkTypeTCP4, true } return nil, 0, 0, false } func createAddr(network NetworkType, ip net.IP, port int) net.Addr { switch { case network.IsTCP(): return &net.TCPAddr{IP: ip, Port: port} default: return &net.UDPAddr{IP: ip, Port: port} } } func addrEqual(a, b net.Addr) bool { aIP, aPort, aType, aOk := parseAddr(a) if !aOk { return false } bIP, bPort, bType, bOk := parseAddr(b) if !bOk { return false } return aType == bType && aIP.Equal(bIP) && aPort == bPort } 
ice-2.3.1/agent.go000066400000000000000000001011721437620344400137130ustar00rootroot00000000000000// Package ice implements the Interactive Connectivity Establishment (ICE) // protocol defined in rfc5245. package ice import ( "context" "fmt" "net" "strings" "sync" "sync/atomic" "time" atomicx "github.com/pion/ice/v2/internal/atomic" stunx "github.com/pion/ice/v2/internal/stun" "github.com/pion/logging" "github.com/pion/mdns" "github.com/pion/stun" "github.com/pion/transport/v2" "github.com/pion/transport/v2/packetio" "github.com/pion/transport/v2/stdnet" "github.com/pion/transport/v2/vnet" "golang.org/x/net/proxy" ) type bindingRequest struct { timestamp time.Time transactionID [stun.TransactionIDSize]byte destination net.Addr isUseCandidate bool } // Agent represents the ICE agent type Agent struct { chanTask chan task afterRunFn []func(ctx context.Context) muAfterRun sync.Mutex onConnectionStateChangeHdlr atomic.Value // func(ConnectionState) onSelectedCandidatePairChangeHdlr atomic.Value // func(Candidate, Candidate) onCandidateHdlr atomic.Value // func(Candidate) // State owned by the taskLoop onConnected chan struct{} onConnectedOnce sync.Once // force candidate to be contacted immediately (instead of waiting for task ticker) forceCandidateContact chan bool tieBreaker uint64 lite bool connectionState ConnectionState gatheringState GatheringState mDNSMode MulticastDNSMode mDNSName string mDNSConn *mdns.Conn muHaveStarted sync.Mutex startedCh <-chan struct{} startedFn func() isControlling bool maxBindingRequests uint16 hostAcceptanceMinWait time.Duration srflxAcceptanceMinWait time.Duration prflxAcceptanceMinWait time.Duration relayAcceptanceMinWait time.Duration portMin uint16 portMax uint16 candidateTypes []CandidateType // How long connectivity checks can fail before the ICE Agent // goes to disconnected disconnectedTimeout time.Duration // How long connectivity checks can fail before the ICE Agent // goes to failed failedTimeout time.Duration // How often 
should we send keepalive packets? // 0 means never keepaliveInterval time.Duration // How often should we run our internal taskLoop to check for state changes when connecting checkInterval time.Duration localUfrag string localPwd string localCandidates map[NetworkType][]Candidate remoteUfrag string remotePwd string remoteCandidates map[NetworkType][]Candidate checklist []*CandidatePair selector pairCandidateSelector selectedPair atomic.Value // *CandidatePair urls []*URL networkTypes []NetworkType buf *packetio.Buffer // LRU of outbound Binding request Transaction IDs pendingBindingRequests []bindingRequest // 1:1 D-NAT IP address mapping extIPMapper *externalIPMapper // State for closing done chan struct{} taskLoopDone chan struct{} err atomicx.Error gatherCandidateCancel func() gatherCandidateDone chan struct{} chanCandidate chan Candidate chanCandidatePair chan *CandidatePair chanState chan ConnectionState loggerFactory logging.LoggerFactory log logging.LeveledLogger net transport.Net tcpMux TCPMux udpMux UDPMux udpMuxSrflx UniversalUDPMux interfaceFilter func(string) bool ipFilter func(net.IP) bool includeLoopback bool insecureSkipVerify bool proxyDialer proxy.Dialer } type task struct { fn func(context.Context, *Agent) done chan struct{} } // afterRun registers function to be run after the task. func (a *Agent) afterRun(f func(context.Context)) { a.muAfterRun.Lock() a.afterRunFn = append(a.afterRunFn, f) a.muAfterRun.Unlock() } func (a *Agent) getAfterRunFn() []func(context.Context) { a.muAfterRun.Lock() defer a.muAfterRun.Unlock() fns := a.afterRunFn a.afterRunFn = nil return fns } func (a *Agent) ok() error { select { case <-a.done: return a.getErr() default: } return nil } func (a *Agent) getErr() error { if err := a.err.Load(); err != nil { return err } return ErrClosed } // Run task in serial. Blocking tasks must be cancelable by context. 
func (a *Agent) run(ctx context.Context, t func(context.Context, *Agent)) error { if err := a.ok(); err != nil { return err } done := make(chan struct{}) select { case <-ctx.Done(): return ctx.Err() case a.chanTask <- task{t, done}: <-done return nil } } // taskLoop handles registered tasks and agent close. func (a *Agent) taskLoop() { after := func() { for { // Get and run func registered by afterRun(). fns := a.getAfterRunFn() if len(fns) == 0 { break } for _, fn := range fns { fn(a.context()) } } } defer func() { a.deleteAllCandidates() a.startedFn() if err := a.buf.Close(); err != nil { a.log.Warnf("failed to close buffer: %v", err) } a.closeMulticastConn() a.updateConnectionState(ConnectionStateClosed) after() close(a.chanState) close(a.chanCandidate) close(a.chanCandidatePair) close(a.taskLoopDone) }() for { select { case <-a.done: return case t := <-a.chanTask: t.fn(a.context(), a) close(t.done) after() } } } // NewAgent creates a new Agent func NewAgent(config *AgentConfig) (*Agent, error) { //nolint:gocognit var err error if config.PortMax < config.PortMin { return nil, ErrPort } mDNSName := config.MulticastDNSHostName if mDNSName == "" { if mDNSName, err = generateMulticastDNSName(); err != nil { return nil, err } } if !strings.HasSuffix(mDNSName, ".local") || len(strings.Split(mDNSName, ".")) != 2 { return nil, ErrInvalidMulticastDNSHostName } mDNSMode := config.MulticastDNSMode if mDNSMode == 0 { mDNSMode = MulticastDNSModeQueryOnly } loggerFactory := config.LoggerFactory if loggerFactory == nil { loggerFactory = logging.NewDefaultLoggerFactory() } log := loggerFactory.NewLogger("ice") startedCtx, startedFn := context.WithCancel(context.Background()) a := &Agent{ chanTask: make(chan task), chanState: make(chan ConnectionState), chanCandidate: make(chan Candidate), chanCandidatePair: make(chan *CandidatePair), tieBreaker: globalMathRandomGenerator.Uint64(), lite: config.Lite, gatheringState: GatheringStateNew, connectionState: ConnectionStateNew, 
localCandidates: make(map[NetworkType][]Candidate), remoteCandidates: make(map[NetworkType][]Candidate), urls: config.Urls, networkTypes: config.NetworkTypes, onConnected: make(chan struct{}), buf: packetio.NewBuffer(), done: make(chan struct{}), taskLoopDone: make(chan struct{}), startedCh: startedCtx.Done(), startedFn: startedFn, portMin: config.PortMin, portMax: config.PortMax, loggerFactory: loggerFactory, log: log, net: config.Net, proxyDialer: config.ProxyDialer, mDNSMode: mDNSMode, mDNSName: mDNSName, gatherCandidateCancel: func() {}, forceCandidateContact: make(chan bool, 1), interfaceFilter: config.InterfaceFilter, ipFilter: config.IPFilter, insecureSkipVerify: config.InsecureSkipVerify, includeLoopback: config.IncludeLoopback, } a.tcpMux = config.TCPMux if a.tcpMux == nil { a.tcpMux = newInvalidTCPMux() } a.udpMux = config.UDPMux a.udpMuxSrflx = config.UDPMuxSrflx if a.net == nil { a.net, err = stdnet.NewNet() if err != nil { return nil, fmt.Errorf("failed to create network: %w", err) } } else if _, isVirtual := a.net.(*vnet.Net); isVirtual { a.log.Warn("virtual network is enabled") if a.mDNSMode != MulticastDNSModeDisabled { a.log.Warn("virtual network does not support mDNS yet") } } // Opportunistic mDNS: If we can't open the connection, that's ok: we // can continue without it. if a.mDNSConn, a.mDNSMode, err = createMulticastDNS(a.net, mDNSMode, mDNSName, log); err != nil { log.Warnf("Failed to initialize mDNS %s: %v", mDNSName, err) } closeMDNSConn := func() { if a.mDNSConn != nil { if mdnsCloseErr := a.mDNSConn.Close(); mdnsCloseErr != nil { log.Warnf("Failed to close mDNS: %v", mdnsCloseErr) } } } config.initWithDefaults(a) // Make sure the buffer doesn't grow indefinitely. // NOTE: We actually won't get anywhere close to this limit. // SRTP will constantly read from the endpoint and drop packets if it's full. 
a.buf.SetLimitSize(maxBufferSize) if a.lite && (len(a.candidateTypes) != 1 || a.candidateTypes[0] != CandidateTypeHost) { closeMDNSConn() return nil, ErrLiteUsingNonHostCandidates } if config.Urls != nil && len(config.Urls) > 0 && !containsCandidateType(CandidateTypeServerReflexive, a.candidateTypes) && !containsCandidateType(CandidateTypeRelay, a.candidateTypes) { closeMDNSConn() return nil, ErrUselessUrlsProvided } if err = config.initExtIPMapping(a); err != nil { closeMDNSConn() return nil, err } go a.taskLoop() a.startOnConnectionStateChangeRoutine() // Restart is also used to initialize the agent for the first time if err := a.Restart(config.LocalUfrag, config.LocalPwd); err != nil { closeMDNSConn() _ = a.Close() return nil, err } return a, nil } // OnConnectionStateChange sets a handler that is fired when the connection state changes func (a *Agent) OnConnectionStateChange(f func(ConnectionState)) error { a.onConnectionStateChangeHdlr.Store(f) return nil } // OnSelectedCandidatePairChange sets a handler that is fired when the final candidate // pair is selected func (a *Agent) OnSelectedCandidatePairChange(f func(Candidate, Candidate)) error { a.onSelectedCandidatePairChangeHdlr.Store(f) return nil } // OnCandidate sets a handler that is fired when new candidates gathered. When // the gathering process complete the last candidate is nil. 
func (a *Agent) OnCandidate(f func(Candidate)) error { a.onCandidateHdlr.Store(f) return nil } func (a *Agent) onSelectedCandidatePairChange(p *CandidatePair) { if h, ok := a.onSelectedCandidatePairChangeHdlr.Load().(func(Candidate, Candidate)); ok { h(p.Local, p.Remote) } } func (a *Agent) onCandidate(c Candidate) { if onCandidateHdlr, ok := a.onCandidateHdlr.Load().(func(Candidate)); ok { onCandidateHdlr(c) } } func (a *Agent) onConnectionStateChange(s ConnectionState) { if hdlr, ok := a.onConnectionStateChangeHdlr.Load().(func(ConnectionState)); ok { hdlr(s) } } func (a *Agent) startOnConnectionStateChangeRoutine() { go func() { for { // CandidatePair and ConnectionState are usually changed at once. // Blocking one by the other one causes deadlock. p, isOpen := <-a.chanCandidatePair if !isOpen { return } a.onSelectedCandidatePairChange(p) } }() go func() { for { select { case s, isOpen := <-a.chanState: if !isOpen { for c := range a.chanCandidate { a.onCandidate(c) } return } go a.onConnectionStateChange(s) case c, isOpen := <-a.chanCandidate: if !isOpen { for s := range a.chanState { go a.onConnectionStateChange(s) } return } a.onCandidate(c) } } }() } func (a *Agent) startConnectivityChecks(isControlling bool, remoteUfrag, remotePwd string) error { a.muHaveStarted.Lock() defer a.muHaveStarted.Unlock() select { case <-a.startedCh: return ErrMultipleStart default: } if err := a.SetRemoteCredentials(remoteUfrag, remotePwd); err != nil { //nolint:contextcheck return err } a.log.Debugf("Started agent: isControlling? 
%t, remoteUfrag: %q, remotePwd: %q", isControlling, remoteUfrag, remotePwd) return a.run(a.context(), func(ctx context.Context, agent *Agent) { agent.isControlling = isControlling agent.remoteUfrag = remoteUfrag agent.remotePwd = remotePwd if isControlling { a.selector = &controllingSelector{agent: a, log: a.log} } else { a.selector = &controlledSelector{agent: a, log: a.log} } if a.lite { a.selector = &liteSelector{pairCandidateSelector: a.selector} } a.selector.Start() a.startedFn() agent.updateConnectionState(ConnectionStateChecking) a.requestConnectivityCheck() go a.connectivityChecks() //nolint:contextcheck }) } func (a *Agent) connectivityChecks() { lastConnectionState := ConnectionState(0) checkingDuration := time.Time{} contact := func() { if err := a.run(a.context(), func(ctx context.Context, a *Agent) { defer func() { lastConnectionState = a.connectionState }() switch a.connectionState { case ConnectionStateFailed: // The connection is currently failed so don't send any checks // In the future it may be restarted though return case ConnectionStateChecking: // We have just entered checking for the first time so update our checking timer if lastConnectionState != a.connectionState { checkingDuration = time.Now() } // We have been in checking longer then Disconnect+Failed timeout, set the connection to Failed if time.Since(checkingDuration) > a.disconnectedTimeout+a.failedTimeout { a.updateConnectionState(ConnectionStateFailed) return } } a.selector.ContactCandidates() }); err != nil { a.log.Warnf("taskLoop failed: %v", err) } } for { interval := defaultKeepaliveInterval updateInterval := func(x time.Duration) { if x != 0 && (interval == 0 || interval > x) { interval = x } } switch lastConnectionState { case ConnectionStateNew, ConnectionStateChecking: // While connecting, check candidates more frequently updateInterval(a.checkInterval) case ConnectionStateConnected, ConnectionStateDisconnected: updateInterval(a.keepaliveInterval) default: } // Ensure we run 
our task loop as quickly as the minimum of our various configured timeouts updateInterval(a.disconnectedTimeout) updateInterval(a.failedTimeout) t := time.NewTimer(interval) select { case <-a.forceCandidateContact: t.Stop() contact() case <-t.C: contact() case <-a.done: t.Stop() return } } } func (a *Agent) updateConnectionState(newState ConnectionState) { if a.connectionState != newState { // Connection has gone to failed, release all gathered candidates if newState == ConnectionStateFailed { a.deleteAllCandidates() } a.log.Infof("Setting new connection state: %s", newState) a.connectionState = newState // Call handler after finishing current task since we may be holding the agent lock // and the handler may also require it a.afterRun(func(ctx context.Context) { a.chanState <- newState }) } } func (a *Agent) setSelectedPair(p *CandidatePair) { if p == nil { var nilPair *CandidatePair a.selectedPair.Store(nilPair) a.log.Tracef("Unset selected candidate pair") return } p.nominated = true a.selectedPair.Store(p) a.log.Tracef("Set selected candidate pair: %s", p) a.updateConnectionState(ConnectionStateConnected) // Notify when the selected pair changes a.afterRun(func(ctx context.Context) { select { case a.chanCandidatePair <- p: case <-ctx.Done(): } }) // Signal connected a.onConnectedOnce.Do(func() { close(a.onConnected) }) } func (a *Agent) pingAllCandidates() { a.log.Trace("pinging all candidates") if len(a.checklist) == 0 { a.log.Warn("pingAllCandidates called with no candidate pairs. 
Connection is not possible yet.") } for _, p := range a.checklist { if p.state == CandidatePairStateWaiting { p.state = CandidatePairStateInProgress } else if p.state != CandidatePairStateInProgress { continue } if p.bindingRequestCount > a.maxBindingRequests { a.log.Tracef("max requests reached for pair %s, marking it as failed", p) p.state = CandidatePairStateFailed } else { a.selector.PingCandidate(p.Local, p.Remote) p.bindingRequestCount++ } } } func (a *Agent) getBestAvailableCandidatePair() *CandidatePair { var best *CandidatePair for _, p := range a.checklist { if p.state == CandidatePairStateFailed { continue } if best == nil { best = p } else if best.priority() < p.priority() { best = p } } return best } func (a *Agent) getBestValidCandidatePair() *CandidatePair { var best *CandidatePair for _, p := range a.checklist { if p.state != CandidatePairStateSucceeded { continue } if best == nil { best = p } else if best.priority() < p.priority() { best = p } } return best } func (a *Agent) addPair(local, remote Candidate) *CandidatePair { p := newCandidatePair(local, remote, a.isControlling) a.checklist = append(a.checklist, p) return p } func (a *Agent) findPair(local, remote Candidate) *CandidatePair { for _, p := range a.checklist { if p.Local.Equal(local) && p.Remote.Equal(remote) { return p } } return nil } // validateSelectedPair checks if the selected pair is (still) valid // Note: the caller should hold the agent lock. 
func (a *Agent) validateSelectedPair() bool { selectedPair := a.getSelectedPair() if selectedPair == nil { return false } disconnectedTime := time.Since(selectedPair.Remote.LastReceived()) // Only allow transitions to failed if a.failedTimeout is non-zero totalTimeToFailure := a.failedTimeout if totalTimeToFailure != 0 { totalTimeToFailure += a.disconnectedTimeout } switch { case totalTimeToFailure != 0 && disconnectedTime > totalTimeToFailure: a.updateConnectionState(ConnectionStateFailed) case a.disconnectedTimeout != 0 && disconnectedTime > a.disconnectedTimeout: a.updateConnectionState(ConnectionStateDisconnected) default: a.updateConnectionState(ConnectionStateConnected) } return true } // checkKeepalive sends STUN Binding Indications to the selected pair // if no packet has been sent on that pair in the last keepaliveInterval // Note: the caller should hold the agent lock. func (a *Agent) checkKeepalive() { selectedPair := a.getSelectedPair() if selectedPair == nil { return } if (a.keepaliveInterval != 0) && ((time.Since(selectedPair.Local.LastSent()) > a.keepaliveInterval) || (time.Since(selectedPair.Remote.LastReceived()) > a.keepaliveInterval)) { // we use binding request instead of indication to support refresh consent schemas // see https://tools.ietf.org/html/rfc7675 a.selector.PingCandidate(selectedPair.Local, selectedPair.Remote) } } // AddRemoteCandidate adds a new remote candidate func (a *Agent) AddRemoteCandidate(c Candidate) error { if c == nil { return nil } // cannot check for network yet because it might not be applied // when mDNS hostname is used. if c.TCPType() == TCPTypeActive { // TCP Candidates with TCP type active will probe server passive ones, so // no need to do anything with them. 
a.log.Infof("Ignoring remote candidate with tcpType active: %s", c) return nil } // If we have a mDNS Candidate lets fully resolve it before adding it locally if c.Type() == CandidateTypeHost && strings.HasSuffix(c.Address(), ".local") { if a.mDNSMode == MulticastDNSModeDisabled { a.log.Warnf("remote mDNS candidate added, but mDNS is disabled: (%s)", c.Address()) return nil } hostCandidate, ok := c.(*CandidateHost) if !ok { return ErrAddressParseFailed } go a.resolveAndAddMulticastCandidate(hostCandidate) return nil } go func() { if err := a.run(a.context(), func(ctx context.Context, agent *Agent) { agent.addRemoteCandidate(c) }); err != nil { a.log.Warnf("Failed to add remote candidate %s: %v", c.Address(), err) return } }() return nil } func (a *Agent) resolveAndAddMulticastCandidate(c *CandidateHost) { if a.mDNSConn == nil { return } _, src, err := a.mDNSConn.Query(c.context(), c.Address()) if err != nil { a.log.Warnf("Failed to discover mDNS candidate %s: %v", c.Address(), err) return } ip, ipOk := parseMulticastAnswerAddr(src) if !ipOk { a.log.Warnf("Failed to discover mDNS candidate %s: failed to parse IP", c.Address()) return } if err = c.setIP(ip); err != nil { a.log.Warnf("Failed to discover mDNS candidate %s: %v", c.Address(), err) return } if err = a.run(a.context(), func(ctx context.Context, agent *Agent) { agent.addRemoteCandidate(c) }); err != nil { a.log.Warnf("Failed to add mDNS candidate %s: %v", c.Address(), err) return } } func (a *Agent) requestConnectivityCheck() { select { case a.forceCandidateContact <- true: default: } } // addRemoteCandidate assumes you are holding the lock (must be execute using a.run) func (a *Agent) addRemoteCandidate(c Candidate) { set := a.remoteCandidates[c.NetworkType()] for _, candidate := range set { if candidate.Equal(c) { return } } set = append(set, c) a.remoteCandidates[c.NetworkType()] = set if localCandidates, ok := a.localCandidates[c.NetworkType()]; ok { for _, localCandidate := range localCandidates { 
a.addPair(localCandidate, c) } } a.requestConnectivityCheck() } func (a *Agent) addCandidate(ctx context.Context, c Candidate, candidateConn net.PacketConn) error { return a.run(ctx, func(ctx context.Context, agent *Agent) { set := a.localCandidates[c.NetworkType()] for _, candidate := range set { if candidate.Equal(c) { a.log.Debugf("Ignore duplicate candidate: %s", c.String()) if err := c.close(); err != nil { a.log.Warnf("Failed to close duplicate candidate: %v", err) } if err := candidateConn.Close(); err != nil { a.log.Warnf("Failed to close duplicate candidate connection: %v", err) } return } } c.start(a, candidateConn, a.startedCh) set = append(set, c) a.localCandidates[c.NetworkType()] = set if remoteCandidates, ok := a.remoteCandidates[c.NetworkType()]; ok { for _, remoteCandidate := range remoteCandidates { a.addPair(c, remoteCandidate) } } a.requestConnectivityCheck() a.chanCandidate <- c }) } // GetLocalCandidates returns the local candidates func (a *Agent) GetLocalCandidates() ([]Candidate, error) { var res []Candidate err := a.run(a.context(), func(ctx context.Context, agent *Agent) { var candidates []Candidate for _, set := range agent.localCandidates { candidates = append(candidates, set...) 
} res = candidates }) if err != nil { return nil, err } return res, nil } // GetLocalUserCredentials returns the local user credentials func (a *Agent) GetLocalUserCredentials() (frag string, pwd string, err error) { valSet := make(chan struct{}) err = a.run(a.context(), func(ctx context.Context, agent *Agent) { frag = agent.localUfrag pwd = agent.localPwd close(valSet) }) if err == nil { <-valSet } return } // GetRemoteUserCredentials returns the remote user credentials func (a *Agent) GetRemoteUserCredentials() (frag string, pwd string, err error) { valSet := make(chan struct{}) err = a.run(a.context(), func(ctx context.Context, agent *Agent) { frag = agent.remoteUfrag pwd = agent.remotePwd close(valSet) }) if err == nil { <-valSet } return } func (a *Agent) removeUfragFromMux() { a.tcpMux.RemoveConnByUfrag(a.localUfrag) if a.udpMux != nil { a.udpMux.RemoveConnByUfrag(a.localUfrag) } if a.udpMuxSrflx != nil { a.udpMuxSrflx.RemoveConnByUfrag(a.localUfrag) } } // Close cleans up the Agent func (a *Agent) Close() error { if err := a.ok(); err != nil { return err } a.afterRun(func(context.Context) { a.gatherCandidateCancel() if a.gatherCandidateDone != nil { <-a.gatherCandidateDone } }) a.err.Store(ErrClosed) a.removeUfragFromMux() close(a.done) <-a.taskLoopDone return nil } // Remove all candidates. This closes any listening sockets // and removes both the local and remote candidate lists. 
// // This is used for restarts, failures and on close func (a *Agent) deleteAllCandidates() { for net, cs := range a.localCandidates { for _, c := range cs { if err := c.close(); err != nil { a.log.Warnf("Failed to close candidate %s: %v", c, err) } } delete(a.localCandidates, net) } for net, cs := range a.remoteCandidates { for _, c := range cs { if err := c.close(); err != nil { a.log.Warnf("Failed to close candidate %s: %v", c, err) } } delete(a.remoteCandidates, net) } } func (a *Agent) findRemoteCandidate(networkType NetworkType, addr net.Addr) Candidate { ip, port, _, ok := parseAddr(addr) if !ok { a.log.Warnf("Error parsing addr: %s", addr) return nil } set := a.remoteCandidates[networkType] for _, c := range set { if c.Address() == ip.String() && c.Port() == port { return c } } return nil } func (a *Agent) sendBindingRequest(m *stun.Message, local, remote Candidate) { a.log.Tracef("ping STUN from %s to %s", local.String(), remote.String()) a.invalidatePendingBindingRequests(time.Now()) a.pendingBindingRequests = append(a.pendingBindingRequests, bindingRequest{ timestamp: time.Now(), transactionID: m.TransactionID, destination: remote.addr(), isUseCandidate: m.Contains(stun.AttrUseCandidate), }) a.sendSTUN(m, local, remote) } func (a *Agent) sendBindingSuccess(m *stun.Message, local, remote Candidate) { base := remote ip, port, _, ok := parseAddr(base.addr()) if !ok { a.log.Warnf("Error parsing addr: %s", base.addr()) return } if out, err := stun.Build(m, stun.BindingSuccess, &stun.XORMappedAddress{ IP: ip, Port: port, }, stun.NewShortTermIntegrity(a.localPwd), stun.Fingerprint, ); err != nil { a.log.Warnf("Failed to handle inbound ICE from: %s to: %s error: %s", local, remote, err) } else { a.sendSTUN(out, local, remote) } } // Removes pending binding requests that are over maxBindingRequestTimeout old // // Let HTO be the transaction timeout, which SHOULD be 2*RTT if // RTT is known or 500 ms otherwise. 
// https://tools.ietf.org/html/rfc8445#appendix-B.1 func (a *Agent) invalidatePendingBindingRequests(filterTime time.Time) { initialSize := len(a.pendingBindingRequests) temp := a.pendingBindingRequests[:0] for _, bindingRequest := range a.pendingBindingRequests { if filterTime.Sub(bindingRequest.timestamp) < maxBindingRequestTimeout { temp = append(temp, bindingRequest) } } a.pendingBindingRequests = temp if bindRequestsRemoved := initialSize - len(a.pendingBindingRequests); bindRequestsRemoved > 0 { a.log.Tracef("Discarded %d binding requests because they expired", bindRequestsRemoved) } } // Assert that the passed TransactionID is in our pendingBindingRequests and returns the destination // If the bindingRequest was valid remove it from our pending cache func (a *Agent) handleInboundBindingSuccess(id [stun.TransactionIDSize]byte) (bool, *bindingRequest) { a.invalidatePendingBindingRequests(time.Now()) for i := range a.pendingBindingRequests { if a.pendingBindingRequests[i].transactionID == id { validBindingRequest := a.pendingBindingRequests[i] a.pendingBindingRequests = append(a.pendingBindingRequests[:i], a.pendingBindingRequests[i+1:]...) 
return true, &validBindingRequest } } return false, nil } // handleInbound processes STUN traffic from a remote candidate func (a *Agent) handleInbound(m *stun.Message, local Candidate, remote net.Addr) { //nolint:gocognit var err error if m == nil || local == nil { return } if m.Type.Method != stun.MethodBinding || !(m.Type.Class == stun.ClassSuccessResponse || m.Type.Class == stun.ClassRequest || m.Type.Class == stun.ClassIndication) { a.log.Tracef("unhandled STUN from %s to %s class(%s) method(%s)", remote, local, m.Type.Class, m.Type.Method) return } if a.isControlling { if m.Contains(stun.AttrICEControlling) { a.log.Debug("inbound isControlling && a.isControlling == true") return } else if m.Contains(stun.AttrUseCandidate) { a.log.Debug("useCandidate && a.isControlling == true") return } } else { if m.Contains(stun.AttrICEControlled) { a.log.Debug("inbound isControlled && a.isControlling == false") return } } remoteCandidate := a.findRemoteCandidate(local.NetworkType(), remote) if m.Type.Class == stun.ClassSuccessResponse { if err = stun.MessageIntegrity([]byte(a.remotePwd)).Check(m); err != nil { a.log.Warnf("discard message from (%s), %v", remote, err) return } if remoteCandidate == nil { a.log.Warnf("discard success message from (%s), no such remote", remote) return } a.selector.HandleSuccessResponse(m, local, remoteCandidate, remote) } else if m.Type.Class == stun.ClassRequest { if err = stunx.AssertUsername(m, a.localUfrag+":"+a.remoteUfrag); err != nil { a.log.Warnf("discard message from (%s), %v", remote, err) return } else if err = stun.MessageIntegrity([]byte(a.localPwd)).Check(m); err != nil { a.log.Warnf("discard message from (%s), %v", remote, err) return } if remoteCandidate == nil { ip, port, networkType, ok := parseAddr(remote) if !ok { a.log.Errorf("Failed to create parse remote net.Addr when creating remote prflx candidate") return } prflxCandidateConfig := CandidatePeerReflexiveConfig{ Network: networkType.String(), Address: ip.String(), 
Port: port, Component: local.Component(), RelAddr: "", RelPort: 0, } prflxCandidate, err := NewCandidatePeerReflexive(&prflxCandidateConfig) if err != nil { a.log.Errorf("Failed to create new remote prflx candidate (%s)", err) return } remoteCandidate = prflxCandidate a.log.Debugf("adding a new peer-reflexive candidate: %s ", remote) a.addRemoteCandidate(remoteCandidate) } a.log.Tracef("inbound STUN (Request) from %s to %s", remote.String(), local.String()) a.selector.HandleBindingRequest(m, local, remoteCandidate) } if remoteCandidate != nil { remoteCandidate.seen(false) } } // validateNonSTUNTraffic processes non STUN traffic from a remote candidate, // and returns true if it is an actual remote candidate func (a *Agent) validateNonSTUNTraffic(local Candidate, remote net.Addr) bool { var isValidCandidate uint64 if err := a.run(local.context(), func(ctx context.Context, agent *Agent) { remoteCandidate := a.findRemoteCandidate(local.NetworkType(), remote) if remoteCandidate != nil { remoteCandidate.seen(false) atomic.AddUint64(&isValidCandidate, 1) } }); err != nil { a.log.Warnf("failed to validate remote candidate: %v", err) } return atomic.LoadUint64(&isValidCandidate) == 1 } // GetSelectedCandidatePair returns the selected pair or nil if there is none func (a *Agent) GetSelectedCandidatePair() (*CandidatePair, error) { selectedPair := a.getSelectedPair() if selectedPair == nil { return nil, nil //nolint:nilnil } local, err := selectedPair.Local.copy() if err != nil { return nil, err } remote, err := selectedPair.Remote.copy() if err != nil { return nil, err } return &CandidatePair{Local: local, Remote: remote}, nil } func (a *Agent) getSelectedPair() *CandidatePair { if selectedPair, ok := a.selectedPair.Load().(*CandidatePair); ok { return selectedPair } return nil } func (a *Agent) closeMulticastConn() { if a.mDNSConn != nil { if err := a.mDNSConn.Close(); err != nil { a.log.Warnf("failed to close mDNS Conn: %v", err) } } } // SetRemoteCredentials sets the 
credentials of the remote agent func (a *Agent) SetRemoteCredentials(remoteUfrag, remotePwd string) error { switch { case remoteUfrag == "": return ErrRemoteUfragEmpty case remotePwd == "": return ErrRemotePwdEmpty } return a.run(a.context(), func(ctx context.Context, agent *Agent) { agent.remoteUfrag = remoteUfrag agent.remotePwd = remotePwd }) } // Restart restarts the ICE Agent with the provided ufrag/pwd // If no ufrag/pwd is provided the Agent will generate one itself // // If there is a gatherer routine currently running, Restart will // cancel it. // After a Restart, the user must then call GatherCandidates explicitly // to start generating new ones. func (a *Agent) Restart(ufrag, pwd string) error { if ufrag == "" { var err error ufrag, err = generateUFrag() if err != nil { return err } } if pwd == "" { var err error pwd, err = generatePwd() if err != nil { return err } } if len([]rune(ufrag))*8 < 24 { return ErrLocalUfragInsufficientBits } if len([]rune(pwd))*8 < 128 { return ErrLocalPwdInsufficientBits } var err error if runErr := a.run(a.context(), func(ctx context.Context, agent *Agent) { if agent.gatheringState == GatheringStateGathering { agent.gatherCandidateCancel() } // Clear all agent needed to take back to fresh state a.removeUfragFromMux() agent.localUfrag = ufrag agent.localPwd = pwd agent.remoteUfrag = "" agent.remotePwd = "" a.gatheringState = GatheringStateNew a.checklist = make([]*CandidatePair, 0) a.pendingBindingRequests = make([]bindingRequest, 0) a.setSelectedPair(nil) a.deleteAllCandidates() if a.selector != nil { a.selector.Start() } // Restart is used by NewAgent. 
Accept/Connect should be used to move to checking // for new Agents if a.connectionState != ConnectionStateNew { a.updateConnectionState(ConnectionStateChecking) } }); runErr != nil { return runErr } return err } func (a *Agent) setGatheringState(newState GatheringState) error { done := make(chan struct{}) if err := a.run(a.context(), func(ctx context.Context, agent *Agent) { if a.gatheringState != newState && newState == GatheringStateComplete { a.chanCandidate <- nil } a.gatheringState = newState close(done) }); err != nil { return err } <-done return nil } ice-2.3.1/agent_config.go000066400000000000000000000227551437620344400152510ustar00rootroot00000000000000package ice import ( "net" "time" "github.com/pion/logging" "github.com/pion/transport/v2" "golang.org/x/net/proxy" ) const ( // defaultCheckInterval is the interval at which the agent performs candidate checks in the connecting phase defaultCheckInterval = 200 * time.Millisecond // keepaliveInterval used to keep candidates alive defaultKeepaliveInterval = 2 * time.Second // defaultDisconnectedTimeout is the default time till an Agent transitions disconnected defaultDisconnectedTimeout = 5 * time.Second // defaultFailedTimeout is the default time till an Agent transitions to failed after disconnected defaultFailedTimeout = 25 * time.Second // wait time before nominating a host candidate defaultHostAcceptanceMinWait = 0 // wait time before nominating a srflx candidate defaultSrflxAcceptanceMinWait = 500 * time.Millisecond // wait time before nominating a prflx candidate defaultPrflxAcceptanceMinWait = 1000 * time.Millisecond // wait time before nominating a relay candidate defaultRelayAcceptanceMinWait = 2000 * time.Millisecond // max binding request before considering a pair failed defaultMaxBindingRequests = 7 // the number of bytes that can be buffered before we start to error maxBufferSize = 1000 * 1000 // 1MB // wait time before binding requests can be deleted maxBindingRequestTimeout = 4000 * 
time.Millisecond ) func defaultCandidateTypes() []CandidateType { return []CandidateType{CandidateTypeHost, CandidateTypeServerReflexive, CandidateTypeRelay} } // AgentConfig collects the arguments to ice.Agent construction into // a single structure, for future-proofness of the interface type AgentConfig struct { Urls []*URL // PortMin and PortMax are optional. Leave them 0 for the default UDP port allocation strategy. PortMin uint16 PortMax uint16 // LocalUfrag and LocalPwd values used to perform connectivity // checks. The values MUST be unguessable, with at least 128 bits of // random number generator output used to generate the password, and // at least 24 bits of output to generate the username fragment. LocalUfrag string LocalPwd string // MulticastDNSMode controls mDNS behavior for the ICE agent MulticastDNSMode MulticastDNSMode // MulticastDNSHostName controls the hostname for this agent. If none is specified a random one will be generated MulticastDNSHostName string // DisconnectedTimeout defaults to 5 seconds when this property is nil. // If the duration is 0, the ICE Agent will never go to disconnected DisconnectedTimeout *time.Duration // FailedTimeout defaults to 25 seconds when this property is nil. // If the duration is 0, we will never go to failed. FailedTimeout *time.Duration // KeepaliveInterval determines how often should we send ICE // keepalives (should be less then connectiontimeout above) // when this is nil, it defaults to 10 seconds. // A keepalive interval of 0 means we never send keepalive packets KeepaliveInterval *time.Duration // CheckInterval controls how often our task loop runs when in the // connecting state. CheckInterval *time.Duration // NetworkTypes is an optional configuration for disabling or enabling // support for specific network types. NetworkTypes []NetworkType // CandidateTypes is an optional configuration for disabling or enabling // support for specific candidate types. 
CandidateTypes []CandidateType LoggerFactory logging.LoggerFactory // MaxBindingRequests is the max amount of binding requests the agent will send // over a candidate pair for validation or nomination, if after MaxBindingRequests // the candidate is yet to answer a binding request or a nomination we set the pair as failed MaxBindingRequests *uint16 // Lite agents do not perform connectivity check and only provide host candidates. Lite bool // NAT1To1IPCandidateType is used along with NAT1To1IPs to specify which candidate type // the 1:1 NAT IP addresses should be mapped to. // If unspecified or CandidateTypeHost, NAT1To1IPs are used to replace host candidate IPs. // If CandidateTypeServerReflexive, it will insert a srflx candidate (as if it was derived // from a STUN server) with its port number being the one for the actual host candidate. // Other values will result in an error. NAT1To1IPCandidateType CandidateType // NAT1To1IPs contains a list of public IP addresses that are to be used as a host // candidate or srflx candidate. This is used typically for servers that are behind // 1:1 D-NAT (e.g. AWS EC2 instances) and to eliminate the need of server reflexive // candidate gathering. 
NAT1To1IPs []string // HostAcceptanceMinWait specify a minimum wait time before selecting host candidates HostAcceptanceMinWait *time.Duration // HostAcceptanceMinWait specify a minimum wait time before selecting srflx candidates SrflxAcceptanceMinWait *time.Duration // HostAcceptanceMinWait specify a minimum wait time before selecting prflx candidates PrflxAcceptanceMinWait *time.Duration // HostAcceptanceMinWait specify a minimum wait time before selecting relay candidates RelayAcceptanceMinWait *time.Duration // Net is the our abstracted network interface for internal development purpose only // (see https://github.com/pion/transport) Net transport.Net // InterfaceFilter is a function that you can use in order to whitelist or blacklist // the interfaces which are used to gather ICE candidates. InterfaceFilter func(string) bool // IPFilter is a function that you can use in order to whitelist or blacklist // the ips which are used to gather ICE candidates. IPFilter func(net.IP) bool // InsecureSkipVerify controls if self-signed certificates are accepted when connecting // to TURN servers via TLS or DTLS InsecureSkipVerify bool // TCPMux will be used for multiplexing incoming TCP connections for ICE TCP. // Currently only passive candidates are supported. This functionality is // experimental and the API might change in the future. 
TCPMux TCPMux // UDPMux is used for multiplexing multiple incoming UDP connections on a single port // when this is set, the agent ignores PortMin and PortMax configurations and will // defer to UDPMux for incoming connections UDPMux UDPMux // UDPMuxSrflx is used for multiplexing multiple incoming UDP connections of server reflexive candidates // on a single port when this is set, the agent ignores PortMin and PortMax configurations and will // defer to UDPMuxSrflx for incoming connections // It embeds UDPMux to do the actual connection multiplexing UDPMuxSrflx UniversalUDPMux // Proxy Dialer is a dialer that should be implemented by the user based on golang.org/x/net/proxy // dial interface in order to support corporate proxies ProxyDialer proxy.Dialer // Deprecated: AcceptAggressiveNomination always enabled. AcceptAggressiveNomination bool // Include loopback addresses in the candidate list. IncludeLoopback bool } // initWithDefaults populates an agent and falls back to defaults if fields are unset func (config *AgentConfig) initWithDefaults(a *Agent) { if config.MaxBindingRequests == nil { a.maxBindingRequests = defaultMaxBindingRequests } else { a.maxBindingRequests = *config.MaxBindingRequests } if config.HostAcceptanceMinWait == nil { a.hostAcceptanceMinWait = defaultHostAcceptanceMinWait } else { a.hostAcceptanceMinWait = *config.HostAcceptanceMinWait } if config.SrflxAcceptanceMinWait == nil { a.srflxAcceptanceMinWait = defaultSrflxAcceptanceMinWait } else { a.srflxAcceptanceMinWait = *config.SrflxAcceptanceMinWait } if config.PrflxAcceptanceMinWait == nil { a.prflxAcceptanceMinWait = defaultPrflxAcceptanceMinWait } else { a.prflxAcceptanceMinWait = *config.PrflxAcceptanceMinWait } if config.RelayAcceptanceMinWait == nil { a.relayAcceptanceMinWait = defaultRelayAcceptanceMinWait } else { a.relayAcceptanceMinWait = *config.RelayAcceptanceMinWait } if config.DisconnectedTimeout == nil { a.disconnectedTimeout = defaultDisconnectedTimeout } else { 
a.disconnectedTimeout = *config.DisconnectedTimeout } if config.FailedTimeout == nil { a.failedTimeout = defaultFailedTimeout } else { a.failedTimeout = *config.FailedTimeout } if config.KeepaliveInterval == nil { a.keepaliveInterval = defaultKeepaliveInterval } else { a.keepaliveInterval = *config.KeepaliveInterval } if config.CheckInterval == nil { a.checkInterval = defaultCheckInterval } else { a.checkInterval = *config.CheckInterval } if config.CandidateTypes == nil || len(config.CandidateTypes) == 0 { a.candidateTypes = defaultCandidateTypes() } else { a.candidateTypes = config.CandidateTypes } } func (config *AgentConfig) initExtIPMapping(a *Agent) error { var err error a.extIPMapper, err = newExternalIPMapper(config.NAT1To1IPCandidateType, config.NAT1To1IPs) if err != nil { return err } if a.extIPMapper == nil { return nil // this may happen when config.NAT1To1IPs is an empty array } if a.extIPMapper.candidateType == CandidateTypeHost { if a.mDNSMode == MulticastDNSModeQueryAndGather { return ErrMulticastDNSWithNAT1To1IPMapping } candiHostEnabled := false for _, candiType := range a.candidateTypes { if candiType == CandidateTypeHost { candiHostEnabled = true break } } if !candiHostEnabled { return ErrIneffectiveNAT1To1IPMappingHost } } else if a.extIPMapper.candidateType == CandidateTypeServerReflexive { candiSrflxEnabled := false for _, candiType := range a.candidateTypes { if candiType == CandidateTypeServerReflexive { candiSrflxEnabled = true break } } if !candiSrflxEnabled { return ErrIneffectiveNAT1To1IPMappingSrflx } } return nil } ice-2.3.1/agent_get_best_available_candidate_pair_test.go000066400000000000000000000007561437620344400236430ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "testing" "github.com/stretchr/testify/require" ) func TestNoBestAvailableCandidatePairAfterAgentConstruction(t *testing.T) { agent := setupTest(t) require.Nil(t, agent.getBestAvailableCandidatePair()) tearDownTest(t, agent) } func 
setupTest(t *testing.T) *Agent { agent, err := NewAgent(&AgentConfig{}) require.NoError(t, err) return agent } func tearDownTest(t *testing.T, agent *Agent) { require.NoError(t, agent.Close()) } ice-2.3.1/agent_get_best_valid_candidate_pair_test.go000066400000000000000000000057361437620344400230250ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAgentGetBestValidCandidatePair(t *testing.T) { f := setupTestAgentGetBestValidCandidatePair(t) remoteCandidatesFromLowestPriorityToHighest := []Candidate{f.relayRemote, f.srflxRemote, f.prflxRemote, f.hostRemote} for _, remoteCandidate := range remoteCandidatesFromLowestPriorityToHighest { candidatePair := f.sut.addPair(f.hostLocal, remoteCandidate) candidatePair.state = CandidatePairStateSucceeded actualBestPair := f.sut.getBestValidCandidatePair() expectedBestPair := &CandidatePair{Remote: remoteCandidate, Local: f.hostLocal} require.Equal(t, actualBestPair.String(), expectedBestPair.String()) } assert.NoError(t, f.sut.Close()) } func setupTestAgentGetBestValidCandidatePair(t *testing.T) *TestAgentGetBestValidCandidatePairFixture { fixture := new(TestAgentGetBestValidCandidatePairFixture) fixture.hostLocal = newHostLocal(t) fixture.relayRemote = newRelayRemote(t) fixture.srflxRemote = newSrflxRemote(t) fixture.prflxRemote = newPrflxRemote(t) fixture.hostRemote = newHostRemote(t) agent, err := NewAgent(&AgentConfig{}) require.NoError(t, err) fixture.sut = agent return fixture } type TestAgentGetBestValidCandidatePairFixture struct { sut *Agent hostLocal Candidate relayRemote Candidate srflxRemote Candidate prflxRemote Candidate hostRemote Candidate } func newHostRemote(t *testing.T) *CandidateHost { remoteHostConfig := &CandidateHostConfig{ Network: "udp", Address: "1.2.3.5", Port: 12350, Component: 1, } hostRemote, err := NewCandidateHost(remoteHostConfig) require.NoError(t, err) return hostRemote 
} func newPrflxRemote(t *testing.T) *CandidatePeerReflexive { prflxConfig := &CandidatePeerReflexiveConfig{ Network: "udp", Address: "10.10.10.2", Port: 19217, Component: 1, RelAddr: "4.3.2.1", RelPort: 43211, } prflxRemote, err := NewCandidatePeerReflexive(prflxConfig) require.NoError(t, err) return prflxRemote } func newSrflxRemote(t *testing.T) *CandidateServerReflexive { srflxConfig := &CandidateServerReflexiveConfig{ Network: "udp", Address: "10.10.10.2", Port: 19218, Component: 1, RelAddr: "4.3.2.1", RelPort: 43212, } srflxRemote, err := NewCandidateServerReflexive(srflxConfig) require.NoError(t, err) return srflxRemote } func newRelayRemote(t *testing.T) *CandidateRelay { relayConfig := &CandidateRelayConfig{ Network: "udp", Address: "1.2.3.4", Port: 12340, Component: 1, RelAddr: "4.3.2.1", RelPort: 43210, } relayRemote, err := NewCandidateRelay(relayConfig) require.NoError(t, err) return relayRemote } func newHostLocal(t *testing.T) *CandidateHost { localHostConfig := &CandidateHostConfig{ Network: "udp", Address: "192.168.1.1", Port: 19216, Component: 1, } hostLocal, err := NewCandidateHost(localHostConfig) require.NoError(t, err) return hostLocal } ice-2.3.1/agent_stats.go000066400000000000000000000066721437620344400151420ustar00rootroot00000000000000package ice import ( "context" "time" ) // GetCandidatePairsStats returns a list of candidate pair stats func (a *Agent) GetCandidatePairsStats() []CandidatePairStats { var res []CandidatePairStats err := a.run(a.context(), func(ctx context.Context, agent *Agent) { result := make([]CandidatePairStats, 0, len(agent.checklist)) for _, cp := range agent.checklist { stat := CandidatePairStats{ Timestamp: time.Now(), LocalCandidateID: cp.Local.ID(), RemoteCandidateID: cp.Remote.ID(), State: cp.state, Nominated: cp.nominated, // PacketsSent uint32 // PacketsReceived uint32 // BytesSent uint64 // BytesReceived uint64 // LastPacketSentTimestamp time.Time // LastPacketReceivedTimestamp time.Time // 
FirstRequestTimestamp time.Time // LastRequestTimestamp time.Time // LastResponseTimestamp time.Time // TotalRoundTripTime float64 // CurrentRoundTripTime float64 // AvailableOutgoingBitrate float64 // AvailableIncomingBitrate float64 // CircuitBreakerTriggerCount uint32 // RequestsReceived uint64 // RequestsSent uint64 // ResponsesReceived uint64 // ResponsesSent uint64 // RetransmissionsReceived uint64 // RetransmissionsSent uint64 // ConsentRequestsSent uint64 // ConsentExpiredTimestamp time.Time } result = append(result, stat) } res = result }) if err != nil { a.log.Errorf("error getting candidate pairs stats %v", err) return []CandidatePairStats{} } return res } // GetLocalCandidatesStats returns a list of local candidates stats func (a *Agent) GetLocalCandidatesStats() []CandidateStats { var res []CandidateStats err := a.run(a.context(), func(ctx context.Context, agent *Agent) { result := make([]CandidateStats, 0, len(agent.localCandidates)) for networkType, localCandidates := range agent.localCandidates { for _, c := range localCandidates { relayProtocol := "" if c.Type() == CandidateTypeRelay { if cRelay, ok := c.(*CandidateRelay); ok { relayProtocol = cRelay.RelayProtocol() } } stat := CandidateStats{ Timestamp: time.Now(), ID: c.ID(), NetworkType: networkType, IP: c.Address(), Port: c.Port(), CandidateType: c.Type(), Priority: c.Priority(), // URL string RelayProtocol: relayProtocol, // Deleted bool } result = append(result, stat) } } res = result }) if err != nil { a.log.Errorf("error getting candidate pairs stats %v", err) return []CandidateStats{} } return res } // GetRemoteCandidatesStats returns a list of remote candidates stats func (a *Agent) GetRemoteCandidatesStats() []CandidateStats { var res []CandidateStats err := a.run(a.context(), func(ctx context.Context, agent *Agent) { result := make([]CandidateStats, 0, len(agent.remoteCandidates)) for networkType, remoteCandidates := range agent.remoteCandidates { for _, c := range remoteCandidates { 
stat := CandidateStats{ Timestamp: time.Now(), ID: c.ID(), NetworkType: networkType, IP: c.Address(), Port: c.Port(), CandidateType: c.Type(), Priority: c.Priority(), // URL string RelayProtocol: "", } result = append(result, stat) } } res = result }) if err != nil { a.log.Errorf("error getting candidate pairs stats %v", err) return []CandidateStats{} } return res } ice-2.3.1/agent_test.go000066400000000000000000001344311437620344400147560ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "context" "errors" "fmt" "net" "strconv" "sync" "testing" "time" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2/test" "github.com/pion/transport/v2/vnet" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type mockPacketConn struct{} func (m *mockPacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { return 0, nil, nil } func (m *mockPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) { return 0, nil } func (m *mockPacketConn) Close() error { return nil } func (m *mockPacketConn) LocalAddr() net.Addr { return nil } func (m *mockPacketConn) SetDeadline(t time.Time) error { return nil } func (m *mockPacketConn) SetReadDeadline(t time.Time) error { return nil } func (m *mockPacketConn) SetWriteDeadline(t time.Time) error { return nil } func TestOnSelectedCandidatePairChange(t *testing.T) { report := test.CheckRoutines(t) defer report() // avoid deadlocks? 
	defer test.TimeOut(1 * time.Second).Stop()

	a, err := NewAgent(&AgentConfig{})
	if err != nil {
		t.Fatalf("Failed to create agent: %s", err)
	}

	// Buffered so the callback below never blocks the agent's loop.
	callbackCalled := make(chan struct{}, 1)
	if err = a.OnSelectedCandidatePairChange(func(local, remote Candidate) {
		close(callbackCalled)
	}); err != nil {
		t.Fatalf("Failed to set agent OnCandidatePairChange callback: %s", err)
	}

	hostConfig := &CandidateHostConfig{
		Network:   "udp",
		Address:   "192.168.1.1",
		Port:      19216,
		Component: 1,
	}
	hostLocal, err := NewCandidateHost(hostConfig)
	if err != nil {
		t.Fatalf("Failed to construct local host candidate: %s", err)
	}

	relayConfig := &CandidateRelayConfig{
		Network:   "udp",
		Address:   "1.2.3.4",
		Port:      12340,
		Component: 1,
		RelAddr:   "4.3.2.1",
		RelPort:   43210,
	}
	relayRemote, err := NewCandidateRelay(relayConfig)
	if err != nil {
		t.Fatalf("Failed to construct remote relay candidate: %s", err)
	}

	// select the pair
	// Run on the agent loop so setSelectedPair is invoked from the
	// goroutine that owns the agent state.
	if err = a.run(context.Background(), func(ctx context.Context, agent *Agent) {
		p := newCandidatePair(hostLocal, relayRemote, false)
		agent.setSelectedPair(p)
	}); err != nil {
		t.Fatalf("Failed to setValidPair(): %s", err)
	}

	// ensure that the callback fired on setting the pair
	<-callbackCalled
	assert.NoError(t, a.Close())
}

// BadAddr is a net.Addr implementation with an unsupported network type,
// used to exercise the agent's handling of unparseable remote addresses.
type BadAddr struct{}

func (ba *BadAddr) Network() string { return "xxx" }
func (ba *BadAddr) String() string  { return "yyy" }

// runAgentTest constructs an Agent from config, runs task on the agent's
// run loop, and closes the agent when the task returns.
func runAgentTest(t *testing.T, config *AgentConfig, task func(ctx context.Context, a *Agent)) {
	a, err := NewAgent(config)
	if err != nil {
		t.Fatalf("Error constructing ice.Agent")
	}

	if err := a.run(context.Background(), task); err != nil {
		t.Fatalf("Agent run failure: %v", err)
	}

	assert.NoError(t, a.Close())
}

func TestHandlePeerReflexive(t *testing.T) {
	report := test.CheckRoutines(t)
	defer report()

	// Limit runtime in case of deadlocks
	lim := test.TimeOut(time.Second * 2)
	defer lim.Stop()

	t.Run("UDP prflx candidate from handleInbound()", func(t *testing.T) {
		var config AgentConfig
		runAgentTest(t, &config, func(ctx context.Context, a *Agent) {
a.selector = &controllingSelector{agent: a, log: a.log} hostConfig := CandidateHostConfig{ Network: "udp", Address: "192.168.0.2", Port: 777, Component: 1, } local, err := NewCandidateHost(&hostConfig) local.conn = &mockPacketConn{} if err != nil { t.Fatalf("failed to create a new candidate: %v", err) } remote := &net.UDPAddr{IP: net.ParseIP("172.17.0.3"), Port: 999} msg, err := stun.Build(stun.BindingRequest, stun.TransactionID, stun.NewUsername(a.localUfrag+":"+a.remoteUfrag), UseCandidate(), AttrControlling(a.tieBreaker), PriorityAttr(local.Priority()), stun.NewShortTermIntegrity(a.localPwd), stun.Fingerprint, ) if err != nil { t.Fatal(err) } a.handleInbound(msg, local, remote) // length of remote candidate list must be one now if len(a.remoteCandidates) != 1 { t.Fatal("failed to add a network type to the remote candidate list") } // length of remote candidate list for a network type must be 1 set := a.remoteCandidates[local.NetworkType()] if len(set) != 1 { t.Fatal("failed to add prflx candidate to remote candidate list") } c := set[0] if c.Type() != CandidateTypePeerReflexive { t.Fatal("candidate type must be prflx") } if c.Address() != "172.17.0.3" { t.Fatal("IP address mismatch") } if c.Port() != 999 { t.Fatal("Port number mismatch") } }) }) t.Run("Bad network type with handleInbound()", func(t *testing.T) { var config AgentConfig runAgentTest(t, &config, func(ctx context.Context, a *Agent) { a.selector = &controllingSelector{agent: a, log: a.log} hostConfig := CandidateHostConfig{ Network: "tcp", Address: "192.168.0.2", Port: 777, Component: 1, } local, err := NewCandidateHost(&hostConfig) if err != nil { t.Fatalf("failed to create a new candidate: %v", err) } remote := &BadAddr{} a.handleInbound(nil, local, remote) if len(a.remoteCandidates) != 0 { t.Fatal("bad address should not be added to the remote candidate list") } }) }) t.Run("Success from unknown remote, prflx candidate MUST only be created via Binding Request", func(t *testing.T) { var config 
AgentConfig runAgentTest(t, &config, func(ctx context.Context, a *Agent) { a.selector = &controllingSelector{agent: a, log: a.log} tID := [stun.TransactionIDSize]byte{} copy(tID[:], "ABC") a.pendingBindingRequests = []bindingRequest{ {time.Now(), tID, &net.UDPAddr{}, false}, } hostConfig := CandidateHostConfig{ Network: "udp", Address: "192.168.0.2", Port: 777, Component: 1, } local, err := NewCandidateHost(&hostConfig) local.conn = &mockPacketConn{} if err != nil { t.Fatalf("failed to create a new candidate: %v", err) } remote := &net.UDPAddr{IP: net.ParseIP("172.17.0.3"), Port: 999} msg, err := stun.Build(stun.BindingSuccess, stun.NewTransactionIDSetter(tID), stun.NewShortTermIntegrity(a.remotePwd), stun.Fingerprint, ) if err != nil { t.Fatal(err) } a.handleInbound(msg, local, remote) if len(a.remoteCandidates) != 0 { t.Fatal("unknown remote was able to create a candidate") } }) }) } // Assert that Agent on startup sends message, and doesn't wait for connectivityTicker to fire // github.com/pion/ice/issues/15 func TestConnectivityOnStartup(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() // Create a network with two interfaces wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "0.0.0.0/0", LoggerFactory: logging.NewDefaultLoggerFactory(), }) assert.NoError(t, err) net0, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.1"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net0)) net1, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.2"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net1)) assert.NoError(t, wan.Start()) aNotifier, aConnected := onConnected() bNotifier, bConnected := onConnected() KeepaliveInterval := time.Hour cfg0 := &AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net0, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &KeepaliveInterval, } aAgent, err := NewAgent(cfg0) 
require.NoError(t, err) require.NoError(t, aAgent.OnConnectionStateChange(aNotifier)) cfg1 := &AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net1, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &KeepaliveInterval, } bAgent, err := NewAgent(cfg1) require.NoError(t, err) require.NoError(t, bAgent.OnConnectionStateChange(bNotifier)) aConn, bConn := func(aAgent, bAgent *Agent) (*Conn, *Conn) { // Manual signaling aUfrag, aPwd, err := aAgent.GetLocalUserCredentials() assert.NoError(t, err) bUfrag, bPwd, err := bAgent.GetLocalUserCredentials() assert.NoError(t, err) gatherAndExchangeCandidates(aAgent, bAgent) accepted := make(chan struct{}) accepting := make(chan struct{}) var aConn *Conn origHdlr := aAgent.onConnectionStateChangeHdlr.Load() if origHdlr != nil { defer check(aAgent.OnConnectionStateChange(origHdlr.(func(ConnectionState)))) //nolint:forcetypeassert } check(aAgent.OnConnectionStateChange(func(s ConnectionState) { if s == ConnectionStateChecking { close(accepting) } if origHdlr != nil { origHdlr.(func(ConnectionState))(s) //nolint:forcetypeassert } })) go func() { var acceptErr error aConn, acceptErr = aAgent.Accept(context.TODO(), bUfrag, bPwd) check(acceptErr) close(accepted) }() <-accepting bConn, err := bAgent.Dial(context.TODO(), aUfrag, aPwd) check(err) // Ensure accepted <-accepted return aConn, bConn }(aAgent, bAgent) // Ensure pair selected // Note: this assumes ConnectionStateConnected is thrown after selecting the final pair <-aConnected <-bConnected assert.NoError(t, wan.Stop()) if !closePipe(t, aConn, bConn) { return } } func TestConnectivityLite(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() stunServerURL := &URL{ Scheme: SchemeTypeSTUN, Host: "1.2.3.4", Port: 3478, Proto: ProtoTypeUDP, } natType := &vnet.NATType{ MappingBehavior: vnet.EndpointIndependent, FilteringBehavior: vnet.EndpointIndependent, } v, err := 
buildVNet(natType, natType) require.NoError(t, err, "should succeed") defer v.close() aNotifier, aConnected := onConnected() bNotifier, bConnected := onConnected() cfg0 := &AgentConfig{ Urls: []*URL{stunServerURL}, NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: v.net0, } aAgent, err := NewAgent(cfg0) require.NoError(t, err) require.NoError(t, aAgent.OnConnectionStateChange(aNotifier)) cfg1 := &AgentConfig{ Urls: []*URL{}, Lite: true, CandidateTypes: []CandidateType{CandidateTypeHost}, NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: v.net1, } bAgent, err := NewAgent(cfg1) require.NoError(t, err) require.NoError(t, bAgent.OnConnectionStateChange(bNotifier)) aConn, bConn := connectWithVNet(aAgent, bAgent) // Ensure pair selected // Note: this assumes ConnectionStateConnected is thrown after selecting the final pair <-aConnected <-bConnected if !closePipe(t, aConn, bConn) { return } } func TestInboundValidity(t *testing.T) { report := test.CheckRoutines(t) defer report() buildMsg := func(class stun.MessageClass, username, key string) *stun.Message { msg, err := stun.Build(stun.NewType(stun.MethodBinding, class), stun.TransactionID, stun.NewUsername(username), stun.NewShortTermIntegrity(key), stun.Fingerprint, ) if err != nil { t.Fatal(err) } return msg } remote := &net.UDPAddr{IP: net.ParseIP("172.17.0.3"), Port: 999} hostConfig := CandidateHostConfig{ Network: "udp", Address: "192.168.0.2", Port: 777, Component: 1, } local, err := NewCandidateHost(&hostConfig) local.conn = &mockPacketConn{} if err != nil { t.Fatalf("failed to create a new candidate: %v", err) } t.Run("Invalid Binding requests should be discarded", func(t *testing.T) { a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Error constructing ice.Agent") } a.handleInbound(buildMsg(stun.ClassRequest, "invalid", a.localPwd), local, remote) if len(a.remoteCandidates) == 1 { t.Fatal("Binding with invalid Username was able 
to create prflx candidate") } a.handleInbound(buildMsg(stun.ClassRequest, a.localUfrag+":"+a.remoteUfrag, "Invalid"), local, remote) if len(a.remoteCandidates) == 1 { t.Fatal("Binding with invalid MessageIntegrity was able to create prflx candidate") } assert.NoError(t, a.Close()) }) t.Run("Invalid Binding success responses should be discarded", func(t *testing.T) { a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Error constructing ice.Agent") } a.handleInbound(buildMsg(stun.ClassSuccessResponse, a.localUfrag+":"+a.remoteUfrag, "Invalid"), local, remote) if len(a.remoteCandidates) == 1 { t.Fatal("Binding with invalid MessageIntegrity was able to create prflx candidate") } assert.NoError(t, a.Close()) }) t.Run("Discard non-binding messages", func(t *testing.T) { a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Error constructing ice.Agent") } a.handleInbound(buildMsg(stun.ClassErrorResponse, a.localUfrag+":"+a.remoteUfrag, "Invalid"), local, remote) if len(a.remoteCandidates) == 1 { t.Fatal("non-binding message was able to create prflxRemote") } assert.NoError(t, a.Close()) }) t.Run("Valid bind request", func(t *testing.T) { a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Error constructing ice.Agent") } err = a.run(context.Background(), func(ctx context.Context, a *Agent) { a.selector = &controllingSelector{agent: a, log: a.log} a.handleInbound(buildMsg(stun.ClassRequest, a.localUfrag+":"+a.remoteUfrag, a.localPwd), local, remote) if len(a.remoteCandidates) != 1 { t.Fatal("Binding with valid values was unable to create prflx candidate") } }) assert.NoError(t, err) assert.NoError(t, a.Close()) }) t.Run("Valid bind without fingerprint", func(t *testing.T) { var config AgentConfig runAgentTest(t, &config, func(ctx context.Context, a *Agent) { a.selector = &controllingSelector{agent: a, log: a.log} msg, err := stun.Build(stun.BindingRequest, stun.TransactionID, stun.NewUsername(a.localUfrag+":"+a.remoteUfrag), 
stun.NewShortTermIntegrity(a.localPwd), ) if err != nil { t.Fatal(err) } a.handleInbound(msg, local, remote) if len(a.remoteCandidates) != 1 { t.Fatal("Binding with valid values (but no fingerprint) was unable to create prflx candidate") } }) }) t.Run("Success with invalid TransactionID", func(t *testing.T) { a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Error constructing ice.Agent") } hostConfig := CandidateHostConfig{ Network: "udp", Address: "192.168.0.2", Port: 777, Component: 1, } local, err := NewCandidateHost(&hostConfig) local.conn = &mockPacketConn{} if err != nil { t.Fatalf("failed to create a new candidate: %v", err) } remote := &net.UDPAddr{IP: net.ParseIP("172.17.0.3"), Port: 999} tID := [stun.TransactionIDSize]byte{} copy(tID[:], "ABC") msg, err := stun.Build(stun.BindingSuccess, stun.NewTransactionIDSetter(tID), stun.NewShortTermIntegrity(a.remotePwd), stun.Fingerprint, ) assert.NoError(t, err) a.handleInbound(msg, local, remote) if len(a.remoteCandidates) != 0 { t.Fatal("unknown remote was able to create a candidate") } assert.NoError(t, a.Close()) }) } func TestInvalidAgentStarts(t *testing.T) { report := test.CheckRoutines(t) defer report() a, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel() if _, err = a.Dial(ctx, "", "bar"); err != nil && !errors.Is(err, ErrRemoteUfragEmpty) { t.Fatal(err) } if _, err = a.Dial(ctx, "foo", ""); err != nil && !errors.Is(err, ErrRemotePwdEmpty) { t.Fatal(err) } if _, err = a.Dial(ctx, "foo", "bar"); err != nil && !errors.Is(err, ErrCanceledByCaller) { t.Fatal(err) } if _, err = a.Dial(context.TODO(), "foo", "bar"); err != nil && !errors.Is(err, ErrMultipleStart) { t.Fatal(err) } assert.NoError(t, a.Close()) } // Assert that Agent emits Connecting/Connected/Disconnected/Failed/Closed messages func TestConnectionStateCallback(t *testing.T) { report := test.CheckRoutines(t) defer report() lim 
:= test.TimeOut(time.Second * 5) defer lim.Stop() disconnectedDuration := time.Second failedDuration := time.Second KeepaliveInterval := time.Duration(0) cfg := &AgentConfig{ Urls: []*URL{}, NetworkTypes: supportedNetworkTypes(), DisconnectedTimeout: &disconnectedDuration, FailedTimeout: &failedDuration, KeepaliveInterval: &KeepaliveInterval, } aAgent, err := NewAgent(cfg) if err != nil { t.Error(err) } bAgent, err := NewAgent(cfg) if err != nil { t.Error(err) } isChecking := make(chan interface{}) isConnected := make(chan interface{}) isDisconnected := make(chan interface{}) isFailed := make(chan interface{}) isClosed := make(chan interface{}) err = aAgent.OnConnectionStateChange(func(c ConnectionState) { switch c { case ConnectionStateChecking: close(isChecking) case ConnectionStateConnected: close(isConnected) case ConnectionStateDisconnected: close(isDisconnected) case ConnectionStateFailed: close(isFailed) case ConnectionStateClosed: close(isClosed) } }) if err != nil { t.Error(err) } connect(aAgent, bAgent) <-isChecking <-isConnected <-isDisconnected <-isFailed assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) <-isClosed } func TestInvalidGather(t *testing.T) { t.Run("Gather with no OnCandidate should error", func(t *testing.T) { a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Error constructing ice.Agent") } err = a.GatherCandidates() if !errors.Is(err, ErrNoOnCandidateHandler) { t.Fatal("trickle GatherCandidates succeeded without OnCandidate") } assert.NoError(t, a.Close()) }) } func TestCandidatePairStats(t *testing.T) { report := test.CheckRoutines(t) defer report() // avoid deadlocks? 
defer test.TimeOut(1 * time.Second).Stop() a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Failed to create agent: %s", err) } hostConfig := &CandidateHostConfig{ Network: "udp", Address: "192.168.1.1", Port: 19216, Component: 1, } hostLocal, err := NewCandidateHost(hostConfig) if err != nil { t.Fatalf("Failed to construct local host candidate: %s", err) } relayConfig := &CandidateRelayConfig{ Network: "udp", Address: "1.2.3.4", Port: 2340, Component: 1, RelAddr: "4.3.2.1", RelPort: 43210, } relayRemote, err := NewCandidateRelay(relayConfig) if err != nil { t.Fatalf("Failed to construct remote relay candidate: %s", err) } srflxConfig := &CandidateServerReflexiveConfig{ Network: "udp", Address: "10.10.10.2", Port: 19218, Component: 1, RelAddr: "4.3.2.1", RelPort: 43212, } srflxRemote, err := NewCandidateServerReflexive(srflxConfig) if err != nil { t.Fatalf("Failed to construct remote srflx candidate: %s", err) } prflxConfig := &CandidatePeerReflexiveConfig{ Network: "udp", Address: "10.10.10.2", Port: 19217, Component: 1, RelAddr: "4.3.2.1", RelPort: 43211, } prflxRemote, err := NewCandidatePeerReflexive(prflxConfig) if err != nil { t.Fatalf("Failed to construct remote prflx candidate: %s", err) } hostConfig = &CandidateHostConfig{ Network: "udp", Address: "1.2.3.5", Port: 12350, Component: 1, } hostRemote, err := NewCandidateHost(hostConfig) if err != nil { t.Fatalf("Failed to construct remote host candidate: %s", err) } for _, remote := range []Candidate{relayRemote, srflxRemote, prflxRemote, hostRemote} { p := a.findPair(hostLocal, remote) if p == nil { a.addPair(hostLocal, remote) } } p := a.findPair(hostLocal, prflxRemote) p.state = CandidatePairStateFailed stats := a.GetCandidatePairsStats() if len(stats) != 4 { t.Fatal("expected 4 candidate pairs stats") } var relayPairStat, srflxPairStat, prflxPairStat, hostPairStat CandidatePairStats for _, cps := range stats { if cps.LocalCandidateID != hostLocal.ID() { t.Fatal("invalid local candidate id") } 
switch cps.RemoteCandidateID { case relayRemote.ID(): relayPairStat = cps case srflxRemote.ID(): srflxPairStat = cps case prflxRemote.ID(): prflxPairStat = cps case hostRemote.ID(): hostPairStat = cps default: t.Fatal("invalid remote candidate ID") } } if relayPairStat.RemoteCandidateID != relayRemote.ID() { t.Fatal("missing host-relay pair stat") } if srflxPairStat.RemoteCandidateID != srflxRemote.ID() { t.Fatal("missing host-srflx pair stat") } if prflxPairStat.RemoteCandidateID != prflxRemote.ID() { t.Fatal("missing host-prflx pair stat") } if hostPairStat.RemoteCandidateID != hostRemote.ID() { t.Fatal("missing host-host pair stat") } if prflxPairStat.State != CandidatePairStateFailed { t.Fatalf("expected host-prflx pair to have state failed, it has state %s instead", prflxPairStat.State.String()) } assert.NoError(t, a.Close()) } func TestLocalCandidateStats(t *testing.T) { report := test.CheckRoutines(t) defer report() // avoid deadlocks? defer test.TimeOut(1 * time.Second).Stop() a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Failed to create agent: %s", err) } hostConfig := &CandidateHostConfig{ Network: "udp", Address: "192.168.1.1", Port: 19216, Component: 1, } hostLocal, err := NewCandidateHost(hostConfig) if err != nil { t.Fatalf("Failed to construct local host candidate: %s", err) } srflxConfig := &CandidateServerReflexiveConfig{ Network: "udp", Address: "192.168.1.1", Port: 19217, Component: 1, RelAddr: "4.3.2.1", RelPort: 43212, } srflxLocal, err := NewCandidateServerReflexive(srflxConfig) if err != nil { t.Fatalf("Failed to construct local srflx candidate: %s", err) } a.localCandidates[NetworkTypeUDP4] = []Candidate{hostLocal, srflxLocal} localStats := a.GetLocalCandidatesStats() if len(localStats) != 2 { t.Fatalf("expected 2 local candidates stats, got %d instead", len(localStats)) } var hostLocalStat, srflxLocalStat CandidateStats for _, stats := range localStats { var candidate Candidate switch stats.ID { case hostLocal.ID(): 
hostLocalStat = stats candidate = hostLocal case srflxLocal.ID(): srflxLocalStat = stats candidate = srflxLocal default: t.Fatal("invalid local candidate ID") } if stats.CandidateType != candidate.Type() { t.Fatal("invalid stats CandidateType") } if stats.Priority != candidate.Priority() { t.Fatal("invalid stats CandidateType") } if stats.IP != candidate.Address() { t.Fatal("invalid stats IP") } } if hostLocalStat.ID != hostLocal.ID() { t.Fatal("missing host local stat") } if srflxLocalStat.ID != srflxLocal.ID() { t.Fatal("missing srflx local stat") } assert.NoError(t, a.Close()) } func TestRemoteCandidateStats(t *testing.T) { report := test.CheckRoutines(t) defer report() // avoid deadlocks? defer test.TimeOut(1 * time.Second).Stop() a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Failed to create agent: %s", err) } relayConfig := &CandidateRelayConfig{ Network: "udp", Address: "1.2.3.4", Port: 12340, Component: 1, RelAddr: "4.3.2.1", RelPort: 43210, } relayRemote, err := NewCandidateRelay(relayConfig) if err != nil { t.Fatalf("Failed to construct remote relay candidate: %s", err) } srflxConfig := &CandidateServerReflexiveConfig{ Network: "udp", Address: "10.10.10.2", Port: 19218, Component: 1, RelAddr: "4.3.2.1", RelPort: 43212, } srflxRemote, err := NewCandidateServerReflexive(srflxConfig) if err != nil { t.Fatalf("Failed to construct remote srflx candidate: %s", err) } prflxConfig := &CandidatePeerReflexiveConfig{ Network: "udp", Address: "10.10.10.2", Port: 19217, Component: 1, RelAddr: "4.3.2.1", RelPort: 43211, } prflxRemote, err := NewCandidatePeerReflexive(prflxConfig) if err != nil { t.Fatalf("Failed to construct remote prflx candidate: %s", err) } hostConfig := &CandidateHostConfig{ Network: "udp", Address: "1.2.3.5", Port: 12350, Component: 1, } hostRemote, err := NewCandidateHost(hostConfig) if err != nil { t.Fatalf("Failed to construct remote host candidate: %s", err) } a.remoteCandidates[NetworkTypeUDP4] = []Candidate{relayRemote, 
srflxRemote, prflxRemote, hostRemote} remoteStats := a.GetRemoteCandidatesStats() if len(remoteStats) != 4 { t.Fatalf("expected 4 remote candidates stats, got %d instead", len(remoteStats)) } var relayRemoteStat, srflxRemoteStat, prflxRemoteStat, hostRemoteStat CandidateStats for _, stats := range remoteStats { var candidate Candidate switch stats.ID { case relayRemote.ID(): relayRemoteStat = stats candidate = relayRemote case srflxRemote.ID(): srflxRemoteStat = stats candidate = srflxRemote case prflxRemote.ID(): prflxRemoteStat = stats candidate = prflxRemote case hostRemote.ID(): hostRemoteStat = stats candidate = hostRemote default: t.Fatal("invalid remote candidate ID") } if stats.CandidateType != candidate.Type() { t.Fatal("invalid stats CandidateType") } if stats.Priority != candidate.Priority() { t.Fatal("invalid stats CandidateType") } if stats.IP != candidate.Address() { t.Fatal("invalid stats IP") } } if relayRemoteStat.ID != relayRemote.ID() { t.Fatal("missing relay remote stat") } if srflxRemoteStat.ID != srflxRemote.ID() { t.Fatal("missing srflx remote stat") } if prflxRemoteStat.ID != prflxRemote.ID() { t.Fatal("missing prflx remote stat") } if hostRemoteStat.ID != hostRemote.ID() { t.Fatal("missing host remote stat") } assert.NoError(t, a.Close()) } func TestInitExtIPMapping(t *testing.T) { report := test.CheckRoutines(t) defer report() // a.extIPMapper should be nil by default a, err := NewAgent(&AgentConfig{}) if err != nil { t.Fatalf("Failed to create agent: %v", err) } if a.extIPMapper != nil { t.Fatal("a.extIPMapper should be nil by default") } assert.NoError(t, a.Close()) // a.extIPMapper should be nil when NAT1To1IPs is a non-nil empty array a, err = NewAgent(&AgentConfig{ NAT1To1IPs: []string{}, NAT1To1IPCandidateType: CandidateTypeHost, }) if err != nil { t.Fatalf("Failed to create agent: %v", err) } if a.extIPMapper != nil { t.Fatal("a.extIPMapper should be nil by default") } assert.NoError(t, a.Close()) // NewAgent should return an error 
when 1:1 NAT for host candidate is enabled // but the candidate type does not appear in the CandidateTypes. _, err = NewAgent(&AgentConfig{ NAT1To1IPs: []string{"1.2.3.4"}, NAT1To1IPCandidateType: CandidateTypeHost, CandidateTypes: []CandidateType{CandidateTypeRelay}, }) if !errors.Is(err, ErrIneffectiveNAT1To1IPMappingHost) { t.Fatalf("Unexpected error: %v", err) } // NewAgent should return an error when 1:1 NAT for srflx candidate is enabled // but the candidate type does not appear in the CandidateTypes. _, err = NewAgent(&AgentConfig{ NAT1To1IPs: []string{"1.2.3.4"}, NAT1To1IPCandidateType: CandidateTypeServerReflexive, CandidateTypes: []CandidateType{CandidateTypeRelay}, }) if !errors.Is(err, ErrIneffectiveNAT1To1IPMappingSrflx) { t.Fatalf("Unexpected error: %v", err) } // NewAgent should return an error when 1:1 NAT for host candidate is enabled // along with mDNS with MulticastDNSModeQueryAndGather _, err = NewAgent(&AgentConfig{ NAT1To1IPs: []string{"1.2.3.4"}, NAT1To1IPCandidateType: CandidateTypeHost, MulticastDNSMode: MulticastDNSModeQueryAndGather, }) if !errors.Is(err, ErrMulticastDNSWithNAT1To1IPMapping) { t.Fatalf("Unexpected error: %v", err) } // NewAgent should return if newExternalIPMapper() returns an error. 
_, err = NewAgent(&AgentConfig{ NAT1To1IPs: []string{"bad.2.3.4"}, // bad IP NAT1To1IPCandidateType: CandidateTypeHost, }) if !errors.Is(err, ErrInvalidNAT1To1IPMapping) { t.Fatalf("Unexpected error: %v", err) } } func TestBindingRequestTimeout(t *testing.T) { report := test.CheckRoutines(t) defer report() const expectedRemovalCount = 2 a, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) now := time.Now() a.pendingBindingRequests = append(a.pendingBindingRequests, bindingRequest{ timestamp: now, // valid }) a.pendingBindingRequests = append(a.pendingBindingRequests, bindingRequest{ timestamp: now.Add(-3900 * time.Millisecond), // valid }) a.pendingBindingRequests = append(a.pendingBindingRequests, bindingRequest{ timestamp: now.Add(-4100 * time.Millisecond), // invalid }) a.pendingBindingRequests = append(a.pendingBindingRequests, bindingRequest{ timestamp: now.Add(-75 * time.Hour), // invalid }) a.invalidatePendingBindingRequests(now) assert.Equal(t, expectedRemovalCount, len(a.pendingBindingRequests), "Binding invalidation due to timeout did not remove the correct number of binding requests") assert.NoError(t, a.Close()) } // TestAgentCredentials checks if local username fragments and passwords (if set) meet RFC standard // and ensure it's backwards compatible with previous versions of the pion/ice func TestAgentCredentials(t *testing.T) { report := test.CheckRoutines(t) defer report() // Make sure to pass Travis check by disabling the logs log := logging.NewDefaultLoggerFactory() log.DefaultLogLevel = logging.LogLevelDisabled // Agent should not require any of the usernames and password to be set // If set, they should follow the default 16/128 bits random number generator strategy agent, err := NewAgent(&AgentConfig{LoggerFactory: log}) assert.NoError(t, err) assert.GreaterOrEqual(t, len([]rune(agent.localUfrag))*8, 24) assert.GreaterOrEqual(t, len([]rune(agent.localPwd))*8, 128) assert.NoError(t, agent.Close()) // Should honor RFC standards // Local 
values MUST be unguessable, with at least 128 bits of // random number generator output used to generate the password, and // at least 24 bits of output to generate the username fragment. _, err = NewAgent(&AgentConfig{LocalUfrag: "xx", LoggerFactory: log}) assert.EqualError(t, err, ErrLocalUfragInsufficientBits.Error()) _, err = NewAgent(&AgentConfig{LocalPwd: "xxxxxx", LoggerFactory: log}) assert.EqualError(t, err, ErrLocalPwdInsufficientBits.Error()) } // Assert that Agent on Failure deletes all existing candidates // User can then do an ICE Restart to bring agent back func TestConnectionStateFailedDeleteAllCandidates(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 5) defer lim.Stop() oneSecond := time.Second KeepaliveInterval := time.Duration(0) cfg := &AgentConfig{ NetworkTypes: supportedNetworkTypes(), DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, KeepaliveInterval: &KeepaliveInterval, } aAgent, err := NewAgent(cfg) assert.NoError(t, err) bAgent, err := NewAgent(cfg) assert.NoError(t, err) isFailed := make(chan interface{}) assert.NoError(t, aAgent.OnConnectionStateChange(func(c ConnectionState) { if c == ConnectionStateFailed { close(isFailed) } })) connect(aAgent, bAgent) <-isFailed done := make(chan struct{}) assert.NoError(t, aAgent.run(context.Background(), func(ctx context.Context, agent *Agent) { assert.Equal(t, len(aAgent.remoteCandidates), 0) assert.Equal(t, len(aAgent.localCandidates), 0) close(done) })) <-done assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } // Assert that the ICE Agent can go directly from Connecting -> Failed on both sides func TestConnectionStateConnectingToFailed(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 5) defer lim.Stop() oneSecond := time.Second KeepaliveInterval := time.Duration(0) cfg := &AgentConfig{ DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, KeepaliveInterval: 
&KeepaliveInterval, } aAgent, err := NewAgent(cfg) assert.NoError(t, err) bAgent, err := NewAgent(cfg) assert.NoError(t, err) var isFailed sync.WaitGroup var isChecking sync.WaitGroup isFailed.Add(2) isChecking.Add(2) connectionStateCheck := func(c ConnectionState) { switch c { case ConnectionStateFailed: isFailed.Done() case ConnectionStateChecking: isChecking.Done() case ConnectionStateConnected: case ConnectionStateCompleted: t.Errorf("Unexpected ConnectionState: %v", c) } } assert.NoError(t, aAgent.OnConnectionStateChange(connectionStateCheck)) assert.NoError(t, bAgent.OnConnectionStateChange(connectionStateCheck)) go func() { _, err := aAgent.Accept(context.TODO(), "InvalidFrag", "InvalidPwd") assert.Error(t, err) }() go func() { _, err := bAgent.Dial(context.TODO(), "InvalidFrag", "InvalidPwd") assert.Error(t, err) }() isChecking.Wait() isFailed.Wait() assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } func TestAgentRestart(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() oneSecond := time.Second t.Run("Restart During Gather", func(t *testing.T) { connA, connB := pipe(&AgentConfig{ DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, }) ctx, cancel := context.WithCancel(context.Background()) assert.NoError(t, connB.agent.OnConnectionStateChange(func(c ConnectionState) { if c == ConnectionStateFailed || c == ConnectionStateDisconnected { cancel() } })) connA.agent.gatheringState = GatheringStateGathering assert.NoError(t, connA.agent.Restart("", "")) <-ctx.Done() assert.NoError(t, connA.agent.Close()) assert.NoError(t, connB.agent.Close()) }) t.Run("Restart When Closed", func(t *testing.T) { agent, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) assert.NoError(t, agent.Close()) assert.Equal(t, ErrClosed, agent.Restart("", "")) }) t.Run("Restart One Side", func(t *testing.T) { connA, connB := pipe(&AgentConfig{ DisconnectedTimeout: &oneSecond, FailedTimeout: 
&oneSecond, }) ctx, cancel := context.WithCancel(context.Background()) assert.NoError(t, connB.agent.OnConnectionStateChange(func(c ConnectionState) { if c == ConnectionStateFailed || c == ConnectionStateDisconnected { cancel() } })) assert.NoError(t, connA.agent.Restart("", "")) <-ctx.Done() assert.NoError(t, connA.agent.Close()) assert.NoError(t, connB.agent.Close()) }) t.Run("Restart Both Sides", func(t *testing.T) { // Get all addresses of candidates concatenated generateCandidateAddressStrings := func(candidates []Candidate, err error) (out string) { assert.NoError(t, err) for _, c := range candidates { out += c.Address() + ":" out += strconv.Itoa(c.Port()) } return } // Store the original candidates, confirm that after we reconnect we have new pairs connA, connB := pipe(&AgentConfig{ DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, }) connAFirstCandidates := generateCandidateAddressStrings(connA.agent.GetLocalCandidates()) connBFirstCandidates := generateCandidateAddressStrings(connB.agent.GetLocalCandidates()) aNotifier, aConnected := onConnected() assert.NoError(t, connA.agent.OnConnectionStateChange(aNotifier)) bNotifier, bConnected := onConnected() assert.NoError(t, connB.agent.OnConnectionStateChange(bNotifier)) // Restart and Re-Signal assert.NoError(t, connA.agent.Restart("", "")) assert.NoError(t, connB.agent.Restart("", "")) // Exchange Candidates and Credentials ufrag, pwd, err := connB.agent.GetLocalUserCredentials() assert.NoError(t, err) assert.NoError(t, connA.agent.SetRemoteCredentials(ufrag, pwd)) ufrag, pwd, err = connA.agent.GetLocalUserCredentials() assert.NoError(t, err) assert.NoError(t, connB.agent.SetRemoteCredentials(ufrag, pwd)) gatherAndExchangeCandidates(connA.agent, connB.agent) // Wait until both have gone back to connected <-aConnected <-bConnected // Assert that we have new candidates each time assert.NotEqual(t, connAFirstCandidates, generateCandidateAddressStrings(connA.agent.GetLocalCandidates())) 
assert.NotEqual(t, connBFirstCandidates, generateCandidateAddressStrings(connB.agent.GetLocalCandidates())) assert.NoError(t, connA.agent.Close()) assert.NoError(t, connB.agent.Close()) }) } func TestGetRemoteCredentials(t *testing.T) { var config AgentConfig a, err := NewAgent(&config) if err != nil { t.Fatalf("Error constructing ice.Agent: %v", err) } a.remoteUfrag = "remoteUfrag" a.remotePwd = "remotePwd" actualUfrag, actualPwd, err := a.GetRemoteUserCredentials() assert.NoError(t, err) assert.Equal(t, actualUfrag, a.remoteUfrag) assert.Equal(t, actualPwd, a.remotePwd) assert.NoError(t, a.Close()) } func TestCloseInConnectionStateCallback(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 5) defer lim.Stop() disconnectedDuration := time.Second failedDuration := time.Second KeepaliveInterval := time.Duration(0) CheckInterval := 500 * time.Millisecond cfg := &AgentConfig{ Urls: []*URL{}, NetworkTypes: supportedNetworkTypes(), DisconnectedTimeout: &disconnectedDuration, FailedTimeout: &failedDuration, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &CheckInterval, } aAgent, err := NewAgent(cfg) if err != nil { t.Error(err) } bAgent, err := NewAgent(cfg) if err != nil { t.Error(err) } isClosed := make(chan interface{}) isConnected := make(chan interface{}) err = aAgent.OnConnectionStateChange(func(c ConnectionState) { switch c { case ConnectionStateConnected: <-isConnected assert.NoError(t, aAgent.Close()) case ConnectionStateClosed: close(isClosed) } }) if err != nil { t.Error(err) } connect(aAgent, bAgent) close(isConnected) <-isClosed assert.NoError(t, bAgent.Close()) } func TestRunTaskInConnectionStateCallback(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 5) defer lim.Stop() oneSecond := time.Second KeepaliveInterval := time.Duration(0) CheckInterval := 50 * time.Millisecond cfg := &AgentConfig{ Urls: []*URL{}, NetworkTypes: supportedNetworkTypes(), 
DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &CheckInterval, } aAgent, err := NewAgent(cfg) check(err) bAgent, err := NewAgent(cfg) check(err) isComplete := make(chan interface{}) err = aAgent.OnConnectionStateChange(func(c ConnectionState) { if c == ConnectionStateConnected { _, _, errCred := aAgent.GetLocalUserCredentials() assert.NoError(t, errCred) assert.NoError(t, aAgent.Restart("", "")) close(isComplete) } }) if err != nil { t.Error(err) } connect(aAgent, bAgent) <-isComplete assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } func TestRunTaskInSelectedCandidatePairChangeCallback(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 5) defer lim.Stop() oneSecond := time.Second KeepaliveInterval := time.Duration(0) CheckInterval := 50 * time.Millisecond cfg := &AgentConfig{ Urls: []*URL{}, NetworkTypes: supportedNetworkTypes(), DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &CheckInterval, } aAgent, err := NewAgent(cfg) check(err) bAgent, err := NewAgent(cfg) check(err) isComplete := make(chan interface{}) isTested := make(chan interface{}) if err = aAgent.OnSelectedCandidatePairChange(func(Candidate, Candidate) { go func() { _, _, errCred := aAgent.GetLocalUserCredentials() assert.NoError(t, errCred) close(isTested) }() }); err != nil { t.Error(err) } if err = aAgent.OnConnectionStateChange(func(c ConnectionState) { if c == ConnectionStateConnected { close(isComplete) } }); err != nil { t.Error(err) } connect(aAgent, bAgent) <-isComplete <-isTested assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } // Assert that a Lite agent goes to disconnected and failed func TestLiteLifecycle(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() aNotifier, aConnected := onConnected() aAgent, err 
:= NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, }) require.NoError(t, err) require.NoError(t, aAgent.OnConnectionStateChange(aNotifier)) disconnectedDuration := time.Second failedDuration := time.Second KeepaliveInterval := time.Duration(0) CheckInterval := 500 * time.Millisecond bAgent, err := NewAgent(&AgentConfig{ Lite: true, CandidateTypes: []CandidateType{CandidateTypeHost}, NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, DisconnectedTimeout: &disconnectedDuration, FailedTimeout: &failedDuration, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &CheckInterval, }) require.NoError(t, err) bConnected := make(chan interface{}) bDisconnected := make(chan interface{}) bFailed := make(chan interface{}) require.NoError(t, bAgent.OnConnectionStateChange(func(c ConnectionState) { fmt.Println(c) switch c { case ConnectionStateConnected: close(bConnected) case ConnectionStateDisconnected: close(bDisconnected) case ConnectionStateFailed: close(bFailed) } })) connectWithVNet(bAgent, aAgent) <-aConnected <-bConnected assert.NoError(t, aAgent.Close()) <-bDisconnected <-bFailed assert.NoError(t, bAgent.Close()) } func TestNilCandidate(t *testing.T) { a, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) assert.NoError(t, a.AddRemoteCandidate(nil)) assert.NoError(t, a.Close()) } func TestNilCandidatePair(t *testing.T) { a, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) a.setSelectedPair(nil) assert.NoError(t, a.Close()) } func TestGetSelectedCandidatePair(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "0.0.0.0/0", LoggerFactory: logging.NewDefaultLoggerFactory(), }) assert.NoError(t, err) net, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.1"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net)) assert.NoError(t, 
wan.Start()) cfg := &AgentConfig{ NetworkTypes: supportedNetworkTypes(), Net: net, } aAgent, err := NewAgent(cfg) assert.NoError(t, err) bAgent, err := NewAgent(cfg) assert.NoError(t, err) aAgentPair, err := aAgent.GetSelectedCandidatePair() assert.NoError(t, err) assert.Nil(t, aAgentPair) bAgentPair, err := bAgent.GetSelectedCandidatePair() assert.NoError(t, err) assert.Nil(t, bAgentPair) connect(aAgent, bAgent) aAgentPair, err = aAgent.GetSelectedCandidatePair() assert.NoError(t, err) assert.NotNil(t, aAgentPair) bAgentPair, err = bAgent.GetSelectedCandidatePair() assert.NoError(t, err) assert.NotNil(t, bAgentPair) assert.True(t, bAgentPair.Local.Equal(aAgentPair.Remote)) assert.True(t, bAgentPair.Remote.Equal(aAgentPair.Local)) assert.NoError(t, wan.Stop()) assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } func TestAcceptAggressiveNomination(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() // Create a network with two interfaces wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "0.0.0.0/0", LoggerFactory: logging.NewDefaultLoggerFactory(), }) assert.NoError(t, err) net0, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.1"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net0)) net1, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.2", "192.168.0.3", "192.168.0.4"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net1)) assert.NoError(t, wan.Start()) aNotifier, aConnected := onConnected() bNotifier, bConnected := onConnected() KeepaliveInterval := time.Hour cfg0 := &AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net0, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &KeepaliveInterval, AcceptAggressiveNomination: true, } var aAgent, bAgent *Agent aAgent, err = NewAgent(cfg0) require.NoError(t, err) require.NoError(t, aAgent.OnConnectionStateChange(aNotifier)) cfg1 
:= &AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net1, KeepaliveInterval: &KeepaliveInterval, CheckInterval: &KeepaliveInterval, } bAgent, err = NewAgent(cfg1) require.NoError(t, err) require.NoError(t, bAgent.OnConnectionStateChange(bNotifier)) aConn, bConn := connect(aAgent, bAgent) // Ensure pair selected // Note: this assumes ConnectionStateConnected is thrown after selecting the final pair <-aConnected <-bConnected // Send new USE-CANDIDATE message with higher priority to update the selected pair buildMsg := func(class stun.MessageClass, username, key string, priority uint32) *stun.Message { msg, err1 := stun.Build(stun.NewType(stun.MethodBinding, class), stun.TransactionID, stun.NewUsername(username), stun.NewShortTermIntegrity(key), UseCandidate(), PriorityAttr(priority), stun.Fingerprint, ) if err1 != nil { t.Fatal(err1) } return msg } selectedCh := make(chan Candidate, 1) var expectNewSelectedCandidate Candidate err = aAgent.OnSelectedCandidatePairChange(func(_, remote Candidate) { selectedCh <- remote }) require.NoError(t, err) var bcandidates []Candidate bcandidates, err = bAgent.GetLocalCandidates() require.NoError(t, err) for _, c := range bcandidates { if c != bAgent.getSelectedPair().Local { if expectNewSelectedCandidate == nil { incr_priority: for _, candidates := range aAgent.remoteCandidates { for _, candidate := range candidates { if candidate.Equal(c) { candidate.(*CandidateHost).priorityOverride += 1000 //nolint:forcetypeassert break incr_priority } } } expectNewSelectedCandidate = c } _, err = c.writeTo(buildMsg(stun.ClassRequest, aAgent.localUfrag+":"+aAgent.remoteUfrag, aAgent.localPwd, c.Priority()).Raw, bAgent.getSelectedPair().Remote) require.NoError(t, err) } } time.Sleep(1 * time.Second) select { case selected := <-selectedCh: assert.True(t, selected.Equal(expectNewSelectedCandidate)) default: t.Fatal("No selected candidate pair") } assert.NoError(t, wan.Stop()) if !closePipe(t, 
aConn, bConn) { return } } ice-2.3.1/agent_udpmux_test.go000066400000000000000000000043061437620344400163550ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "net" "testing" "time" "github.com/pion/logging" "github.com/pion/transport/v2/test" "github.com/stretchr/testify/require" ) // TestMuxAgent is an end to end test over UDP mux, ensuring two agents could connect over mux func TestMuxAgent(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() const muxPort = 7686 caseAddrs := map[string]*net.UDPAddr{ "unspecified": {Port: muxPort}, "ipv4Loopback": {IP: net.IPv4(127, 0, 0, 1), Port: muxPort}, } for subTest, addr := range caseAddrs { muxAddr := addr t.Run(subTest, func(t *testing.T) { c, err := net.ListenUDP("udp", muxAddr) require.NoError(t, err) loggerFactory := logging.NewDefaultLoggerFactory() udpMux := NewUDPMuxDefault(UDPMuxParams{ Logger: loggerFactory.NewLogger("ice"), UDPConn: c, }) muxedA, err := NewAgent(&AgentConfig{ UDPMux: udpMux, CandidateTypes: []CandidateType{CandidateTypeHost}, NetworkTypes: []NetworkType{ NetworkTypeUDP4, }, }) require.NoError(t, err) a, err := NewAgent(&AgentConfig{ CandidateTypes: []CandidateType{CandidateTypeHost}, NetworkTypes: supportedNetworkTypes(), }) require.NoError(t, err) conn, muxedConn := connect(a, muxedA) pair := muxedA.getSelectedPair() require.NotNil(t, pair) require.Equal(t, muxPort, pair.Local.Port()) // send a packet to Mux data := []byte("hello world") _, err = conn.Write(data) require.NoError(t, err) buf := make([]byte, 1024) n, err := muxedConn.Read(buf) require.NoError(t, err) require.Equal(t, data, buf[:n]) // send a packet from Mux _, err = muxedConn.Write(data) require.NoError(t, err) n, err = conn.Read(buf) require.NoError(t, err) require.Equal(t, data, buf[:n]) // close it down require.NoError(t, conn.Close()) require.NoError(t, muxedConn.Close()) require.NoError(t, udpMux.Close()) // expect error when 
reading from closed mux _, err = muxedConn.Read(data) require.Error(t, err) // expect error when writing to closed mux _, err = muxedConn.Write(data) require.Error(t, err) }) } } ice-2.3.1/candidate.go000066400000000000000000000030761437620344400145350ustar00rootroot00000000000000package ice import ( "context" "net" "time" ) const ( receiveMTU = 8192 defaultLocalPreference = 65535 // ComponentRTP indicates that the candidate is used for RTP ComponentRTP uint16 = 1 // ComponentRTCP indicates that the candidate is used for RTCP ComponentRTCP ) // Candidate represents an ICE candidate type Candidate interface { // An arbitrary string used in the freezing algorithm to // group similar candidates. It is the same for two candidates that // have the same type, base IP address, protocol (UDP, TCP, etc.), // and STUN or TURN server. Foundation() string // ID is a unique identifier for just this candidate // Unlike the foundation this is different for each candidate ID() string // A component is a piece of a data stream. 
// An example is one for RTP, and one for RTCP Component() uint16 SetComponent(uint16) // The last time this candidate received traffic LastReceived() time.Time // The last time this candidate sent traffic LastSent() time.Time NetworkType() NetworkType Address() string Port() int Priority() uint32 // A transport address related to a // candidate, which is useful for diagnostics and other purposes RelatedAddress() *CandidateRelatedAddress String() string Type() CandidateType TCPType() TCPType Equal(other Candidate) bool Marshal() string addr() net.Addr agent() *Agent context() context.Context close() error copy() (Candidate, error) seen(outbound bool) start(a *Agent, conn net.PacketConn, initializedCh <-chan struct{}) writeTo(raw []byte, dst Candidate) (int, error) } ice-2.3.1/candidate_base.go000066400000000000000000000313671437620344400155330ustar00rootroot00000000000000package ice import ( "context" "errors" "fmt" "hash/crc32" "io" "net" "strconv" "strings" "sync/atomic" "time" "github.com/pion/stun" ) type candidateBase struct { id string networkType NetworkType candidateType CandidateType component uint16 address string port int relatedAddress *CandidateRelatedAddress tcpType TCPType resolvedAddr net.Addr lastSent atomic.Value lastReceived atomic.Value conn net.PacketConn currAgent *Agent closeCh chan struct{} closedCh chan struct{} foundationOverride string priorityOverride uint32 } // Done implements context.Context func (c *candidateBase) Done() <-chan struct{} { return c.closeCh } // Err implements context.Context func (c *candidateBase) Err() error { select { case <-c.closedCh: return ErrRunCanceled default: return nil } } // Deadline implements context.Context func (c *candidateBase) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } // Value implements context.Context func (c *candidateBase) Value(key interface{}) interface{} { return nil } // ID returns Candidate ID func (c *candidateBase) ID() string { return c.id } func (c 
*candidateBase) Foundation() string { if c.foundationOverride != "" { return c.foundationOverride } return fmt.Sprintf("%d", crc32.ChecksumIEEE([]byte(c.Type().String()+c.address+c.networkType.String()))) } // Address returns Candidate Address func (c *candidateBase) Address() string { return c.address } // Port returns Candidate Port func (c *candidateBase) Port() int { return c.port } // Type returns candidate type func (c *candidateBase) Type() CandidateType { return c.candidateType } // NetworkType returns candidate NetworkType func (c *candidateBase) NetworkType() NetworkType { return c.networkType } // Component returns candidate component func (c *candidateBase) Component() uint16 { return c.component } func (c *candidateBase) SetComponent(component uint16) { c.component = component } // LocalPreference returns the local preference for this candidate func (c *candidateBase) LocalPreference() uint16 { if c.NetworkType().IsTCP() { // RFC 6544, section 4.2 // // In Section 4.1.2.1 of [RFC5245], a recommended formula for UDP ICE // candidate prioritization is defined. For TCP candidates, the same // formula and candidate type preferences SHOULD be used, and the // RECOMMENDED type preferences for the new candidate types defined in // this document (see Section 5) are 105 for NAT-assisted candidates and // 75 for UDP-tunneled candidates. // // (...) // // With TCP candidates, the local preference part of the recommended // priority formula is updated to also include the directionality // (active, passive, or simultaneous-open) of the TCP connection. The // RECOMMENDED local preference is then defined as: // // local preference = (2^13) * direction-pref + other-pref // // The direction-pref MUST be between 0 and 7 (both inclusive), with 7 // being the most preferred. The other-pref MUST be between 0 and 8191 // (both inclusive), with 8191 being the most preferred. 
It is // RECOMMENDED that the host, UDP-tunneled, and relayed TCP candidates // have the direction-pref assigned as follows: 6 for active, 4 for // passive, and 2 for S-O. For the NAT-assisted and server reflexive // candidates, the RECOMMENDED values are: 6 for S-O, 4 for active, and // 2 for passive. // // (...) // // If any two candidates have the same type-preference and direction- // pref, they MUST have a unique other-pref. With this specification, // this usually only happens with multi-homed hosts, in which case // other-pref is the preference for the particular IP address from which // the candidate was obtained. When there is only a single IP address, // this value SHOULD be set to the maximum allowed value (8191). var otherPref uint16 = 8191 directionPref := func() uint16 { switch c.Type() { case CandidateTypeHost, CandidateTypeRelay: switch c.tcpType { case TCPTypeActive: return 6 case TCPTypePassive: return 4 case TCPTypeSimultaneousOpen: return 2 case TCPTypeUnspecified: return 0 } case CandidateTypePeerReflexive, CandidateTypeServerReflexive: switch c.tcpType { case TCPTypeSimultaneousOpen: return 6 case TCPTypeActive: return 4 case TCPTypePassive: return 2 case TCPTypeUnspecified: return 0 } case CandidateTypeUnspecified: return 0 } return 0 }() return (1<<13)*directionPref + otherPref } return defaultLocalPreference } // RelatedAddress returns *CandidateRelatedAddress func (c *candidateBase) RelatedAddress() *CandidateRelatedAddress { return c.relatedAddress } func (c *candidateBase) TCPType() TCPType { return c.tcpType } // start runs the candidate using the provided connection func (c *candidateBase) start(a *Agent, conn net.PacketConn, initializedCh <-chan struct{}) { if c.conn != nil { c.agent().log.Warn("Can't start already started candidateBase") return } c.currAgent = a c.conn = conn c.closeCh = make(chan struct{}) c.closedCh = make(chan struct{}) go c.recvLoop(initializedCh) } func (c *candidateBase) recvLoop(initializedCh <-chan struct{}) 
{ a := c.agent() defer close(c.closedCh) select { case <-initializedCh: case <-c.closeCh: return } buf := make([]byte, receiveMTU) for { n, srcAddr, err := c.conn.ReadFrom(buf) if err != nil { if !(errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed)) { a.log.Warnf("Failed to read from candidate %s: %v", c, err) } return } c.handleInboundPacket(buf[:n], srcAddr) } } func (c *candidateBase) handleInboundPacket(buf []byte, srcAddr net.Addr) { a := c.agent() if stun.IsMessage(buf) { m := &stun.Message{ Raw: make([]byte, len(buf)), } // Explicitly copy raw buffer so Message can own the memory. copy(m.Raw, buf) if err := m.Decode(); err != nil { a.log.Warnf("Failed to handle decode ICE from %s to %s: %v", c.addr(), srcAddr, err) return } if err := a.run(c, func(ctx context.Context, a *Agent) { a.handleInbound(m, c, srcAddr) }); err != nil { a.log.Warnf("Failed to handle message: %v", err) } return } if !a.validateNonSTUNTraffic(c, srcAddr) { //nolint:contextcheck a.log.Warnf("Discarded message from %s, not a valid remote candidate", c.addr()) return } // Note: This will return packetio.ErrFull if the buffer ever manages to fill up. 
if _, err := a.buf.Write(buf); err != nil { a.log.Warnf("Failed to write packet: %s", err) return } } // close stops the recvLoop func (c *candidateBase) close() error { // If conn has never been started will be nil if c.Done() == nil { return nil } // Assert that conn has not already been closed select { case <-c.Done(): return nil default: } var firstErr error // Unblock recvLoop close(c.closeCh) if err := c.conn.SetDeadline(time.Now()); err != nil { firstErr = err } // Close the conn if err := c.conn.Close(); err != nil && firstErr == nil { firstErr = err } if firstErr != nil { return firstErr } // Wait until the recvLoop is closed <-c.closedCh return nil } func (c *candidateBase) writeTo(raw []byte, dst Candidate) (int, error) { n, err := c.conn.WriteTo(raw, dst.addr()) if err != nil { // If the connection is closed, we should return the error if errors.Is(err, io.ErrClosedPipe) { return n, err } c.agent().log.Infof("%s: %v", errSendPacket, err) return n, nil } c.seen(true) return n, nil } // Priority computes the priority for this ICE Candidate func (c *candidateBase) Priority() uint32 { if c.priorityOverride != 0 { return c.priorityOverride } // The local preference MUST be an integer from 0 (lowest preference) to // 65535 (highest preference) inclusive. When there is only a single IP // address, this value SHOULD be set to 65535. If there are multiple // candidates for a particular component for a particular data stream // that have the same type, the local preference MUST be unique for each // one. 
return (1<<24)*uint32(c.Type().Preference()) + (1<<8)*uint32(c.LocalPreference()) + uint32(256-c.Component()) } // Equal is used to compare two candidateBases func (c *candidateBase) Equal(other Candidate) bool { return c.NetworkType() == other.NetworkType() && c.Type() == other.Type() && c.Address() == other.Address() && c.Port() == other.Port() && c.TCPType() == other.TCPType() && c.RelatedAddress().Equal(other.RelatedAddress()) } // String makes the candidateBase printable func (c *candidateBase) String() string { return fmt.Sprintf("%s %s %s%s", c.NetworkType(), c.Type(), net.JoinHostPort(c.Address(), strconv.Itoa(c.Port())), c.relatedAddress) } // LastReceived returns a time.Time indicating the last time // this candidate was received func (c *candidateBase) LastReceived() time.Time { if lastReceived, ok := c.lastReceived.Load().(time.Time); ok { return lastReceived } return time.Time{} } func (c *candidateBase) setLastReceived(t time.Time) { c.lastReceived.Store(t) } // LastSent returns a time.Time indicating the last time // this candidate was sent func (c *candidateBase) LastSent() time.Time { if lastSent, ok := c.lastSent.Load().(time.Time); ok { return lastSent } return time.Time{} } func (c *candidateBase) setLastSent(t time.Time) { c.lastSent.Store(t) } func (c *candidateBase) seen(outbound bool) { if outbound { c.setLastSent(time.Now()) } else { c.setLastReceived(time.Now()) } } func (c *candidateBase) addr() net.Addr { return c.resolvedAddr } func (c *candidateBase) agent() *Agent { return c.currAgent } func (c *candidateBase) context() context.Context { return c } func (c *candidateBase) copy() (Candidate, error) { return UnmarshalCandidate(c.Marshal()) } // Marshal returns the string representation of the ICECandidate func (c *candidateBase) Marshal() string { val := c.Foundation() if val == " " { val = "" } val = fmt.Sprintf("%s %d %s %d %s %d typ %s", val, c.Component(), c.NetworkType().NetworkShort(), c.Priority(), c.Address(), c.Port(), 
c.Type()) if c.tcpType != TCPTypeUnspecified { val += fmt.Sprintf(" tcptype %s", c.tcpType.String()) } if r := c.RelatedAddress(); r != nil && r.Address != "" && r.Port != 0 { val = fmt.Sprintf("%s raddr %s rport %d", val, r.Address, r.Port) } return val } // UnmarshalCandidate creates a Candidate from its string representation func UnmarshalCandidate(raw string) (Candidate, error) { split := strings.Fields(raw) // Foundation not specified: not RFC 8445 compliant but seen in the wild if len(raw) != 0 && raw[0] == ' ' { split = append([]string{" "}, split...) } if len(split) < 8 { return nil, fmt.Errorf("%w (%d)", errAttributeTooShortICECandidate, len(split)) } // Foundation foundation := split[0] // Component rawComponent, err := strconv.ParseUint(split[1], 10, 16) if err != nil { return nil, fmt.Errorf("%w: %v", errParseComponent, err) } component := uint16(rawComponent) // Protocol protocol := split[2] // Priority priorityRaw, err := strconv.ParseUint(split[3], 10, 32) if err != nil { return nil, fmt.Errorf("%w: %v", errParsePriority, err) } priority := uint32(priorityRaw) // Address address := split[4] // Port rawPort, err := strconv.ParseUint(split[5], 10, 16) if err != nil { return nil, fmt.Errorf("%w: %v", errParsePort, err) } port := int(rawPort) typ := split[7] relatedAddress := "" relatedPort := 0 tcpType := TCPTypeUnspecified if len(split) > 8 { split = split[8:] if split[0] == "raddr" { if len(split) < 4 { return nil, fmt.Errorf("%w: incorrect length", errParseRelatedAddr) } // RelatedAddress relatedAddress = split[1] // RelatedPort rawRelatedPort, parseErr := strconv.ParseUint(split[3], 10, 16) if parseErr != nil { return nil, fmt.Errorf("%w: %v", errParsePort, parseErr) } relatedPort = int(rawRelatedPort) } else if split[0] == "tcptype" { if len(split) < 2 { return nil, fmt.Errorf("%w: incorrect length", errParseTCPType) } tcpType = NewTCPType(split[1]) } } switch typ { case "host": return NewCandidateHost(&CandidateHostConfig{"", protocol, address, 
port, component, priority, foundation, tcpType}) case "srflx": return NewCandidateServerReflexive(&CandidateServerReflexiveConfig{"", protocol, address, port, component, priority, foundation, relatedAddress, relatedPort}) case "prflx": return NewCandidatePeerReflexive(&CandidatePeerReflexiveConfig{"", protocol, address, port, component, priority, foundation, relatedAddress, relatedPort}) case "relay": return NewCandidateRelay(&CandidateRelayConfig{"", protocol, address, port, component, priority, foundation, relatedAddress, relatedPort, "", nil}) default: } return nil, fmt.Errorf("%w (%s)", ErrUnknownCandidateTyp, typ) } ice-2.3.1/candidate_host.go000066400000000000000000000032211437620344400155620ustar00rootroot00000000000000package ice import ( "net" "strings" ) // CandidateHost is a candidate of type host type CandidateHost struct { candidateBase network string } // CandidateHostConfig is the config required to create a new CandidateHost type CandidateHostConfig struct { CandidateID string Network string Address string Port int Component uint16 Priority uint32 Foundation string TCPType TCPType } // NewCandidateHost creates a new host candidate func NewCandidateHost(config *CandidateHostConfig) (*CandidateHost, error) { candidateID := config.CandidateID if candidateID == "" { candidateID = globalCandidateIDGenerator.Generate() } c := &CandidateHost{ candidateBase: candidateBase{ id: candidateID, address: config.Address, candidateType: CandidateTypeHost, component: config.Component, port: config.Port, tcpType: config.TCPType, foundationOverride: config.Foundation, priorityOverride: config.Priority, }, network: config.Network, } if !strings.HasSuffix(config.Address, ".local") { ip := net.ParseIP(config.Address) if ip == nil { return nil, ErrAddressParseFailed } if err := c.setIP(ip); err != nil { return nil, err } } else { // Until mDNS candidate is resolved assume it is UDPv4 c.candidateBase.networkType = NetworkTypeUDP4 } return c, nil } func (c *CandidateHost) 
setIP(ip net.IP) error { networkType, err := determineNetworkType(c.network, ip) if err != nil { return err } c.candidateBase.networkType = networkType c.candidateBase.resolvedAddr = createAddr(networkType, ip, c.port) return nil } ice-2.3.1/candidate_peer_reflexive.go000066400000000000000000000030341437620344400176130ustar00rootroot00000000000000// Package ice ... // //nolint:dupl package ice import "net" // CandidatePeerReflexive ... type CandidatePeerReflexive struct { candidateBase } // CandidatePeerReflexiveConfig is the config required to create a new CandidatePeerReflexive type CandidatePeerReflexiveConfig struct { CandidateID string Network string Address string Port int Component uint16 Priority uint32 Foundation string RelAddr string RelPort int } // NewCandidatePeerReflexive creates a new peer reflective candidate func NewCandidatePeerReflexive(config *CandidatePeerReflexiveConfig) (*CandidatePeerReflexive, error) { ip := net.ParseIP(config.Address) if ip == nil { return nil, ErrAddressParseFailed } networkType, err := determineNetworkType(config.Network, ip) if err != nil { return nil, err } candidateID := config.CandidateID candidateIDGenerator := newCandidateIDGenerator() if candidateID == "" { candidateID = candidateIDGenerator.Generate() } return &CandidatePeerReflexive{ candidateBase: candidateBase{ id: candidateID, networkType: networkType, candidateType: CandidateTypePeerReflexive, address: config.Address, port: config.Port, resolvedAddr: createAddr(networkType, ip, config.Port), component: config.Component, foundationOverride: config.Foundation, priorityOverride: config.Priority, relatedAddress: &CandidateRelatedAddress{ Address: config.RelAddr, Port: config.RelPort, }, }, }, nil } ice-2.3.1/candidate_relay.go000066400000000000000000000041161437620344400157250ustar00rootroot00000000000000package ice import ( "net" ) // CandidateRelay ... 
type CandidateRelay struct { candidateBase relayProtocol string onClose func() error } // CandidateRelayConfig is the config required to create a new CandidateRelay type CandidateRelayConfig struct { CandidateID string Network string Address string Port int Component uint16 Priority uint32 Foundation string RelAddr string RelPort int RelayProtocol string OnClose func() error } // NewCandidateRelay creates a new relay candidate func NewCandidateRelay(config *CandidateRelayConfig) (*CandidateRelay, error) { candidateID := config.CandidateID if candidateID == "" { candidateID = globalCandidateIDGenerator.Generate() } ip := net.ParseIP(config.Address) if ip == nil { return nil, ErrAddressParseFailed } networkType, err := determineNetworkType(config.Network, ip) if err != nil { return nil, err } return &CandidateRelay{ candidateBase: candidateBase{ id: candidateID, networkType: networkType, candidateType: CandidateTypeRelay, address: config.Address, port: config.Port, resolvedAddr: &net.UDPAddr{IP: ip, Port: config.Port}, component: config.Component, foundationOverride: config.Foundation, priorityOverride: config.Priority, relatedAddress: &CandidateRelatedAddress{ Address: config.RelAddr, Port: config.RelPort, }, }, relayProtocol: config.RelayProtocol, onClose: config.OnClose, }, nil } // RelayProtocol returns the protocol used between the endpoint and the relay server. 
func (c *CandidateRelay) RelayProtocol() string { return c.relayProtocol } func (c *CandidateRelay) close() error { err := c.candidateBase.close() if c.onClose != nil { err = c.onClose() c.onClose = nil } return err } func (c *CandidateRelay) copy() (Candidate, error) { cc, err := c.candidateBase.copy() if err != nil { return nil, err } if ccr, ok := cc.(*CandidateRelay); ok { ccr.relayProtocol = c.relayProtocol } return cc, nil } ice-2.3.1/candidate_relay_test.go000066400000000000000000000035431437620344400167670ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "net" "strconv" "testing" "time" "github.com/pion/transport/v2/test" "github.com/pion/turn/v2" "github.com/stretchr/testify/assert" ) func optimisticAuthHandler(username string, realm string, srcAddr net.Addr) (key []byte, ok bool) { return turn.GenerateAuthKey("username", "pion.ly", "password"), true } func TestRelayOnlyConnection(t *testing.T) { // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 30) defer lim.Stop() report := test.CheckRoutines(t) defer report() serverPort := randomPort(t) serverListener, err := net.ListenPacket("udp", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) server, err := turn.NewServer(turn.ServerConfig{ Realm: "pion.ly", AuthHandler: optimisticAuthHandler, PacketConnConfigs: []turn.PacketConnConfig{ { PacketConn: serverListener, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }, }, }) assert.NoError(t, err) cfg := &AgentConfig{ NetworkTypes: supportedNetworkTypes(), Urls: []*URL{ { Scheme: SchemeTypeTURN, Host: "127.0.0.1", Username: "username", Password: "password", Port: serverPort, Proto: ProtoTypeUDP, }, }, CandidateTypes: []CandidateType{CandidateTypeRelay}, } aAgent, err := NewAgent(cfg) if err != nil { t.Fatal(err) } aNotifier, aConnected := onConnected() if err = aAgent.OnConnectionStateChange(aNotifier); err != nil { t.Fatal(err) } bAgent, err := NewAgent(cfg) if err != nil { 
t.Fatal(err) } bNotifier, bConnected := onConnected() if err = bAgent.OnConnectionStateChange(bNotifier); err != nil { t.Fatal(err) } connect(aAgent, bAgent) <-aConnected <-bConnected assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) assert.NoError(t, server.Close()) } ice-2.3.1/candidate_server_reflexive.go000066400000000000000000000027421437620344400201730ustar00rootroot00000000000000package ice import "net" // CandidateServerReflexive ... type CandidateServerReflexive struct { candidateBase } // CandidateServerReflexiveConfig is the config required to create a new CandidateServerReflexive type CandidateServerReflexiveConfig struct { CandidateID string Network string Address string Port int Component uint16 Priority uint32 Foundation string RelAddr string RelPort int } // NewCandidateServerReflexive creates a new server reflective candidate func NewCandidateServerReflexive(config *CandidateServerReflexiveConfig) (*CandidateServerReflexive, error) { ip := net.ParseIP(config.Address) if ip == nil { return nil, ErrAddressParseFailed } networkType, err := determineNetworkType(config.Network, ip) if err != nil { return nil, err } candidateID := config.CandidateID if candidateID == "" { candidateID = globalCandidateIDGenerator.Generate() } return &CandidateServerReflexive{ candidateBase: candidateBase{ id: candidateID, networkType: networkType, candidateType: CandidateTypeServerReflexive, address: config.Address, port: config.Port, resolvedAddr: &net.UDPAddr{IP: ip, Port: config.Port}, component: config.Component, foundationOverride: config.Foundation, priorityOverride: config.Priority, relatedAddress: &CandidateRelatedAddress{ Address: config.RelAddr, Port: config.RelPort, }, }, }, nil } ice-2.3.1/candidate_server_reflexive_test.go000066400000000000000000000031741437620344400212320ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "net" "strconv" "testing" "time" "github.com/pion/transport/v2/test" 
"github.com/pion/turn/v2" "github.com/stretchr/testify/assert" ) func TestServerReflexiveOnlyConnection(t *testing.T) { report := test.CheckRoutines(t) defer report() // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 30) defer lim.Stop() serverPort := randomPort(t) serverListener, err := net.ListenPacket("udp4", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) server, err := turn.NewServer(turn.ServerConfig{ Realm: "pion.ly", AuthHandler: optimisticAuthHandler, PacketConnConfigs: []turn.PacketConnConfig{ { PacketConn: serverListener, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }, }, }) assert.NoError(t, err) cfg := &AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4}, Urls: []*URL{ { Scheme: SchemeTypeSTUN, Host: "127.0.0.1", Port: serverPort, }, }, CandidateTypes: []CandidateType{CandidateTypeServerReflexive}, } aAgent, err := NewAgent(cfg) if err != nil { t.Fatal(err) } aNotifier, aConnected := onConnected() if err = aAgent.OnConnectionStateChange(aNotifier); err != nil { t.Fatal(err) } bAgent, err := NewAgent(cfg) if err != nil { t.Fatal(err) } bNotifier, bConnected := onConnected() if err = bAgent.OnConnectionStateChange(bNotifier); err != nil { t.Fatal(err) } connect(aAgent, bAgent) <-aConnected <-bConnected assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) assert.NoError(t, server.Close()) } ice-2.3.1/candidate_test.go000066400000000000000000000233111437620344400155660ustar00rootroot00000000000000package ice import ( "net" "testing" "time" "github.com/pion/logging" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCandidatePriority(t *testing.T) { for _, test := range []struct { Candidate Candidate WantPriority uint32 }{ { Candidate: &CandidateHost{ candidateBase: candidateBase{ candidateType: CandidateTypeHost, component: ComponentRTP, }, }, WantPriority: 2130706431, }, { Candidate: &CandidateHost{ candidateBase: candidateBase{ 
candidateType: CandidateTypeHost, component: ComponentRTP, networkType: NetworkTypeTCP4, tcpType: TCPTypeActive, }, }, WantPriority: 2128609279, }, { Candidate: &CandidateHost{ candidateBase: candidateBase{ candidateType: CandidateTypeHost, component: ComponentRTP, networkType: NetworkTypeTCP4, tcpType: TCPTypePassive, }, }, WantPriority: 2124414975, }, { Candidate: &CandidateHost{ candidateBase: candidateBase{ candidateType: CandidateTypeHost, component: ComponentRTP, networkType: NetworkTypeTCP4, tcpType: TCPTypeSimultaneousOpen, }, }, WantPriority: 2120220671, }, { Candidate: &CandidatePeerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypePeerReflexive, component: ComponentRTP, }, }, WantPriority: 1862270975, }, { Candidate: &CandidatePeerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypePeerReflexive, component: ComponentRTP, networkType: NetworkTypeTCP6, tcpType: TCPTypeSimultaneousOpen, }, }, WantPriority: 1860173823, }, { Candidate: &CandidatePeerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypePeerReflexive, component: ComponentRTP, networkType: NetworkTypeTCP6, tcpType: TCPTypeActive, }, }, WantPriority: 1855979519, }, { Candidate: &CandidatePeerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypePeerReflexive, component: ComponentRTP, networkType: NetworkTypeTCP6, tcpType: TCPTypePassive, }, }, WantPriority: 1851785215, }, { Candidate: &CandidateServerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypeServerReflexive, component: ComponentRTP, }, }, WantPriority: 1694498815, }, { Candidate: &CandidateRelay{ candidateBase: candidateBase{ candidateType: CandidateTypeRelay, component: ComponentRTP, }, }, WantPriority: 16777215, }, } { if got, want := test.Candidate.Priority(), test.WantPriority; got != want { t.Fatalf("Candidate(%v).Priority() = %d, want %d", test.Candidate, got, want) } } } func TestCandidateLastSent(t *testing.T) { candidate := candidateBase{} 
assert.Equal(t, candidate.LastSent(), time.Time{}) now := time.Now() candidate.setLastSent(now) assert.Equal(t, candidate.LastSent(), now) } func TestCandidateLastReceived(t *testing.T) { candidate := candidateBase{} assert.Equal(t, candidate.LastReceived(), time.Time{}) now := time.Now() candidate.setLastReceived(now) assert.Equal(t, candidate.LastReceived(), now) } func TestCandidateFoundation(t *testing.T) { // All fields are the same assert.Equal(t, (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", }).Foundation(), (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", }).Foundation()) // Different Address assert.NotEqual(t, (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", }).Foundation(), (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "B", }).Foundation()) // Different networkType assert.NotEqual(t, (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", }).Foundation(), (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP6, address: "A", }).Foundation()) // Different candidateType assert.NotEqual(t, (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", }).Foundation(), (&candidateBase{ candidateType: CandidateTypePeerReflexive, networkType: NetworkTypeUDP4, address: "A", }).Foundation()) // Port has no effect assert.Equal(t, (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", port: 8080, }).Foundation(), (&candidateBase{ candidateType: CandidateTypeHost, networkType: NetworkTypeUDP4, address: "A", port: 80, }).Foundation()) } func TestCandidateMarshal(t *testing.T) { for _, test := range []struct { candidate Candidate marshaled string expectError bool }{ { &CandidateHost{ candidateBase{ networkType: NetworkTypeUDP6, candidateType: 
CandidateTypeHost, address: "fcd9:e3b8:12ce:9fc5:74a5:c6bb:d8b:e08a", port: 53987, priorityOverride: 500, foundationOverride: "750", }, "", }, "750 1 udp 500 fcd9:e3b8:12ce:9fc5:74a5:c6bb:d8b:e08a 53987 typ host", false, }, { &CandidateHost{ candidateBase{ networkType: NetworkTypeUDP4, candidateType: CandidateTypeHost, address: "10.0.75.1", port: 53634, }, "", }, "4273957277 1 udp 2130706431 10.0.75.1 53634 typ host", false, }, { &CandidateServerReflexive{ candidateBase{ networkType: NetworkTypeUDP4, candidateType: CandidateTypeServerReflexive, address: "191.228.238.68", port: 53991, relatedAddress: &CandidateRelatedAddress{"192.168.0.274", 53991}, }, }, "647372371 1 udp 1694498815 191.228.238.68 53991 typ srflx raddr 192.168.0.274 rport 53991", false, }, { &CandidateRelay{ candidateBase{ networkType: NetworkTypeUDP4, candidateType: CandidateTypeRelay, address: "50.0.0.1", port: 5000, relatedAddress: &CandidateRelatedAddress{"192.168.0.1", 5001}, }, "", nil, }, "848194626 1 udp 16777215 50.0.0.1 5000 typ relay raddr 192.168.0.1 rport 5001", false, }, { &CandidateHost{ candidateBase{ networkType: NetworkTypeTCP4, candidateType: CandidateTypeHost, address: "192.168.0.196", port: 0, tcpType: TCPTypeActive, }, "", }, "1052353102 1 tcp 2128609279 192.168.0.196 0 typ host tcptype active", false, }, { &CandidateHost{ candidateBase{ networkType: NetworkTypeUDP4, candidateType: CandidateTypeHost, address: "e2494022-4d9a-4c1e-a750-cc48d4f8d6ee.local", port: 60542, }, "", }, "1380287402 1 udp 2130706431 e2494022-4d9a-4c1e-a750-cc48d4f8d6ee.local 60542 typ host", false, }, // Missing Foundation { &CandidateHost{ candidateBase{ networkType: NetworkTypeUDP4, candidateType: CandidateTypeHost, address: "127.0.0.1", port: 80, priorityOverride: 500, foundationOverride: " ", }, "", }, " 1 udp 500 127.0.0.1 80 typ host", false, }, // Invalid candidates {nil, "", true}, {nil, "1938809241", true}, {nil, "1986380506 99999999 udp 2122063615 10.0.75.1 53634 typ host generation 0 network-id 
2", true}, {nil, "1986380506 1 udp 99999999999 10.0.75.1 53634 typ host", true}, {nil, "4207374051 1 udp 1685790463 191.228.238.68 99999999 typ srflx raddr 192.168.0.278 rport 53991 generation 0 network-id 3", true}, {nil, "4207374051 1 udp 1685790463 191.228.238.68 53991 typ srflx raddr", true}, {nil, "4207374051 1 udp 1685790463 191.228.238.68 53991 typ srflx raddr 192.168.0.278 rport 99999999 generation 0 network-id 3", true}, {nil, "4207374051 INVALID udp 2130706431 10.0.75.1 53634 typ host", true}, {nil, "4207374051 1 udp INVALID 10.0.75.1 53634 typ host", true}, {nil, "4207374051 INVALID udp 2130706431 10.0.75.1 INVALID typ host", true}, {nil, "4207374051 1 udp 2130706431 10.0.75.1 53634 typ INVALID", true}, {nil, "4207374051 1 INVALID 2130706431 10.0.75.1 53634 typ host", true}, } { actualCandidate, err := UnmarshalCandidate(test.marshaled) if test.expectError { assert.Error(t, err) continue } assert.NoError(t, err) assert.True(t, test.candidate.Equal(actualCandidate)) assert.Equal(t, test.marshaled, actualCandidate.Marshal()) } } func TestCandidateWriteTo(t *testing.T) { listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, Port: 0, }) require.NoError(t, err, "error creating test tcp listener") conn, err := net.DialTCP("tcp", nil, listener.Addr().(*net.TCPAddr)) require.NoError(t, err, "error dialing test tcp conn") loggerFactory := logging.NewDefaultLoggerFactory() packetConn := newTCPPacketConn(tcpPacketParams{ ReadBuffer: 2048, Logger: loggerFactory.NewLogger("tcp-packet-conn"), }) err = packetConn.AddConn(conn, nil) require.NoError(t, err, "error adding test tcp conn to packet conn") c1 := &candidateBase{ conn: packetConn, currAgent: &Agent{ log: loggerFactory.NewLogger("agent"), }, } c2 := &candidateBase{ resolvedAddr: listener.Addr(), } _, err = c1.writeTo([]byte("test"), c2) assert.NoError(t, err, "writing to open conn") err = packetConn.Close() require.NoError(t, err, "error closing test tcp conn") _, err = 
c1.writeTo([]byte("test"), c2) assert.Error(t, err, "writing to closed conn") } ice-2.3.1/candidatepair.go000066400000000000000000000045431437620344400154110ustar00rootroot00000000000000package ice import ( "fmt" "github.com/pion/stun" ) func newCandidatePair(local, remote Candidate, controlling bool) *CandidatePair { return &CandidatePair{ iceRoleControlling: controlling, Remote: remote, Local: local, state: CandidatePairStateWaiting, } } // CandidatePair is a combination of a // local and remote candidate type CandidatePair struct { iceRoleControlling bool Remote Candidate Local Candidate bindingRequestCount uint16 state CandidatePairState nominated bool nominateOnBindingSuccess bool } func (p *CandidatePair) String() string { if p == nil { return "" } return fmt.Sprintf("prio %d (local, prio %d) %s <-> %s (remote, prio %d)", p.priority(), p.Local.Priority(), p.Local, p.Remote, p.Remote.Priority()) } func (p *CandidatePair) equal(other *CandidatePair) bool { if p == nil && other == nil { return true } if p == nil || other == nil { return false } return p.Local.Equal(other.Local) && p.Remote.Equal(other.Remote) } // RFC 5245 - 5.7.2. Computing Pair Priority and Ordering Pairs // Let G be the priority for the candidate provided by the controlling // agent. Let D be the priority for the candidate provided by the // controlled agent. 
// pair priority = 2^32*MIN(G,D) + 2*MAX(G,D) + (G>D?1:0) func (p *CandidatePair) priority() uint64 { var g, d uint32 if p.iceRoleControlling { g = p.Local.Priority() d = p.Remote.Priority() } else { g = p.Remote.Priority() d = p.Local.Priority() } // Just implement these here rather // than fooling around with the math package min := func(x, y uint32) uint64 { if x < y { return uint64(x) } return uint64(y) } max := func(x, y uint32) uint64 { if x > y { return uint64(x) } return uint64(y) } cmp := func(x, y uint32) uint64 { if x > y { return uint64(1) } return uint64(0) } // 1<<32 overflows uint32; and if both g && d are // maxUint32, this result would overflow uint64 return (1<<32-1)*min(g, d) + 2*max(g, d) + cmp(g, d) } func (p *CandidatePair) Write(b []byte) (int, error) { return p.Local.writeTo(b, p.Remote) } func (a *Agent) sendSTUN(msg *stun.Message, local, remote Candidate) { _, err := local.writeTo(msg.Raw, remote) if err != nil { a.log.Tracef("failed to send STUN message: %s", err) } } ice-2.3.1/candidatepair_state.go000066400000000000000000000020351437620344400166030ustar00rootroot00000000000000package ice // CandidatePairState represent the ICE candidate pair state type CandidatePairState int const ( // CandidatePairStateWaiting means a check has not been performed for // this pair CandidatePairStateWaiting = iota + 1 // CandidatePairStateInProgress means a check has been sent for this pair, // but the transaction is in progress. CandidatePairStateInProgress // CandidatePairStateFailed means a check for this pair was already done // and failed, either never producing any response or producing an unrecoverable // failure response. CandidatePairStateFailed // CandidatePairStateSucceeded means a check for this pair was already // done and produced a successful result. 
CandidatePairStateSucceeded ) func (c CandidatePairState) String() string { switch c { case CandidatePairStateWaiting: return "waiting" case CandidatePairStateInProgress: return "in-progress" case CandidatePairStateFailed: return "failed" case CandidatePairStateSucceeded: return "succeeded" } return "Unknown candidate pair state" } ice-2.3.1/candidatepair_test.go000066400000000000000000000050571437620344400164510ustar00rootroot00000000000000package ice import ( "testing" "github.com/stretchr/testify/assert" ) func hostCandidate() *CandidateHost { return &CandidateHost{ candidateBase: candidateBase{ candidateType: CandidateTypeHost, component: ComponentRTP, }, } } func prflxCandidate() *CandidatePeerReflexive { return &CandidatePeerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypePeerReflexive, component: ComponentRTP, }, } } func srflxCandidate() *CandidateServerReflexive { return &CandidateServerReflexive{ candidateBase: candidateBase{ candidateType: CandidateTypeServerReflexive, component: ComponentRTP, }, } } func relayCandidate() *CandidateRelay { return &CandidateRelay{ candidateBase: candidateBase{ candidateType: CandidateTypeRelay, component: ComponentRTP, }, } } func TestCandidatePairPriority(t *testing.T) { for _, test := range []struct { Pair *CandidatePair WantPriority uint64 }{ { Pair: newCandidatePair( hostCandidate(), hostCandidate(), false, ), WantPriority: 9151314440652587007, }, { Pair: newCandidatePair( hostCandidate(), hostCandidate(), true, ), WantPriority: 9151314440652587007, }, { Pair: newCandidatePair( hostCandidate(), prflxCandidate(), true, ), WantPriority: 7998392936314175488, }, { Pair: newCandidatePair( hostCandidate(), prflxCandidate(), false, ), WantPriority: 7998392936314175487, }, { Pair: newCandidatePair( hostCandidate(), srflxCandidate(), true, ), WantPriority: 7277816996102668288, }, { Pair: newCandidatePair( hostCandidate(), srflxCandidate(), false, ), WantPriority: 7277816996102668287, }, { Pair: 
newCandidatePair( hostCandidate(), relayCandidate(), true, ), WantPriority: 72057593987596288, }, { Pair: newCandidatePair( hostCandidate(), relayCandidate(), false, ), WantPriority: 72057593987596287, }, } { if got, want := test.Pair.priority(), test.WantPriority; got != want { t.Fatalf("CandidatePair(%v).Priority() = %d, want %d", test.Pair, got, want) } } } func TestCandidatePairEquality(t *testing.T) { pairA := newCandidatePair(hostCandidate(), srflxCandidate(), true) pairB := newCandidatePair(hostCandidate(), srflxCandidate(), false) if !pairA.equal(pairB) { t.Fatalf("Expected %v to equal %v", pairA, pairB) } } func TestNilCandidatePairString(t *testing.T) { var nilCandidatePair *CandidatePair assert.Equal(t, nilCandidatePair.String(), "") } ice-2.3.1/candidaterelatedaddress.go000066400000000000000000000013531437620344400174400ustar00rootroot00000000000000package ice import "fmt" // CandidateRelatedAddress convey transport addresses related to the // candidate, useful for diagnostics and other purposes. type CandidateRelatedAddress struct { Address string Port int } // String makes CandidateRelatedAddress printable func (c *CandidateRelatedAddress) String() string { if c == nil { return "" } return fmt.Sprintf(" related %s:%d", c.Address, c.Port) } // Equal allows comparing two CandidateRelatedAddresses. // The CandidateRelatedAddress are allowed to be nil. 
func (c *CandidateRelatedAddress) Equal(other *CandidateRelatedAddress) bool { if c == nil && other == nil { return true } return c != nil && other != nil && c.Address == other.Address && c.Port == other.Port } ice-2.3.1/candidatetype.go000066400000000000000000000026721437620344400154400ustar00rootroot00000000000000package ice // CandidateType represents the type of candidate type CandidateType byte // CandidateType enum const ( CandidateTypeUnspecified CandidateType = iota CandidateTypeHost CandidateTypeServerReflexive CandidateTypePeerReflexive CandidateTypeRelay ) // String makes CandidateType printable func (c CandidateType) String() string { switch c { case CandidateTypeHost: return "host" case CandidateTypeServerReflexive: return "srflx" case CandidateTypePeerReflexive: return "prflx" case CandidateTypeRelay: return "relay" case CandidateTypeUnspecified: return "Unknown candidate type" } return "Unknown candidate type" } // Preference returns the preference weight of a CandidateType // // 4.1.2.2. Guidelines for Choosing Type and Local Preferences // The RECOMMENDED values are 126 for host candidates, 100 // for server reflexive candidates, 110 for peer reflexive candidates, // and 0 for relayed candidates. func (c CandidateType) Preference() uint16 { switch c { case CandidateTypeHost: return 126 case CandidateTypePeerReflexive: return 110 case CandidateTypeServerReflexive: return 100 case CandidateTypeRelay, CandidateTypeUnspecified: return 0 } return 0 } func containsCandidateType(candidateType CandidateType, candidateTypeList []CandidateType) bool { if candidateTypeList == nil { return false } for _, ct := range candidateTypeList { if ct == candidateType { return true } } return false } ice-2.3.1/codecov.yml000066400000000000000000000005521437620344400144330ustar00rootroot00000000000000# # DO NOT EDIT THIS FILE # # It is automatically copied from https://github.com/pion/.goassets repository. 
# coverage: status: project: default: # Allow decreasing 2% of total coverage to avoid noise. threshold: 2% patch: default: target: 70% only_pulls: true ignore: - "examples/*" - "examples/**/*" ice-2.3.1/connectivity_vnet_test.go000066400000000000000000000371341437620344400174340ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "context" "fmt" "net" "sync/atomic" "testing" "time" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2/test" "github.com/pion/transport/v2/vnet" "github.com/pion/turn/v2" "github.com/stretchr/testify/assert" ) const ( vnetGlobalIPA = "27.1.1.1" vnetLocalIPA = "192.168.0.1" vnetLocalSubnetMaskA = "24" vnetGlobalIPB = "28.1.1.1" vnetLocalIPB = "10.2.0.1" vnetLocalSubnetMaskB = "24" vnetSTUNServerIP = "1.2.3.4" vnetSTUNServerPort = 3478 ) type virtualNet struct { wan *vnet.Router net0 *vnet.Net net1 *vnet.Net server *turn.Server } func (v *virtualNet) close() { v.server.Close() // nolint:errcheck,gosec v.wan.Stop() // nolint:errcheck,gosec } func buildVNet(natType0, natType1 *vnet.NATType) (*virtualNet, error) { loggerFactory := logging.NewDefaultLoggerFactory() // WAN wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "0.0.0.0/0", LoggerFactory: loggerFactory, }) if err != nil { return nil, err } wanNet, err := vnet.NewNet(&vnet.NetConfig{ StaticIP: vnetSTUNServerIP, // will be assigned to eth0 }) if err != nil { return nil, err } err = wan.AddNet(wanNet) if err != nil { return nil, err } // LAN 0 lan0, err := vnet.NewRouter(&vnet.RouterConfig{ StaticIPs: func() []string { if natType0.Mode == vnet.NATModeNAT1To1 { return []string{ vnetGlobalIPA + "/" + vnetLocalIPA, } } return []string{ vnetGlobalIPA, } }(), CIDR: vnetLocalIPA + "/" + vnetLocalSubnetMaskA, NATType: natType0, LoggerFactory: loggerFactory, }) if err != nil { return nil, err } net0, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{vnetLocalIPA}, }) if err != nil { return nil, err } err = lan0.AddNet(net0) if err 
!= nil { return nil, err } err = wan.AddRouter(lan0) if err != nil { return nil, err } // LAN 1 lan1, err := vnet.NewRouter(&vnet.RouterConfig{ StaticIPs: func() []string { if natType1.Mode == vnet.NATModeNAT1To1 { return []string{ vnetGlobalIPB + "/" + vnetLocalIPB, } } return []string{ vnetGlobalIPB, } }(), CIDR: vnetLocalIPB + "/" + vnetLocalSubnetMaskB, NATType: natType1, LoggerFactory: loggerFactory, }) if err != nil { return nil, err } net1, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{vnetLocalIPB}, }) if err != nil { return nil, err } err = lan1.AddNet(net1) if err != nil { return nil, err } err = wan.AddRouter(lan1) if err != nil { return nil, err } // Start routers err = wan.Start() if err != nil { return nil, err } server, err := addVNetSTUN(wanNet, loggerFactory) if err != nil { return nil, err } return &virtualNet{ wan: wan, net0: net0, net1: net1, server: server, }, nil } func addVNetSTUN(wanNet *vnet.Net, loggerFactory logging.LoggerFactory) (*turn.Server, error) { // Run TURN(STUN) server credMap := map[string]string{} credMap["user"] = "pass" wanNetPacketConn, err := wanNet.ListenPacket("udp", fmt.Sprintf("%s:%d", vnetSTUNServerIP, vnetSTUNServerPort)) if err != nil { return nil, err } server, err := turn.NewServer(turn.ServerConfig{ AuthHandler: func(username, realm string, srcAddr net.Addr) (key []byte, ok bool) { if pw, ok := credMap[username]; ok { return turn.GenerateAuthKey(username, realm, pw), true } return nil, false }, PacketConnConfigs: []turn.PacketConnConfig{ { PacketConn: wanNetPacketConn, RelayAddressGenerator: &turn.RelayAddressGeneratorStatic{ RelayAddress: net.ParseIP(vnetSTUNServerIP), Address: "0.0.0.0", Net: wanNet, }, }, }, Realm: "pion.ly", LoggerFactory: loggerFactory, }) if err != nil { return nil, err } return server, err } func connectWithVNet(aAgent, bAgent *Agent) (*Conn, *Conn) { // Manual signaling aUfrag, aPwd, err := aAgent.GetLocalUserCredentials() check(err) bUfrag, bPwd, err := 
bAgent.GetLocalUserCredentials() check(err) gatherAndExchangeCandidates(aAgent, bAgent) accepted := make(chan struct{}) var aConn *Conn go func() { var acceptErr error aConn, acceptErr = aAgent.Accept(context.TODO(), bUfrag, bPwd) check(acceptErr) close(accepted) }() bConn, err := bAgent.Dial(context.TODO(), aUfrag, aPwd) check(err) // Ensure accepted <-accepted return aConn, bConn } type agentTestConfig struct { urls []*URL nat1To1IPCandidateType CandidateType } func pipeWithVNet(v *virtualNet, a0TestConfig, a1TestConfig *agentTestConfig) (*Conn, *Conn) { aNotifier, aConnected := onConnected() bNotifier, bConnected := onConnected() var nat1To1IPs []string if a0TestConfig.nat1To1IPCandidateType != CandidateTypeUnspecified { nat1To1IPs = []string{ vnetGlobalIPA, } } cfg0 := &AgentConfig{ Urls: a0TestConfig.urls, NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, NAT1To1IPs: nat1To1IPs, NAT1To1IPCandidateType: a0TestConfig.nat1To1IPCandidateType, Net: v.net0, } aAgent, err := NewAgent(cfg0) if err != nil { panic(err) } err = aAgent.OnConnectionStateChange(aNotifier) if err != nil { panic(err) } if a1TestConfig.nat1To1IPCandidateType != CandidateTypeUnspecified { nat1To1IPs = []string{ vnetGlobalIPB, } } cfg1 := &AgentConfig{ Urls: a1TestConfig.urls, NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, NAT1To1IPs: nat1To1IPs, NAT1To1IPCandidateType: a1TestConfig.nat1To1IPCandidateType, Net: v.net1, } bAgent, err := NewAgent(cfg1) if err != nil { panic(err) } err = bAgent.OnConnectionStateChange(bNotifier) if err != nil { panic(err) } aConn, bConn := connectWithVNet(aAgent, bAgent) // Ensure pair selected // Note: this assumes ConnectionStateConnected is thrown after selecting the final pair <-aConnected <-bConnected return aConn, bConn } func closePipe(t *testing.T, ca *Conn, cb *Conn) bool { err := ca.Close() if !assert.NoError(t, err, "should succeed") { return false } err = cb.Close() return 
assert.NoError(t, err, "should succeed") } func TestConnectivityVNet(t *testing.T) { report := test.CheckRoutines(t) defer report() stunServerURL := &URL{ Scheme: SchemeTypeSTUN, Host: vnetSTUNServerIP, Port: vnetSTUNServerPort, Proto: ProtoTypeUDP, } turnServerURL := &URL{ Scheme: SchemeTypeTURN, Host: vnetSTUNServerIP, Port: vnetSTUNServerPort, Username: "user", Password: "pass", Proto: ProtoTypeUDP, } t.Run("Full-cone NATs on both ends", func(t *testing.T) { loggerFactory := logging.NewDefaultLoggerFactory() log := loggerFactory.NewLogger("test") // buildVNet with a Full-cone NATs both LANs natType := &vnet.NATType{ MappingBehavior: vnet.EndpointIndependent, FilteringBehavior: vnet.EndpointIndependent, } v, err := buildVNet(natType, natType) if !assert.NoError(t, err, "should succeed") { return } defer v.close() log.Debug("Connecting...") a0TestConfig := &agentTestConfig{ urls: []*URL{ stunServerURL, }, } a1TestConfig := &agentTestConfig{ urls: []*URL{ stunServerURL, }, } ca, cb := pipeWithVNet(v, a0TestConfig, a1TestConfig) time.Sleep(1 * time.Second) log.Debug("Closing...") if !closePipe(t, ca, cb) { return } }) t.Run("Symmetric NATs on both ends", func(t *testing.T) { loggerFactory := logging.NewDefaultLoggerFactory() log := loggerFactory.NewLogger("test") // buildVNet with a Symmetric NATs for both LANs natType := &vnet.NATType{ MappingBehavior: vnet.EndpointAddrPortDependent, FilteringBehavior: vnet.EndpointAddrPortDependent, } v, err := buildVNet(natType, natType) if !assert.NoError(t, err, "should succeed") { return } defer v.close() log.Debug("Connecting...") a0TestConfig := &agentTestConfig{ urls: []*URL{ stunServerURL, turnServerURL, }, } a1TestConfig := &agentTestConfig{ urls: []*URL{ stunServerURL, }, } ca, cb := pipeWithVNet(v, a0TestConfig, a1TestConfig) log.Debug("Closing...") if !closePipe(t, ca, cb) { return } }) t.Run("1:1 NAT with host candidate vs Symmetric NATs", func(t *testing.T) { loggerFactory := logging.NewDefaultLoggerFactory() log := 
loggerFactory.NewLogger("test") // Agent0 is behind 1:1 NAT natType0 := &vnet.NATType{ Mode: vnet.NATModeNAT1To1, } // Agent1 is behind a symmetric NAT natType1 := &vnet.NATType{ MappingBehavior: vnet.EndpointAddrPortDependent, FilteringBehavior: vnet.EndpointAddrPortDependent, } v, err := buildVNet(natType0, natType1) if !assert.NoError(t, err, "should succeed") { return } defer v.close() log.Debug("Connecting...") a0TestConfig := &agentTestConfig{ urls: []*URL{}, nat1To1IPCandidateType: CandidateTypeHost, // Use 1:1 NAT IP as a host candidate } a1TestConfig := &agentTestConfig{ urls: []*URL{}, } ca, cb := pipeWithVNet(v, a0TestConfig, a1TestConfig) log.Debug("Closing...") if !closePipe(t, ca, cb) { return } }) t.Run("1:1 NAT with srflx candidate vs Symmetric NATs", func(t *testing.T) { loggerFactory := logging.NewDefaultLoggerFactory() log := loggerFactory.NewLogger("test") // Agent0 is behind 1:1 NAT natType0 := &vnet.NATType{ Mode: vnet.NATModeNAT1To1, } // Agent1 is behind a symmetric NAT natType1 := &vnet.NATType{ MappingBehavior: vnet.EndpointAddrPortDependent, FilteringBehavior: vnet.EndpointAddrPortDependent, } v, err := buildVNet(natType0, natType1) if !assert.NoError(t, err, "should succeed") { return } defer v.close() log.Debug("Connecting...") a0TestConfig := &agentTestConfig{ urls: []*URL{}, nat1To1IPCandidateType: CandidateTypeServerReflexive, // Use 1:1 NAT IP as a srflx candidate } a1TestConfig := &agentTestConfig{ urls: []*URL{}, } ca, cb := pipeWithVNet(v, a0TestConfig, a1TestConfig) log.Debug("Closing...") if !closePipe(t, ca, cb) { return } }) } // TestDisconnectedToConnected asserts that an agent can go to disconnected, and then return to connected successfully func TestDisconnectedToConnected(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 10) defer lim.Stop() loggerFactory := logging.NewDefaultLoggerFactory() // Create a network with two interfaces wan, err := 
vnet.NewRouter(&vnet.RouterConfig{ CIDR: "0.0.0.0/0", LoggerFactory: loggerFactory, }) assert.NoError(t, err) var dropAllData uint64 wan.AddChunkFilter(func(vnet.Chunk) bool { return atomic.LoadUint64(&dropAllData) != 1 }) net0, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.1"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net0)) net1, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.2"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net1)) assert.NoError(t, wan.Start()) disconnectTimeout := time.Second keepaliveInterval := time.Millisecond * 20 // Create two agents and connect them controllingAgent, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net0, DisconnectedTimeout: &disconnectTimeout, KeepaliveInterval: &keepaliveInterval, CheckInterval: &keepaliveInterval, }) assert.NoError(t, err) controlledAgent, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net1, DisconnectedTimeout: &disconnectTimeout, KeepaliveInterval: &keepaliveInterval, CheckInterval: &keepaliveInterval, }) assert.NoError(t, err) controllingStateChanges := make(chan ConnectionState, 100) assert.NoError(t, controllingAgent.OnConnectionStateChange(func(c ConnectionState) { controllingStateChanges <- c })) controlledStateChanges := make(chan ConnectionState, 100) assert.NoError(t, controlledAgent.OnConnectionStateChange(func(c ConnectionState) { controlledStateChanges <- c })) connectWithVNet(controllingAgent, controlledAgent) blockUntilStateSeen := func(expectedState ConnectionState, stateQueue chan ConnectionState) { for s := range stateQueue { if s == expectedState { return } } } // Assert we have gone to connected blockUntilStateSeen(ConnectionStateConnected, controllingStateChanges) blockUntilStateSeen(ConnectionStateConnected, controlledStateChanges) // Drop all packets, and block until we have 
gone to disconnected atomic.StoreUint64(&dropAllData, 1) blockUntilStateSeen(ConnectionStateDisconnected, controllingStateChanges) blockUntilStateSeen(ConnectionStateDisconnected, controlledStateChanges) // Allow all packets through again, block until we have gone to connected atomic.StoreUint64(&dropAllData, 0) blockUntilStateSeen(ConnectionStateConnected, controllingStateChanges) blockUntilStateSeen(ConnectionStateConnected, controlledStateChanges) assert.NoError(t, wan.Stop()) assert.NoError(t, controllingAgent.Close()) assert.NoError(t, controlledAgent.Close()) } // Agent.Write should use the best valid pair if a selected pair is not yet available func TestWriteUseValidPair(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 10) defer lim.Stop() loggerFactory := logging.NewDefaultLoggerFactory() // Create a network with two interfaces wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "0.0.0.0/0", LoggerFactory: loggerFactory, }) assert.NoError(t, err) wan.AddChunkFilter(func(c vnet.Chunk) bool { if stun.IsMessage(c.UserData()) { m := &stun.Message{ Raw: c.UserData(), } if decErr := m.Decode(); decErr != nil { return false } else if m.Contains(stun.AttrUseCandidate) { return false } } return true }) net0, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.1"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net0)) net1, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{"192.168.0.2"}, }) assert.NoError(t, err) assert.NoError(t, wan.AddNet(net1)) assert.NoError(t, wan.Start()) // Create two agents and connect them controllingAgent, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net0, }) assert.NoError(t, err) controlledAgent, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: MulticastDNSModeDisabled, Net: net1, }) assert.NoError(t, err) gatherAndExchangeCandidates(controllingAgent, 
controlledAgent) controllingUfrag, controllingPwd, err := controllingAgent.GetLocalUserCredentials() assert.NoError(t, err) controlledUfrag, controlledPwd, err := controlledAgent.GetLocalUserCredentials() assert.NoError(t, err) assert.NoError(t, controllingAgent.startConnectivityChecks(true, controlledUfrag, controlledPwd)) assert.NoError(t, controlledAgent.startConnectivityChecks(false, controllingUfrag, controllingPwd)) testMessage := []byte("Test Message") go func() { for { if _, writeErr := (&Conn{agent: controllingAgent}).Write(testMessage); writeErr != nil { return } time.Sleep(20 * time.Millisecond) } }() readBuf := make([]byte, len(testMessage)) _, err = (&Conn{agent: controlledAgent}).Read(readBuf) assert.NoError(t, err) assert.Equal(t, readBuf, testMessage) assert.NoError(t, wan.Stop()) assert.NoError(t, controllingAgent.Close()) assert.NoError(t, controlledAgent.Close()) } ice-2.3.1/context.go000066400000000000000000000012351437620344400143000ustar00rootroot00000000000000package ice import ( "context" "time" ) func (a *Agent) context() context.Context { return agentContext(a.done) } type agentContext chan struct{} // Done implements context.Context func (a agentContext) Done() <-chan struct{} { return (chan struct{})(a) } // Err implements context.Context func (a agentContext) Err() error { select { case <-(chan struct{})(a): return ErrRunCanceled default: return nil } } // Deadline implements context.Context func (a agentContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } // Value implements context.Context func (a agentContext) Value(key interface{}) interface{} { return nil } ice-2.3.1/errors.go000066400000000000000000000167311437620344400141370ustar00rootroot00000000000000package ice import "errors" var ( // ErrUnknownType indicates an error with Unknown info. ErrUnknownType = errors.New("Unknown") // ErrSchemeType indicates the scheme type could not be parsed. 
ErrSchemeType = errors.New("unknown scheme type") // ErrSTUNQuery indicates query arguments are provided in a STUN URL. ErrSTUNQuery = errors.New("queries not supported in stun address") // ErrInvalidQuery indicates an malformed query is provided. ErrInvalidQuery = errors.New("invalid query") // ErrHost indicates malformed hostname is provided. ErrHost = errors.New("invalid hostname") // ErrPort indicates malformed port is provided. ErrPort = errors.New("invalid port") // ErrLocalUfragInsufficientBits indicates local username fragment insufficient bits are provided. // Have to be at least 24 bits long ErrLocalUfragInsufficientBits = errors.New("local username fragment is less than 24 bits long") // ErrLocalPwdInsufficientBits indicates local password insufficient bits are provided. // Have to be at least 128 bits long ErrLocalPwdInsufficientBits = errors.New("local password is less than 128 bits long") // ErrProtoType indicates an unsupported transport type was provided. ErrProtoType = errors.New("invalid transport protocol type") // ErrClosed indicates the agent is closed ErrClosed = errors.New("the agent is closed") // ErrNoCandidatePairs indicates agent does not have a valid candidate pair ErrNoCandidatePairs = errors.New("no candidate pairs available") // ErrCanceledByCaller indicates agent connection was canceled by the caller ErrCanceledByCaller = errors.New("connecting canceled by caller") // ErrMultipleStart indicates agent was started twice ErrMultipleStart = errors.New("attempted to start agent twice") // ErrRemoteUfragEmpty indicates agent was started with an empty remote ufrag ErrRemoteUfragEmpty = errors.New("remote ufrag is empty") // ErrRemotePwdEmpty indicates agent was started with an empty remote pwd ErrRemotePwdEmpty = errors.New("remote pwd is empty") // ErrNoOnCandidateHandler indicates agent was started without OnCandidate ErrNoOnCandidateHandler = errors.New("no OnCandidate provided") // ErrMultipleGatherAttempted indicates GatherCandidates 
has been called multiple times ErrMultipleGatherAttempted = errors.New("attempting to gather candidates during gathering state") // ErrUsernameEmpty indicates agent was give TURN URL with an empty Username ErrUsernameEmpty = errors.New("username is empty") // ErrPasswordEmpty indicates agent was give TURN URL with an empty Password ErrPasswordEmpty = errors.New("password is empty") // ErrAddressParseFailed indicates we were unable to parse a candidate address ErrAddressParseFailed = errors.New("failed to parse address") // ErrLiteUsingNonHostCandidates indicates non host candidates were selected for a lite agent ErrLiteUsingNonHostCandidates = errors.New("lite agents must only use host candidates") // ErrUselessUrlsProvided indicates that one or more URL was provided to the agent but no host // candidate required them ErrUselessUrlsProvided = errors.New("agent does not need URL with selected candidate types") // ErrUnsupportedNAT1To1IPCandidateType indicates that the specified NAT1To1IPCandidateType is // unsupported ErrUnsupportedNAT1To1IPCandidateType = errors.New("unsupported 1:1 NAT IP candidate type") // ErrInvalidNAT1To1IPMapping indicates that the given 1:1 NAT IP mapping is invalid ErrInvalidNAT1To1IPMapping = errors.New("invalid 1:1 NAT IP mapping") // ErrExternalMappedIPNotFound in NAT1To1IPMapping ErrExternalMappedIPNotFound = errors.New("external mapped IP not found") // ErrMulticastDNSWithNAT1To1IPMapping indicates that the mDNS gathering cannot be used along // with 1:1 NAT IP mapping for host candidate. ErrMulticastDNSWithNAT1To1IPMapping = errors.New("mDNS gathering cannot be used with 1:1 NAT IP mapping for host candidate") // ErrIneffectiveNAT1To1IPMappingHost indicates that 1:1 NAT IP mapping for host candidate is // requested, but the host candidate type is disabled. 
ErrIneffectiveNAT1To1IPMappingHost = errors.New("1:1 NAT IP mapping for host candidate ineffective") // ErrIneffectiveNAT1To1IPMappingSrflx indicates that 1:1 NAT IP mapping for srflx candidate is // requested, but the srflx candidate type is disabled. ErrIneffectiveNAT1To1IPMappingSrflx = errors.New("1:1 NAT IP mapping for srflx candidate ineffective") // ErrInvalidMulticastDNSHostName indicates an invalid MulticastDNSHostName ErrInvalidMulticastDNSHostName = errors.New("invalid mDNS HostName, must end with .local and can only contain a single '.'") // ErrRunCanceled indicates a run operation was canceled by its individual done ErrRunCanceled = errors.New("run was canceled by done") // ErrTCPMuxNotInitialized indicates TCPMux is not initialized and that invalidTCPMux is used. ErrTCPMuxNotInitialized = errors.New("TCPMux is not initialized") // ErrTCPRemoteAddrAlreadyExists indicates we already have the connection with same remote addr. ErrTCPRemoteAddrAlreadyExists = errors.New("conn with same remote addr already exists") // ErrUnknownCandidateTyp indicates that a candidate had a unknown type value. 
ErrUnknownCandidateTyp = errors.New("unknown candidate typ") // ErrDetermineNetworkType indicates that the NetworkType was not able to be parsed ErrDetermineNetworkType = errors.New("unable to determine networkType") errSendPacket = errors.New("failed to send packet") errAttributeTooShortICECandidate = errors.New("attribute not long enough to be ICE candidate") errParseComponent = errors.New("could not parse component") errParsePriority = errors.New("could not parse priority") errParsePort = errors.New("could not parse port") errParseRelatedAddr = errors.New("could not parse related addresses") errParseTCPType = errors.New("could not parse TCP type") errGetXorMappedAddrResponse = errors.New("failed to get XOR-MAPPED-ADDRESS response") errConnectionAddrAlreadyExist = errors.New("connection with same remote address already exists") errReadingStreamingPacket = errors.New("error reading streaming packet") errWriting = errors.New("error writing to") errClosingConnection = errors.New("error closing connection") errMissingProtocolScheme = errors.New("missing protocol scheme") errTooManyColonsAddr = errors.New("too many colons in address") errRead = errors.New("unexpected error trying to read") errUnknownRole = errors.New("unknown role") errICEWriteSTUNMessage = errors.New("the ICE conn can't write STUN messages") errUDPMuxDisabled = errors.New("UDPMux is not enabled") errNoXorAddrMapping = errors.New("no address mapping") errSendSTUNPacket = errors.New("failed to send STUN packet") errXORMappedAddrTimeout = errors.New("timeout while waiting for XORMappedAddr") errNotImplemented = errors.New("not implemented yet") errNoUDPMuxAvailable = errors.New("no UDP mux is available") errNoTCPMuxAvailable = errors.New("no TCP mux is available") errInvalidAddress = errors.New("invalid address") // UDPMuxDefault should not listen on unspecified address, but to keep backward compatibility, don't return error now. // will be used in the future. 
// errListenUnspecified = errors.New("can't listen on unspecified address") ) ice-2.3.1/examples/000077500000000000000000000000001437620344400141025ustar00rootroot00000000000000ice-2.3.1/examples/ping-pong/000077500000000000000000000000001437620344400160005ustar00rootroot00000000000000ice-2.3.1/examples/ping-pong/README.md000066400000000000000000000015111437620344400172550ustar00rootroot00000000000000# ping-pong This example demonstrates how to connect two peers via ICE. Once started they send the current time between each other. Currently this example exchanges candidates over a HTTP server running on localhost. In a real world setup `pion/ice` will typically exchange auth and candidates via a signaling server. ## Instruction ### Run controlling ```sh go run main.go -controlling ``` ### Run controlled ```sh go run main.go ``` ### Press enter in both to start the connection! You will see terminal output showing the messages being sent back and forth ``` Local Agent is controlled Press 'Enter' when both processes have started ICE Connection State has changed: Checking ICE Connection State has changed: Connected Sent: 'fCFXXlnGmXdYjOy' Received: 'EpqTQYLQMUCjBDX' Sent: 'yhgOtrufSfVmvrR' Received: 'xYSTPxBPZKfgnFr' ``` ice-2.3.1/examples/ping-pong/main.go000066400000000000000000000076541437620344400172670ustar00rootroot00000000000000// Package main implements a simple example demonstrating a Pion-to-Pion ICE connection package main import ( "bufio" "context" "flag" "fmt" "net/http" "net/url" "os" "time" "github.com/pion/ice/v2" "github.com/pion/randutil" ) // nolint:gochecknoglobals var ( isControlling bool iceAgent *ice.Agent remoteAuthChannel chan string localHTTPPort, remoteHTTPPort int ) // HTTP Listener to get ICE Credentials from remote Peer func remoteAuth(w http.ResponseWriter, r *http.Request) { if err := r.ParseForm(); err != nil { panic(err) } remoteAuthChannel <- r.PostForm["ufrag"][0] remoteAuthChannel <- r.PostForm["pwd"][0] } // HTTP Listener to get ICE 
Candidate from remote Peer func remoteCandidate(w http.ResponseWriter, r *http.Request) { if err := r.ParseForm(); err != nil { panic(err) } c, err := ice.UnmarshalCandidate(r.PostForm["candidate"][0]) if err != nil { panic(err) } if err := iceAgent.AddRemoteCandidate(c); err != nil { //nolint:contextcheck panic(err) } } func main() { //nolint var ( err error conn *ice.Conn ) remoteAuthChannel = make(chan string, 3) flag.BoolVar(&isControlling, "controlling", false, "is ICE Agent controlling") flag.Parse() if isControlling { localHTTPPort = 9000 remoteHTTPPort = 9001 } else { localHTTPPort = 9001 remoteHTTPPort = 9000 } http.HandleFunc("/remoteAuth", remoteAuth) http.HandleFunc("/remoteCandidate", remoteCandidate) go func() { if err = http.ListenAndServe(fmt.Sprintf(":%d", localHTTPPort), nil); err != nil { //nolint:gosec panic(err) } }() if isControlling { fmt.Println("Local Agent is controlling") } else { fmt.Println("Local Agent is controlled") } fmt.Print("Press 'Enter' when both processes have started") if _, err = bufio.NewReader(os.Stdin).ReadBytes('\n'); err != nil { panic(err) } iceAgent, err = ice.NewAgent(&ice.AgentConfig{ NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4}, }) if err != nil { panic(err) } // When we have gathered a new ICE Candidate send it to the remote peer if err = iceAgent.OnCandidate(func(c ice.Candidate) { if c == nil { return } _, err = http.PostForm(fmt.Sprintf("http://localhost:%d/remoteCandidate", remoteHTTPPort), //nolint url.Values{ "candidate": {c.Marshal()}, }) if err != nil { panic(err) } }); err != nil { panic(err) } // When ICE Connection state has change print to stdout if err = iceAgent.OnConnectionStateChange(func(c ice.ConnectionState) { fmt.Printf("ICE Connection State has changed: %s\n", c.String()) }); err != nil { panic(err) } // Get the local auth details and send to remote peer localUfrag, localPwd, err := iceAgent.GetLocalUserCredentials() if err != nil { panic(err) } _, err = 
http.PostForm(fmt.Sprintf("http://localhost:%d/remoteAuth", remoteHTTPPort), //nolint url.Values{ "ufrag": {localUfrag}, "pwd": {localPwd}, }) if err != nil { panic(err) } remoteUfrag := <-remoteAuthChannel remotePwd := <-remoteAuthChannel if err = iceAgent.GatherCandidates(); err != nil { panic(err) } // Start the ICE Agent. One side must be controlled, and the other must be controlling if isControlling { conn, err = iceAgent.Dial(context.TODO(), remoteUfrag, remotePwd) } else { conn, err = iceAgent.Accept(context.TODO(), remoteUfrag, remotePwd) } if err != nil { panic(err) } // Send messages in a loop to the remote peer go func() { for { time.Sleep(time.Second * 3) val, err := randutil.GenerateCryptoRandomString(15, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") if err != nil { panic(err) } if _, err = conn.Write([]byte(val)); err != nil { panic(err) } fmt.Printf("Sent: '%s'\n", val) } }() // Receive messages in a loop from the remote peer buf := make([]byte, 1500) for { n, err := conn.Read(buf) if err != nil { panic(err) } fmt.Printf("Received: '%s'\n", string(buf[:n])) } } ice-2.3.1/external_ip_mapper.go000066400000000000000000000064431437620344400165000ustar00rootroot00000000000000package ice import ( "net" "strings" ) func validateIPString(ipStr string) (net.IP, bool, error) { ip := net.ParseIP(ipStr) if ip == nil { return nil, false, ErrInvalidNAT1To1IPMapping } return ip, (ip.To4() != nil), nil } // ipMapping holds the mapping of local and external IP address for a particular IP family type ipMapping struct { ipSole net.IP // when non-nil, this is the sole external IP for one local IP assumed ipMap map[string]net.IP // local-to-external IP mapping (k: local, v: external) valid bool // if not set any external IP, valid is false } func (m *ipMapping) setSoleIP(ip net.IP) error { if m.ipSole != nil || len(m.ipMap) > 0 { return ErrInvalidNAT1To1IPMapping } m.ipSole = ip m.valid = true return nil } func (m *ipMapping) addIPMapping(locIP, extIP net.IP) 
error { if m.ipSole != nil { return ErrInvalidNAT1To1IPMapping } locIPStr := locIP.String() // check if dup of local IP if _, ok := m.ipMap[locIPStr]; ok { return ErrInvalidNAT1To1IPMapping } m.ipMap[locIPStr] = extIP m.valid = true return nil } func (m *ipMapping) findExternalIP(locIP net.IP) (net.IP, error) { if !m.valid { return locIP, nil } if m.ipSole != nil { return m.ipSole, nil } extIP, ok := m.ipMap[locIP.String()] if !ok { return nil, ErrExternalMappedIPNotFound } return extIP, nil } type externalIPMapper struct { ipv4Mapping ipMapping ipv6Mapping ipMapping candidateType CandidateType } func newExternalIPMapper(candidateType CandidateType, ips []string) (*externalIPMapper, error) { //nolint:gocognit if len(ips) == 0 { return nil, nil //nolint:nilnil } if candidateType == CandidateTypeUnspecified { candidateType = CandidateTypeHost // defaults to host } else if candidateType != CandidateTypeHost && candidateType != CandidateTypeServerReflexive { return nil, ErrUnsupportedNAT1To1IPCandidateType } m := &externalIPMapper{ ipv4Mapping: ipMapping{ipMap: map[string]net.IP{}}, ipv6Mapping: ipMapping{ipMap: map[string]net.IP{}}, candidateType: candidateType, } for _, extIPStr := range ips { ipPair := strings.Split(extIPStr, "/") if len(ipPair) == 0 || len(ipPair) > 2 { return nil, ErrInvalidNAT1To1IPMapping } extIP, isExtIPv4, err := validateIPString(ipPair[0]) if err != nil { return nil, err } if len(ipPair) == 1 { if isExtIPv4 { if err := m.ipv4Mapping.setSoleIP(extIP); err != nil { return nil, err } } else { if err := m.ipv6Mapping.setSoleIP(extIP); err != nil { return nil, err } } } else { locIP, isLocIPv4, err := validateIPString(ipPair[1]) if err != nil { return nil, err } if isExtIPv4 { if !isLocIPv4 { return nil, ErrInvalidNAT1To1IPMapping } if err := m.ipv4Mapping.addIPMapping(locIP, extIP); err != nil { return nil, err } } else { if isLocIPv4 { return nil, ErrInvalidNAT1To1IPMapping } if err := m.ipv6Mapping.addIPMapping(locIP, extIP); err != nil { 
return nil, err } } } } return m, nil } func (m *externalIPMapper) findExternalIP(localIPStr string) (net.IP, error) { locIP, isLocIPv4, err := validateIPString(localIPStr) if err != nil { return nil, err } if isLocIPv4 { return m.ipv4Mapping.findExternalIP(locIP) } return m.ipv6Mapping.findExternalIP(locIP) } ice-2.3.1/external_ip_mapper_test.go000066400000000000000000000235071437620344400175370ustar00rootroot00000000000000package ice import ( "net" "testing" "github.com/stretchr/testify/assert" ) func TestExternalIPMapper(t *testing.T) { t.Run("validateIPString", func(t *testing.T) { var ip net.IP var isIPv4 bool var err error ip, isIPv4, err = validateIPString("1.2.3.4") assert.NoError(t, err, "should succeed") assert.True(t, isIPv4, "should be true") assert.Equal(t, "1.2.3.4", ip.String(), "should be true") ip, isIPv4, err = validateIPString("2601:4567::5678") assert.NoError(t, err, "should succeed") assert.False(t, isIPv4, "should be false") assert.Equal(t, "2601:4567::5678", ip.String(), "should be true") _, _, err = validateIPString("bad.6.6.6") assert.Error(t, err, "should fail") }) t.Run("newExternalIPMapper", func(t *testing.T) { var m *externalIPMapper var err error // ips being nil should succeed but mapper will be nil also m, err = newExternalIPMapper(CandidateTypeUnspecified, nil) assert.NoError(t, err, "should succeed") assert.Nil(t, m, "should be nil") // ips being empty should succeed but mapper will still be nil m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{}) assert.NoError(t, err, "should succeed") assert.Nil(t, m, "should be nil") // IPv4 with no explicit local IP, defaults to CandidateTypeHost m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") assert.Equal(t, CandidateTypeHost, m.candidateType, "should match") assert.NotNil(t, m.ipv4Mapping.ipSole) assert.Nil(t, m.ipv6Mapping.ipSole) assert.Equal(t, 0, 
len(m.ipv4Mapping.ipMap), "should match") assert.Equal(t, 0, len(m.ipv6Mapping.ipMap), "should match") // IPv4 with no explicit local IP, using CandidateTypeServerReflexive m, err = newExternalIPMapper(CandidateTypeServerReflexive, []string{ "1.2.3.4", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") assert.Equal(t, CandidateTypeServerReflexive, m.candidateType, "should match") assert.NotNil(t, m.ipv4Mapping.ipSole) assert.Nil(t, m.ipv6Mapping.ipSole) assert.Equal(t, 0, len(m.ipv4Mapping.ipMap), "should match") assert.Equal(t, 0, len(m.ipv6Mapping.ipMap), "should match") // IPv4 with no explicit local IP, defaults to CandidateTypeHost m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "2601:4567::5678", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") assert.Equal(t, CandidateTypeHost, m.candidateType, "should match") assert.Nil(t, m.ipv4Mapping.ipSole) assert.NotNil(t, m.ipv6Mapping.ipSole) assert.Equal(t, 0, len(m.ipv4Mapping.ipMap), "should match") assert.Equal(t, 0, len(m.ipv6Mapping.ipMap), "should match") // IPv4 and IPv6 in the mix m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4", "2601:4567::5678", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") assert.Equal(t, CandidateTypeHost, m.candidateType, "should match") assert.NotNil(t, m.ipv4Mapping.ipSole) assert.NotNil(t, m.ipv6Mapping.ipSole) assert.Equal(t, 0, len(m.ipv4Mapping.ipMap), "should match") assert.Equal(t, 0, len(m.ipv6Mapping.ipMap), "should match") // Unsupported candidate type - CandidateTypePeerReflexive m, err = newExternalIPMapper(CandidateTypePeerReflexive, []string{ "1.2.3.4", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Unsupported candidate type - CandidateTypeRelay m, err = newExternalIPMapper(CandidateTypePeerReflexive, []string{ "1.2.3.4", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be 
nil") // Cannot duplicate mapping IPv4 family m, err = newExternalIPMapper(CandidateTypeServerReflexive, []string{ "1.2.3.4", "5.6.7.8", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Cannot duplicate mapping IPv6 family m, err = newExternalIPMapper(CandidateTypeServerReflexive, []string{ "2201::1", "2201::0002", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Invalide external IP string m, err = newExternalIPMapper(CandidateTypeServerReflexive, []string{ "bad.2.3.4", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Invalide local IP string m, err = newExternalIPMapper(CandidateTypeServerReflexive, []string{ "1.2.3.4/10.0.0.bad", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") }) t.Run("newExternalIPMapper with explicit local IP", func(t *testing.T) { var m *externalIPMapper var err error // IPv4 with explicit local IP, defaults to CandidateTypeHost m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4/10.0.0.1", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") assert.Equal(t, CandidateTypeHost, m.candidateType, "should match") assert.Nil(t, m.ipv4Mapping.ipSole) assert.Nil(t, m.ipv6Mapping.ipSole) assert.Equal(t, 1, len(m.ipv4Mapping.ipMap), "should match") assert.Equal(t, 0, len(m.ipv6Mapping.ipMap), "should match") // Cannot assign two ext IPs for one local IPv4 m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4/10.0.0.1", "1.2.3.5/10.0.0.1", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Cannot assign two ext IPs for one local IPv6 m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "2200::1/fe80::1", "2200::0002/fe80::1", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Cannot mix different IP family in a pair (1) m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "2200::1/10.0.0.1", }) 
assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Cannot mix different IP family in a pair (2) m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4/fe80::1", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") // Invalid pair m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4/192.168.0.2/10.0.0.1", }) assert.Error(t, err, "should fail") assert.Nil(t, m, "should be nil") }) t.Run("newExternalIPMapper with implicit and explicit local IP", func(t *testing.T) { // Mixing implicit and explicit local IPs not allowed _, err := newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4", "1.2.3.5/10.0.0.1", }) assert.Error(t, err, "should fail") // Mixing implicit and explicit local IPs not allowed _, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.5/10.0.0.1", "1.2.3.4", }) assert.Error(t, err, "should fail") }) t.Run("findExternalIP without explicit local IP", func(t *testing.T) { var m *externalIPMapper var err error var extIP net.IP // IPv4 with explicit local IP, defaults to CandidateTypeHost m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4", "2200::1", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") assert.NotNil(t, m.ipv4Mapping.ipSole) assert.NotNil(t, m.ipv6Mapping.ipSole) // find external IPv4 extIP, err = m.findExternalIP("10.0.0.1") assert.NoError(t, err, "should succeed") assert.Equal(t, "1.2.3.4", extIP.String(), "should match") // find external IPv6 extIP, err = m.findExternalIP("fe80::0001") // use '0001' instead of '1' on purpose assert.NoError(t, err, "should succeed") assert.Equal(t, "2200::1", extIP.String(), "should match") // Bad local IP string _, err = m.findExternalIP("really.bad") assert.Error(t, err, "should fail") }) t.Run("findExternalIP with explicit local IP", func(t *testing.T) { var m *externalIPMapper var err error var extIP net.IP // IPv4 with explicit local IP, 
defaults to CandidateTypeHost m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4/10.0.0.1", "1.2.3.5/10.0.0.2", "2200::1/fe80::1", "2200::2/fe80::2", }) assert.NoError(t, err, "should succeed") assert.NotNil(t, m, "should not be nil") // find external IPv4 extIP, err = m.findExternalIP("10.0.0.1") assert.NoError(t, err, "should succeed") assert.Equal(t, "1.2.3.4", extIP.String(), "should match") extIP, err = m.findExternalIP("10.0.0.2") assert.NoError(t, err, "should succeed") assert.Equal(t, "1.2.3.5", extIP.String(), "should match") _, err = m.findExternalIP("10.0.0.3") assert.Error(t, err, "should fail") // find external IPv6 extIP, err = m.findExternalIP("fe80::0001") // use '0001' instead of '1' on purpose assert.NoError(t, err, "should succeed") assert.Equal(t, "2200::1", extIP.String(), "should match") extIP, err = m.findExternalIP("fe80::0002") // use '0002' instead of '2' on purpose assert.NoError(t, err, "should succeed") assert.Equal(t, "2200::2", extIP.String(), "should match") _, err = m.findExternalIP("fe80::3") assert.Error(t, err, "should fail") // Bad local IP string _, err = m.findExternalIP("really.bad") assert.Error(t, err, "should fail") }) t.Run("findExternalIP with empty map", func(t *testing.T) { var m *externalIPMapper var err error m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "1.2.3.4", }) assert.NoError(t, err, "should succeed") // attempt to find IPv6 that does not exist in the map extIP, err := m.findExternalIP("fe80::1") assert.NoError(t, err, "should succeed") assert.Equal(t, "fe80::1", extIP.String(), "should match") m, err = newExternalIPMapper(CandidateTypeUnspecified, []string{ "2200::1", }) assert.NoError(t, err, "should succeed") // attempt to find IPv4 that does not exist in the map extIP, err = m.findExternalIP("10.0.0.1") assert.NoError(t, err, "should succeed") assert.Equal(t, "10.0.0.1", extIP.String(), "should match") }) } 
ice-2.3.1/gather.go000066400000000000000000000521031437620344400140660ustar00rootroot00000000000000package ice import ( "context" "crypto/tls" "errors" "fmt" "io" "net" "reflect" "sync" "time" "github.com/pion/dtls/v2" "github.com/pion/ice/v2/internal/fakenet" stunx "github.com/pion/ice/v2/internal/stun" "github.com/pion/logging" "github.com/pion/turn/v2" ) const ( stunGatherTimeout = time.Second * 5 ) // Close a net.Conn and log if we have a failure func closeConnAndLog(c io.Closer, log logging.LeveledLogger, msg string) { if c == nil || (reflect.ValueOf(c).Kind() == reflect.Ptr && reflect.ValueOf(c).IsNil()) { log.Warnf("Conn is not allocated (%s)", msg) return } log.Warnf(msg) if err := c.Close(); err != nil { log.Warnf("Failed to close conn: %v", err) } } // GatherCandidates initiates the trickle based gathering process. func (a *Agent) GatherCandidates() error { var gatherErr error if runErr := a.run(a.context(), func(ctx context.Context, agent *Agent) { if a.gatheringState != GatheringStateNew { gatherErr = ErrMultipleGatherAttempted return } else if a.onCandidateHdlr.Load() == nil { gatherErr = ErrNoOnCandidateHandler return } a.gatherCandidateCancel() // Cancel previous gathering routine ctx, cancel := context.WithCancel(ctx) a.gatherCandidateCancel = cancel a.gatherCandidateDone = make(chan struct{}) go a.gatherCandidates(ctx) }); runErr != nil { return runErr } return gatherErr } func (a *Agent) gatherCandidates(ctx context.Context) { defer close(a.gatherCandidateDone) if err := a.setGatheringState(GatheringStateGathering); err != nil { //nolint:contextcheck a.log.Warnf("failed to set gatheringState to GatheringStateGathering: %v", err) return } var wg sync.WaitGroup for _, t := range a.candidateTypes { switch t { case CandidateTypeHost: wg.Add(1) go func() { a.gatherCandidatesLocal(ctx, a.networkTypes) wg.Done() }() case CandidateTypeServerReflexive: wg.Add(1) go func() { if a.udpMuxSrflx != nil { a.gatherCandidatesSrflxUDPMux(ctx, a.urls, 
a.networkTypes) } else { a.gatherCandidatesSrflx(ctx, a.urls, a.networkTypes) } wg.Done() }() if a.extIPMapper != nil && a.extIPMapper.candidateType == CandidateTypeServerReflexive { wg.Add(1) go func() { a.gatherCandidatesSrflxMapped(ctx, a.networkTypes) wg.Done() }() } case CandidateTypeRelay: wg.Add(1) go func() { a.gatherCandidatesRelay(ctx, a.urls) wg.Done() }() case CandidateTypePeerReflexive, CandidateTypeUnspecified: } } // Block until all STUN and TURN URLs have been gathered (or timed out) wg.Wait() if err := a.setGatheringState(GatheringStateComplete); err != nil { //nolint:contextcheck a.log.Warnf("failed to set gatheringState to GatheringStateComplete: %v", err) } } func (a *Agent) gatherCandidatesLocal(ctx context.Context, networkTypes []NetworkType) { //nolint:gocognit networks := map[string]struct{}{} for _, networkType := range networkTypes { if networkType.IsTCP() { networks[tcp] = struct{}{} } else { networks[udp] = struct{}{} } } // when UDPMux is enabled, skip other UDP candidates if a.udpMux != nil { if err := a.gatherCandidatesLocalUDPMux(ctx); err != nil { a.log.Warnf("could not create host candidate for UDPMux: %s", err) } delete(networks, udp) } localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, networkTypes, a.includeLoopback) if err != nil { a.log.Warnf("failed to iterate local interfaces, host candidates will not be gathered %s", err) return } for _, ip := range localIPs { mappedIP := ip if a.mDNSMode != MulticastDNSModeQueryAndGather && a.extIPMapper != nil && a.extIPMapper.candidateType == CandidateTypeHost { if _mappedIP, innerErr := a.extIPMapper.findExternalIP(ip.String()); innerErr == nil { mappedIP = _mappedIP } else { a.log.Warnf("1:1 NAT mapping is enabled but no external IP is found for %s", ip.String()) } } address := mappedIP.String() if a.mDNSMode == MulticastDNSModeQueryAndGather { address = a.mDNSName } for network := range networks { type connAndPort struct { conn net.PacketConn port int } var ( conns 
[]connAndPort tcpType TCPType ) switch network { case tcp: // Handle ICE TCP passive mode var muxConns []net.PacketConn if multi, ok := a.tcpMux.(AllConnsGetter); ok { a.log.Debugf("GetAllConns by ufrag: %s", a.localUfrag) muxConns, err = multi.GetAllConns(a.localUfrag, mappedIP.To4() == nil, ip) if err != nil { if !errors.Is(err, ErrTCPMuxNotInitialized) { a.log.Warnf("error getting all tcp conns by ufrag: %s %s %s", network, ip, a.localUfrag) } continue } } else { a.log.Debugf("GetConn by ufrag: %s", a.localUfrag) conn, err := a.tcpMux.GetConnByUfrag(a.localUfrag, mappedIP.To4() == nil, ip) if err != nil { if !errors.Is(err, ErrTCPMuxNotInitialized) { a.log.Warnf("error getting tcp conn by ufrag: %s %s %s", network, ip, a.localUfrag) } continue } muxConns = []net.PacketConn{conn} } // Extract the port for each PacketConn we got. for _, conn := range muxConns { if tcpConn, ok := conn.LocalAddr().(*net.TCPAddr); ok { conns = append(conns, connAndPort{conn, tcpConn.Port}) } else { a.log.Warnf("failed to get port of conn from TCPMux: %s %s %s", network, ip, a.localUfrag) } } if len(conns) == 0 { // Didn't succeed with any, try the next network. continue } tcpType = TCPTypePassive // is there a way to verify that the listen address is even // accessible from the current interface. 
case udp: conn, err := listenUDPInPortRange(a.net, a.log, int(a.portMax), int(a.portMin), network, &net.UDPAddr{IP: ip, Port: 0}) if err != nil { a.log.Warnf("could not listen %s %s", network, ip) continue } if udpConn, ok := conn.LocalAddr().(*net.UDPAddr); ok { conns = append(conns, connAndPort{conn, udpConn.Port}) } else { a.log.Warnf("failed to get port of UDPAddr from ListenUDPInPortRange: %s %s %s", network, ip, a.localUfrag) continue } } for _, connAndPort := range conns { hostConfig := CandidateHostConfig{ Network: network, Address: address, Port: connAndPort.port, Component: ComponentRTP, TCPType: tcpType, } c, err := NewCandidateHost(&hostConfig) if err != nil { closeConnAndLog(connAndPort.conn, a.log, fmt.Sprintf("Failed to create host candidate: %s %s %d: %v", network, mappedIP, connAndPort.port, err)) continue } if a.mDNSMode == MulticastDNSModeQueryAndGather { if err = c.setIP(ip); err != nil { closeConnAndLog(connAndPort.conn, a.log, fmt.Sprintf("Failed to create host candidate: %s %s %d: %v", network, mappedIP, connAndPort.port, err)) continue } } if err := a.addCandidate(ctx, c, connAndPort.conn); err != nil { if closeErr := c.close(); closeErr != nil { a.log.Warnf("Failed to close candidate: %v", closeErr) } a.log.Warnf("Failed to append to localCandidates and run onCandidateHdlr: %v", err) } } } } } func (a *Agent) gatherCandidatesLocalUDPMux(ctx context.Context) error { //nolint:gocognit if a.udpMux == nil { return errUDPMuxDisabled } localAddresses := a.udpMux.GetListenAddresses() for _, addr := range localAddresses { udpAddr, ok := addr.(*net.UDPAddr) if !ok { return errInvalidAddress } candidateIP := udpAddr.IP if a.extIPMapper != nil && a.extIPMapper.candidateType == CandidateTypeHost { if mappedIP, innerErr := a.extIPMapper.findExternalIP(candidateIP.String()); innerErr != nil { a.log.Warnf("1:1 NAT mapping is enabled but no external IP is found for %s", candidateIP.String()) continue } else { candidateIP = mappedIP } } conn, err := 
a.udpMux.GetConn(a.localUfrag, udpAddr) if err != nil { return err } hostConfig := CandidateHostConfig{ Network: udp, Address: candidateIP.String(), Port: udpAddr.Port, Component: ComponentRTP, } c, err := NewCandidateHost(&hostConfig) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("Failed to create host mux candidate: %s %d: %v", candidateIP, udpAddr.Port, err)) continue } if err := a.addCandidate(ctx, c, conn); err != nil { if closeErr := c.close(); closeErr != nil { a.log.Warnf("Failed to close candidate: %v", closeErr) } closeConnAndLog(conn, a.log, fmt.Sprintf("Failed to add candidate: %s %d: %v", candidateIP, udpAddr.Port, err)) continue } } return nil } func (a *Agent) gatherCandidatesSrflxMapped(ctx context.Context, networkTypes []NetworkType) { var wg sync.WaitGroup defer wg.Wait() for _, networkType := range networkTypes { if networkType.IsTCP() { continue } network := networkType.String() wg.Add(1) go func() { defer wg.Done() conn, err := listenUDPInPortRange(a.net, a.log, int(a.portMax), int(a.portMin), network, &net.UDPAddr{IP: nil, Port: 0}) if err != nil { a.log.Warnf("Failed to listen %s: %v", network, err) return } lAddr, ok := conn.LocalAddr().(*net.UDPAddr) if !ok { closeConnAndLog(conn, a.log, "1:1 NAT mapping is enabled but LocalAddr is not a UDPAddr") return } mappedIP, err := a.extIPMapper.findExternalIP(lAddr.IP.String()) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("1:1 NAT mapping is enabled but no external IP is found for %s", lAddr.IP.String())) return } srflxConfig := CandidateServerReflexiveConfig{ Network: network, Address: mappedIP.String(), Port: lAddr.Port, Component: ComponentRTP, RelAddr: lAddr.IP.String(), RelPort: lAddr.Port, } c, err := NewCandidateServerReflexive(&srflxConfig) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("Failed to create server reflexive candidate: %s %s %d: %v", network, mappedIP.String(), lAddr.Port, err)) return } if err := a.addCandidate(ctx, c, conn); err != nil { if 
closeErr := c.close(); closeErr != nil { a.log.Warnf("Failed to close candidate: %v", closeErr) } a.log.Warnf("Failed to append to localCandidates and run onCandidateHdlr: %v", err) } }() } } func (a *Agent) gatherCandidatesSrflxUDPMux(ctx context.Context, urls []*URL, networkTypes []NetworkType) { //nolint:gocognit var wg sync.WaitGroup defer wg.Wait() for _, networkType := range networkTypes { if networkType.IsTCP() { continue } for i := range urls { for _, listenAddr := range a.udpMuxSrflx.GetListenAddresses() { udpAddr, ok := listenAddr.(*net.UDPAddr) if !ok { a.log.Warn("Failed to cast udpMuxSrflx listen address to UDPAddr") continue } wg.Add(1) go func(url URL, network string, localAddr *net.UDPAddr) { defer wg.Done() hostPort := fmt.Sprintf("%s:%d", url.Host, url.Port) serverAddr, err := a.net.ResolveUDPAddr(network, hostPort) if err != nil { a.log.Warnf("failed to resolve stun host: %s: %v", hostPort, err) return } xorAddr, err := a.udpMuxSrflx.GetXORMappedAddr(serverAddr, stunGatherTimeout) if err != nil { a.log.Warnf("could not get server reflexive address %s %s: %v", network, url, err) return } conn, err := a.udpMuxSrflx.GetConnForURL(a.localUfrag, url.String(), localAddr) if err != nil { a.log.Warnf("could not find connection in UDPMuxSrflx %s %s: %v", network, url, err) return } ip := xorAddr.IP port := xorAddr.Port srflxConfig := CandidateServerReflexiveConfig{ Network: network, Address: ip.String(), Port: port, Component: ComponentRTP, RelAddr: localAddr.IP.String(), RelPort: localAddr.Port, } c, err := NewCandidateServerReflexive(&srflxConfig) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("Failed to create server reflexive candidate: %s %s %d: %v", network, ip, port, err)) return } if err := a.addCandidate(ctx, c, conn); err != nil { if closeErr := c.close(); closeErr != nil { a.log.Warnf("Failed to close candidate: %v", closeErr) } a.log.Warnf("Failed to append to localCandidates and run onCandidateHdlr: %v", err) } }(*urls[i], 
networkType.String(), udpAddr) } } } } func (a *Agent) gatherCandidatesSrflx(ctx context.Context, urls []*URL, networkTypes []NetworkType) { //nolint:gocognit var wg sync.WaitGroup defer wg.Wait() for _, networkType := range networkTypes { if networkType.IsTCP() { continue } for i := range urls { wg.Add(1) go func(url URL, network string) { defer wg.Done() hostPort := fmt.Sprintf("%s:%d", url.Host, url.Port) serverAddr, err := a.net.ResolveUDPAddr(network, hostPort) if err != nil { a.log.Warnf("failed to resolve stun host: %s: %v", hostPort, err) return } conn, err := listenUDPInPortRange(a.net, a.log, int(a.portMax), int(a.portMin), network, &net.UDPAddr{IP: nil, Port: 0}) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("Failed to listen for %s: %v", serverAddr.String(), err)) return } // If the agent closes midway through the connection // we end it early to prevent close delay. cancelCtx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() go func() { select { case <-cancelCtx.Done(): return case <-a.done: _ = conn.Close() } }() xorAddr, err := stunx.GetXORMappedAddr(conn, serverAddr, stunGatherTimeout) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("could not get server reflexive address %s %s: %v", network, url, err)) return } ip := xorAddr.IP port := xorAddr.Port lAddr := conn.LocalAddr().(*net.UDPAddr) //nolint:forcetypeassert srflxConfig := CandidateServerReflexiveConfig{ Network: network, Address: ip.String(), Port: port, Component: ComponentRTP, RelAddr: lAddr.IP.String(), RelPort: lAddr.Port, } c, err := NewCandidateServerReflexive(&srflxConfig) if err != nil { closeConnAndLog(conn, a.log, fmt.Sprintf("Failed to create server reflexive candidate: %s %s %d: %v", network, ip, port, err)) return } if err := a.addCandidate(ctx, c, conn); err != nil { if closeErr := c.close(); closeErr != nil { a.log.Warnf("Failed to close candidate: %v", closeErr) } a.log.Warnf("Failed to append to localCandidates and run onCandidateHdlr: %v", err) } 
}(*urls[i], networkType.String()) } } } func (a *Agent) gatherCandidatesRelay(ctx context.Context, urls []*URL) { //nolint:gocognit var wg sync.WaitGroup defer wg.Wait() network := NetworkTypeUDP4.String() for i := range urls { switch { case urls[i].Scheme != SchemeTypeTURN && urls[i].Scheme != SchemeTypeTURNS: continue case urls[i].Username == "": a.log.Errorf("Failed to gather relay candidates: %v", ErrUsernameEmpty) return case urls[i].Password == "": a.log.Errorf("Failed to gather relay candidates: %v", ErrPasswordEmpty) return } wg.Add(1) go func(url URL) { defer wg.Done() TURNServerAddr := fmt.Sprintf("%s:%d", url.Host, url.Port) var ( locConn net.PacketConn err error RelAddr string RelPort int relayProtocol string ) switch { case url.Proto == ProtoTypeUDP && url.Scheme == SchemeTypeTURN: if locConn, err = a.net.ListenPacket(network, "0.0.0.0:0"); err != nil { a.log.Warnf("Failed to listen %s: %v", network, err) return } RelAddr = locConn.LocalAddr().(*net.UDPAddr).IP.String() //nolint:forcetypeassert RelPort = locConn.LocalAddr().(*net.UDPAddr).Port //nolint:forcetypeassert relayProtocol = udp case a.proxyDialer != nil && url.Proto == ProtoTypeTCP && (url.Scheme == SchemeTypeTURN || url.Scheme == SchemeTypeTURNS): conn, connectErr := a.proxyDialer.Dial(NetworkTypeTCP4.String(), TURNServerAddr) if connectErr != nil { a.log.Warnf("Failed to Dial TCP Addr %s via proxy dialer: %v", TURNServerAddr, connectErr) return } RelAddr = conn.LocalAddr().(*net.TCPAddr).IP.String() //nolint:forcetypeassert RelPort = conn.LocalAddr().(*net.TCPAddr).Port //nolint:forcetypeassert if url.Scheme == SchemeTypeTURN { relayProtocol = tcp } else if url.Scheme == SchemeTypeTURNS { relayProtocol = "tls" } locConn = turn.NewSTUNConn(conn) case url.Proto == ProtoTypeTCP && url.Scheme == SchemeTypeTURN: tcpAddr, connectErr := a.net.ResolveTCPAddr(NetworkTypeTCP4.String(), TURNServerAddr) if connectErr != nil { a.log.Warnf("Failed to resolve TCP Addr %s: %v", TURNServerAddr, connectErr) 
return } conn, connectErr := a.net.DialTCP(NetworkTypeTCP4.String(), nil, tcpAddr) if connectErr != nil { a.log.Warnf("Failed to Dial TCP Addr %s: %v", TURNServerAddr, connectErr) return } RelAddr = conn.LocalAddr().(*net.TCPAddr).IP.String() //nolint:forcetypeassert RelPort = conn.LocalAddr().(*net.TCPAddr).Port //nolint:forcetypeassert relayProtocol = tcp locConn = turn.NewSTUNConn(conn) case url.Proto == ProtoTypeUDP && url.Scheme == SchemeTypeTURNS: udpAddr, connectErr := a.net.ResolveUDPAddr(network, TURNServerAddr) if connectErr != nil { a.log.Warnf("Failed to resolve UDP Addr %s: %v", TURNServerAddr, connectErr) return } udpConn, dialErr := a.net.DialUDP("udp", nil, udpAddr) if dialErr != nil { a.log.Warnf("Failed to dial DTLS Address %s: %v", TURNServerAddr, dialErr) return } conn, connectErr := dtls.ClientWithContext(ctx, udpConn, &dtls.Config{ ServerName: url.Host, InsecureSkipVerify: a.insecureSkipVerify, //nolint:gosec }) if connectErr != nil { a.log.Warnf("Failed to create DTLS client: %v", TURNServerAddr, connectErr) return } RelAddr = conn.LocalAddr().(*net.UDPAddr).IP.String() //nolint:forcetypeassert RelPort = conn.LocalAddr().(*net.UDPAddr).Port //nolint:forcetypeassert relayProtocol = "dtls" locConn = &fakenet.PacketConn{Conn: conn} case url.Proto == ProtoTypeTCP && url.Scheme == SchemeTypeTURNS: tcpAddr, resolvErr := a.net.ResolveTCPAddr(NetworkTypeTCP4.String(), TURNServerAddr) if resolvErr != nil { a.log.Warnf("Failed to resolve relay address %s: %v", TURNServerAddr, resolvErr) return } tcpConn, dialErr := a.net.DialTCP(NetworkTypeTCP4.String(), nil, tcpAddr) if dialErr != nil { a.log.Warnf("Failed to connect to relay: %v", dialErr) return } conn := tls.Client(tcpConn, &tls.Config{ ServerName: url.Host, InsecureSkipVerify: a.insecureSkipVerify, //nolint:gosec }) if hsErr := conn.HandshakeContext(ctx); hsErr != nil { if closeErr := tcpConn.Close(); closeErr != nil { a.log.Errorf("Failed to close relay connection: %v", closeErr) } 
a.log.Warnf("Failed to connect to relay: %v", hsErr) return } RelAddr = conn.LocalAddr().(*net.TCPAddr).IP.String() //nolint:forcetypeassert RelPort = conn.LocalAddr().(*net.TCPAddr).Port //nolint:forcetypeassert relayProtocol = "tls" locConn = turn.NewSTUNConn(conn) default: a.log.Warnf("Unable to handle URL in gatherCandidatesRelay %v", url) return } client, err := turn.NewClient(&turn.ClientConfig{ TURNServerAddr: TURNServerAddr, Conn: locConn, Username: url.Username, Password: url.Password, LoggerFactory: a.loggerFactory, Net: a.net, }) if err != nil { closeConnAndLog(locConn, a.log, fmt.Sprintf("Failed to build new turn.Client %s %s", TURNServerAddr, err)) return } if err = client.Listen(); err != nil { client.Close() closeConnAndLog(locConn, a.log, fmt.Sprintf("Failed to listen on turn.Client %s %s", TURNServerAddr, err)) return } relayConn, err := client.Allocate() if err != nil { client.Close() closeConnAndLog(locConn, a.log, fmt.Sprintf("Failed to allocate on turn.Client %s %s", TURNServerAddr, err)) return } rAddr := relayConn.LocalAddr().(*net.UDPAddr) //nolint:forcetypeassert relayConfig := CandidateRelayConfig{ Network: network, Component: ComponentRTP, Address: rAddr.IP.String(), Port: rAddr.Port, RelAddr: RelAddr, RelPort: RelPort, RelayProtocol: relayProtocol, OnClose: func() error { client.Close() return locConn.Close() }, } relayConnClose := func() { if relayConErr := relayConn.Close(); relayConErr != nil { a.log.Warnf("Failed to close relay %v", relayConErr) } } candidate, err := NewCandidateRelay(&relayConfig) if err != nil { relayConnClose() client.Close() closeConnAndLog(locConn, a.log, fmt.Sprintf("Failed to create relay candidate: %s %s: %v", network, rAddr.String(), err)) return } if err := a.addCandidate(ctx, candidate, relayConn); err != nil { relayConnClose() if closeErr := candidate.close(); closeErr != nil { a.log.Warnf("Failed to close candidate: %v", closeErr) } a.log.Warnf("Failed to append to localCandidates and run 
onCandidateHdlr: %v", err) } }(*urls[i]) } } ice-2.3.1/gather_test.go000066400000000000000000000526031437620344400151320ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "context" "crypto/tls" "io" "net" "net/url" "reflect" "sort" "strconv" "sync" "sync/atomic" "testing" "time" "github.com/pion/dtls/v2" "github.com/pion/dtls/v2/pkg/crypto/selfsign" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2/test" "github.com/pion/turn/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/proxy" ) func TestListenUDP(t *testing.T) { a, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) assert.NotEqual(t, len(localIPs), 0, "localInterfaces found no interfaces, unable to test") assert.NoError(t, err) ip := localIPs[0] conn, err := listenUDPInPortRange(a.net, a.log, 0, 0, udp, &net.UDPAddr{IP: ip, Port: 0}) assert.NoError(t, err, "listenUDP error with no port restriction") assert.NotNil(t, conn, "listenUDP error with no port restriction return a nil conn") _, err = listenUDPInPortRange(a.net, a.log, 4999, 5000, udp, &net.UDPAddr{IP: ip, Port: 0}) assert.Equal(t, err, ErrPort, "listenUDP with invalid port range did not return ErrPort") conn, err = listenUDPInPortRange(a.net, a.log, 5000, 5000, udp, &net.UDPAddr{IP: ip, Port: 0}) assert.NoError(t, err, "listenUDP error with no port restriction") assert.NotNil(t, conn, "listenUDP error with no port restriction return a nil conn") _, port, err := net.SplitHostPort(conn.LocalAddr().String()) assert.NoError(t, err) assert.Equal(t, port, "5000", "listenUDP with port restriction of 5000 listened on incorrect port") portMin := 5100 portMax := 5109 total := portMax - portMin + 1 result := make([]int, 0, total) portRange := make([]int, 0, total) for i := 0; i < total; i++ { conn, err = listenUDPInPortRange(a.net, a.log, portMax, 
portMin, udp, &net.UDPAddr{IP: ip, Port: 0}) assert.NoError(t, err, "listenUDP error with no port restriction") assert.NotNil(t, conn, "listenUDP error with no port restriction return a nil conn") _, port, err = net.SplitHostPort(conn.LocalAddr().String()) if err != nil { t.Fatal(err) } p, _ := strconv.Atoi(port) if p < portMin || p > portMax { t.Fatalf("listenUDP with port restriction [%d, %d] listened on incorrect port (%s)", portMin, portMax, port) } result = append(result, p) portRange = append(portRange, portMin+i) } if sort.IntsAreSorted(result) { t.Fatalf("listenUDP with port restriction [%d, %d], ports result should be random", portMin, portMax) } sort.Ints(result) if !reflect.DeepEqual(result, portRange) { t.Fatalf("listenUDP with port restriction [%d, %d], got:%v, want:%v", portMin, portMax, result, portRange) } _, err = listenUDPInPortRange(a.net, a.log, portMax, portMin, udp, &net.UDPAddr{IP: ip, Port: 0}) assert.Equal(t, err, ErrPort, "listenUDP with port restriction [%d, %d], did not return ErrPort", portMin, portMax) assert.NoError(t, a.Close()) } func TestLoopbackCandidate(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() type testCase struct { name string agentConfig *AgentConfig loExpected bool } mux, err := NewMultiUDPMuxFromPort(12500) assert.NoError(t, err) muxWithLo, errlo := NewMultiUDPMuxFromPort(12501, UDPMuxFromPortWithLoopback()) assert.NoError(t, errlo) testCases := []testCase{ { name: "mux should not have loopback candidate", agentConfig: &AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4, NetworkTypeUDP6}, UDPMux: mux, }, loExpected: false, }, { name: "mux with loopback should not have loopback candidate", agentConfig: &AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4, NetworkTypeUDP6}, UDPMux: muxWithLo, }, loExpected: true, }, { name: "includeloopback enabled", agentConfig: &AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4, NetworkTypeUDP6}, 
IncludeLoopback: true, }, loExpected: true, }, { name: "includeloopback disabled", agentConfig: &AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4, NetworkTypeUDP6}, IncludeLoopback: false, }, loExpected: false, }, } for _, tc := range testCases { tcase := tc t.Run(tcase.name, func(t *testing.T) { a, err := NewAgent(tc.agentConfig) assert.NoError(t, err) candidateGathered, candidateGatheredFunc := context.WithCancel(context.Background()) var loopback int32 assert.NoError(t, a.OnCandidate(func(c Candidate) { if c != nil { if net.ParseIP(c.Address()).IsLoopback() { atomic.StoreInt32(&loopback, 1) } } else { candidateGatheredFunc() return } t.Log(c.NetworkType(), c.Priority(), c) })) assert.NoError(t, a.GatherCandidates()) <-candidateGathered.Done() assert.NoError(t, a.Close()) assert.Equal(t, tcase.loExpected, atomic.LoadInt32(&loopback) == 1) }) } assert.NoError(t, mux.Close()) assert.NoError(t, muxWithLo.Close()) } // Assert that STUN gathering is done concurrently func TestSTUNConcurrency(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() serverPort := randomPort(t) serverListener, err := net.ListenPacket("udp4", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) server, err := turn.NewServer(turn.ServerConfig{ Realm: "pion.ly", AuthHandler: optimisticAuthHandler, PacketConnConfigs: []turn.PacketConnConfig{ { PacketConn: serverListener, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }, }, }) assert.NoError(t, err) urls := []*URL{} for i := 0; i <= 10; i++ { urls = append(urls, &URL{ Scheme: SchemeTypeSTUN, Host: "127.0.0.1", Port: serverPort + 1, }) } urls = append(urls, &URL{ Scheme: SchemeTypeSTUN, Host: "127.0.0.1", Port: serverPort, }) listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, }) require.NoError(t, err) defer func() { _ = listener.Close() }() a, err := NewAgent(&AgentConfig{ NetworkTypes: 
supportedNetworkTypes(), Urls: urls, CandidateTypes: []CandidateType{CandidateTypeHost, CandidateTypeServerReflexive}, TCPMux: NewTCPMuxDefault( TCPMuxParams{ Listener: listener, Logger: logging.NewDefaultLoggerFactory().NewLogger("ice"), ReadBufferSize: 8, }, ), }) assert.NoError(t, err) candidateGathered, candidateGatheredFunc := context.WithCancel(context.Background()) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c == nil { candidateGatheredFunc() return } t.Log(c.NetworkType(), c.Priority(), c) })) assert.NoError(t, a.GatherCandidates()) <-candidateGathered.Done() assert.NoError(t, a.Close()) assert.NoError(t, server.Close()) } // Assert that TURN gathering is done concurrently func TestTURNConcurrency(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() runTest := func(protocol ProtoType, scheme SchemeType, packetConn net.PacketConn, listener net.Listener, serverPort int) { packetConnConfigs := []turn.PacketConnConfig{} if packetConn != nil { packetConnConfigs = append(packetConnConfigs, turn.PacketConnConfig{ PacketConn: packetConn, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }) } listenerConfigs := []turn.ListenerConfig{} if listener != nil { listenerConfigs = append(listenerConfigs, turn.ListenerConfig{ Listener: listener, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }) } server, err := turn.NewServer(turn.ServerConfig{ Realm: "pion.ly", AuthHandler: optimisticAuthHandler, PacketConnConfigs: packetConnConfigs, ListenerConfigs: listenerConfigs, }) assert.NoError(t, err) urls := []*URL{} for i := 0; i <= 10; i++ { urls = append(urls, &URL{ Scheme: scheme, Host: "127.0.0.1", Username: "username", Password: "password", Proto: protocol, Port: serverPort + 1 + i, }) } urls = append(urls, &URL{ Scheme: scheme, Host: "127.0.0.1", Username: "username", Password: "password", Proto: protocol, Port: serverPort, }) a, err := 
NewAgent(&AgentConfig{ CandidateTypes: []CandidateType{CandidateTypeRelay}, InsecureSkipVerify: true, NetworkTypes: supportedNetworkTypes(), Urls: urls, }) assert.NoError(t, err) candidateGathered, candidateGatheredFunc := context.WithCancel(context.Background()) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c != nil { candidateGatheredFunc() } })) assert.NoError(t, a.GatherCandidates()) <-candidateGathered.Done() assert.NoError(t, a.Close()) assert.NoError(t, server.Close()) } t.Run("UDP Relay", func(t *testing.T) { serverPort := randomPort(t) serverListener, err := net.ListenPacket("udp", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) runTest(ProtoTypeUDP, SchemeTypeTURN, serverListener, nil, serverPort) }) t.Run("TCP Relay", func(t *testing.T) { serverPort := randomPort(t) serverListener, err := net.Listen("tcp", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) runTest(ProtoTypeTCP, SchemeTypeTURN, nil, serverListener, serverPort) }) t.Run("TLS Relay", func(t *testing.T) { certificate, genErr := selfsign.GenerateSelfSigned() assert.NoError(t, genErr) serverPort := randomPort(t) serverListener, err := tls.Listen("tcp", "127.0.0.1:"+strconv.Itoa(serverPort), &tls.Config{ //nolint:gosec Certificates: []tls.Certificate{certificate}, }) assert.NoError(t, err) runTest(ProtoTypeTCP, SchemeTypeTURNS, nil, serverListener, serverPort) }) t.Run("DTLS Relay", func(t *testing.T) { certificate, genErr := selfsign.GenerateSelfSigned() assert.NoError(t, genErr) serverPort := randomPort(t) serverListener, err := dtls.Listen("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: serverPort}, &dtls.Config{ Certificates: []tls.Certificate{certificate}, }) assert.NoError(t, err) runTest(ProtoTypeUDP, SchemeTypeTURNS, nil, serverListener, serverPort) }) } // Assert that STUN and TURN gathering are done concurrently func TestSTUNTURNConcurrency(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 8) 
defer lim.Stop() serverPort := randomPort(t) serverListener, err := net.ListenPacket("udp4", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) server, err := turn.NewServer(turn.ServerConfig{ Realm: "pion.ly", AuthHandler: optimisticAuthHandler, PacketConnConfigs: []turn.PacketConnConfig{ { PacketConn: serverListener, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }, }, }) assert.NoError(t, err) urls := []*URL{} for i := 0; i <= 10; i++ { urls = append(urls, &URL{ Scheme: SchemeTypeSTUN, Host: "127.0.0.1", Port: serverPort + 1, }) } urls = append(urls, &URL{ Scheme: SchemeTypeTURN, Proto: ProtoTypeUDP, Host: "127.0.0.1", Port: serverPort, Username: "username", Password: "password", }) a, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), Urls: urls, CandidateTypes: []CandidateType{CandidateTypeServerReflexive, CandidateTypeRelay}, }) assert.NoError(t, err) { gatherLim := test.TimeOut(time.Second * 3) // As TURN and STUN should be checked in parallel, this should complete before the default STUN timeout (5s) candidateGathered, candidateGatheredFunc := context.WithCancel(context.Background()) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c != nil { candidateGatheredFunc() } })) assert.NoError(t, a.GatherCandidates()) <-candidateGathered.Done() gatherLim.Stop() } assert.NoError(t, a.Close()) assert.NoError(t, server.Close()) } // Assert that srflx candidates can be gathered from TURN servers // // When TURN servers are utilized, both types of candidates // (i.e. srflx and relay) are obtained from the TURN server. 
// // https://tools.ietf.org/html/rfc5245#section-2.1 func TestTURNSrflx(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() serverPort := randomPort(t) serverListener, err := net.ListenPacket("udp4", "127.0.0.1:"+strconv.Itoa(serverPort)) assert.NoError(t, err) server, err := turn.NewServer(turn.ServerConfig{ Realm: "pion.ly", AuthHandler: optimisticAuthHandler, PacketConnConfigs: []turn.PacketConnConfig{ { PacketConn: serverListener, RelayAddressGenerator: &turn.RelayAddressGeneratorNone{Address: "127.0.0.1"}, }, }, }) assert.NoError(t, err) urls := []*URL{{ Scheme: SchemeTypeTURN, Proto: ProtoTypeUDP, Host: "127.0.0.1", Port: serverPort, Username: "username", Password: "password", }} a, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), Urls: urls, CandidateTypes: []CandidateType{CandidateTypeServerReflexive, CandidateTypeRelay}, }) assert.NoError(t, err) candidateGathered, candidateGatheredFunc := context.WithCancel(context.Background()) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c != nil && c.Type() == CandidateTypeServerReflexive { candidateGatheredFunc() } })) assert.NoError(t, a.GatherCandidates()) <-candidateGathered.Done() assert.NoError(t, a.Close()) assert.NoError(t, server.Close()) } func TestCloseConnLog(t *testing.T) { a, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) closeConnAndLog(nil, a.log, "normal nil") var nc *net.UDPConn closeConnAndLog(nc, a.log, "nil ptr") assert.NoError(t, a.Close()) } type mockProxy struct { proxyWasDialed func() } type mockConn struct{} func (m *mockConn) Read(b []byte) (n int, err error) { return 0, io.EOF } func (m *mockConn) Write(b []byte) (int, error) { return 0, io.EOF } func (m *mockConn) Close() error { return io.EOF } func (m *mockConn) LocalAddr() net.Addr { return &net.TCPAddr{} } func (m *mockConn) RemoteAddr() net.Addr { return &net.TCPAddr{} } func (m *mockConn) SetDeadline(t time.Time) error { return 
io.EOF } func (m *mockConn) SetReadDeadline(t time.Time) error { return io.EOF } func (m *mockConn) SetWriteDeadline(t time.Time) error { return io.EOF } func (m *mockProxy) Dial(network, addr string) (net.Conn, error) { m.proxyWasDialed() return &mockConn{}, nil } func TestTURNProxyDialer(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() proxyWasDialed, proxyWasDialedFunc := context.WithCancel(context.Background()) proxy.RegisterDialerType("tcp", func(*url.URL, proxy.Dialer) (proxy.Dialer, error) { return &mockProxy{proxyWasDialedFunc}, nil }) tcpProxyURI, err := url.Parse("tcp://fakeproxy:3128") assert.NoError(t, err) proxyDialer, err := proxy.FromURL(tcpProxyURI, proxy.Direct) assert.NoError(t, err) a, err := NewAgent(&AgentConfig{ CandidateTypes: []CandidateType{CandidateTypeRelay}, NetworkTypes: supportedNetworkTypes(), Urls: []*URL{ { Scheme: SchemeTypeTURN, Host: "127.0.0.1", Username: "username", Password: "password", Proto: ProtoTypeTCP, Port: 5000, }, }, ProxyDialer: proxyDialer, }) assert.NoError(t, err) candidateGatherFinish, candidateGatherFinishFunc := context.WithCancel(context.Background()) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c == nil { candidateGatherFinishFunc() } })) assert.NoError(t, a.GatherCandidates()) <-candidateGatherFinish.Done() <-proxyWasDialed.Done() assert.NoError(t, a.Close()) } // Assert that candidates are given for each mux in a MultiUDPMux func TestMultiUDPMuxUsage(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() var expectedPorts []int var udpMuxInstances []UDPMux for i := 0; i < 3; i++ { port := randomPort(t) conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{127, 0, 0, 1}, Port: port}) assert.NoError(t, err) defer func() { _ = conn.Close() }() expectedPorts = append(expectedPorts, port) muxDefault := NewUDPMuxDefault(UDPMuxParams{UDPConn: conn}) udpMuxInstances = 
append(udpMuxInstances, muxDefault) idx := i defer func() { _ = udpMuxInstances[idx].Close() }() } a, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), CandidateTypes: []CandidateType{CandidateTypeHost}, UDPMux: NewMultiUDPMuxDefault(udpMuxInstances...), }) assert.NoError(t, err) candidateCh := make(chan Candidate) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c == nil { close(candidateCh) return } candidateCh <- c })) assert.NoError(t, a.GatherCandidates()) portFound := make(map[int]bool) for c := range candidateCh { portFound[c.Port()] = true assert.True(t, c.NetworkType().IsUDP(), "All candidates should be UDP") } assert.Len(t, portFound, len(expectedPorts)) for _, port := range expectedPorts { assert.True(t, portFound[port], "There should be a candidate for each UDP mux port") } assert.NoError(t, a.Close()) } // Assert that candidates are given for each mux in a MultiTCPMux func TestMultiTCPMuxUsage(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() var expectedPorts []int var tcpMuxInstances []TCPMux for i := 0; i < 3; i++ { port := randomPort(t) listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, Port: port, }) assert.NoError(t, err) defer func() { _ = listener.Close() }() expectedPorts = append(expectedPorts, port) tcpMuxInstances = append(tcpMuxInstances, NewTCPMuxDefault(TCPMuxParams{ Listener: listener, ReadBufferSize: 8, })) } a, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), CandidateTypes: []CandidateType{CandidateTypeHost}, TCPMux: NewMultiTCPMuxDefault(tcpMuxInstances...), }) assert.NoError(t, err) candidateCh := make(chan Candidate) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c == nil { close(candidateCh) return } candidateCh <- c })) assert.NoError(t, a.GatherCandidates()) portFound := make(map[int]bool) for c := range candidateCh { if c.NetworkType().IsTCP() { portFound[c.Port()] = true } } 
assert.Len(t, portFound, len(expectedPorts)) for _, port := range expectedPorts { assert.True(t, portFound[port], "There should be a candidate for each TCP mux port") } assert.NoError(t, a.Close()) } // Assert that UniversalUDPMux is used while gathering when configured in the Agent func TestUniversalUDPMuxUsage(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{127, 0, 0, 1}, Port: randomPort(t)}) assert.NoError(t, err) defer func() { _ = conn.Close() }() udpMuxSrflx := &universalUDPMuxMock{ conn: conn, } numSTUNS := 3 urls := []*URL{} for i := 0; i < numSTUNS; i++ { urls = append(urls, &URL{ Scheme: SchemeTypeSTUN, Host: "127.0.0.1", Port: 3478 + i, }) } a, err := NewAgent(&AgentConfig{ NetworkTypes: supportedNetworkTypes(), Urls: urls, CandidateTypes: []CandidateType{CandidateTypeServerReflexive}, UDPMuxSrflx: udpMuxSrflx, }) assert.NoError(t, err) candidateGathered, candidateGatheredFunc := context.WithCancel(context.Background()) assert.NoError(t, a.OnCandidate(func(c Candidate) { if c == nil { candidateGatheredFunc() return } t.Log(c.NetworkType(), c.Priority(), c) })) assert.NoError(t, a.GatherCandidates()) <-candidateGathered.Done() assert.NoError(t, a.Close()) // twice because of 2 STUN servers configured assert.Equal(t, numSTUNS, udpMuxSrflx.getXORMappedAddrUsedTimes, "expected times that GetXORMappedAddr should be called") // one for Restart() when agent has been initialized and one time when Close() the agent assert.Equal(t, 2, udpMuxSrflx.removeConnByUfragTimes, "expected times that RemoveConnByUfrag should be called") // twice because of 2 STUN servers configured assert.Equal(t, numSTUNS, udpMuxSrflx.getConnForURLTimes, "expected times that GetConnForURL should be called") } type universalUDPMuxMock struct { UDPMux getXORMappedAddrUsedTimes int removeConnByUfragTimes int getConnForURLTimes int mu sync.Mutex conn *net.UDPConn } 
func (m *universalUDPMuxMock) GetRelayedAddr(turnAddr net.Addr, deadline time.Duration) (*net.Addr, error) { return nil, errNotImplemented } func (m *universalUDPMuxMock) GetConnForURL(ufrag string, url string, addr net.Addr) (net.PacketConn, error) { m.mu.Lock() defer m.mu.Unlock() m.getConnForURLTimes++ return m.conn, nil } func (m *universalUDPMuxMock) GetXORMappedAddr(serverAddr net.Addr, deadline time.Duration) (*stun.XORMappedAddress, error) { m.mu.Lock() defer m.mu.Unlock() m.getXORMappedAddrUsedTimes++ return &stun.XORMappedAddress{IP: net.IP{100, 64, 0, 1}, Port: 77878}, nil } func (m *universalUDPMuxMock) RemoveConnByUfrag(ufrag string) { m.mu.Lock() defer m.mu.Unlock() m.removeConnByUfragTimes++ } func (m *universalUDPMuxMock) GetListenAddresses() []net.Addr { return []net.Addr{m.conn.LocalAddr()} } ice-2.3.1/gather_vnet_test.go000066400000000000000000000301671437620344400161670ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "context" "errors" "fmt" "net" "testing" "github.com/pion/logging" "github.com/pion/transport/v2/test" "github.com/pion/transport/v2/vnet" "github.com/stretchr/testify/assert" ) func TestVNetGather(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() // log := loggerFactory.NewLogger("test") t.Run("No local IP address", func(t *testing.T) { n, err := vnet.NewNet(&vnet.NetConfig{}) assert.NoError(t, err) a, err := NewAgent(&AgentConfig{ Net: n, }) assert.NoError(t, err) localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) if len(localIPs) > 0 { t.Fatal("should return no local IP") } else if err != nil { t.Fatal(err) } assert.NoError(t, a.Close()) }) t.Run("Gather a dynamic IP address", func(t *testing.T) { cider := "1.2.3.0/24" _, ipNet, err := net.ParseCIDR(cider) if err != nil { t.Fatalf("Failed to parse CIDR: %s", err) } r, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: cider, 
LoggerFactory: loggerFactory, }) if err != nil { t.Fatalf("Failed to create a router: %s", err) } nw, err := vnet.NewNet(&vnet.NetConfig{}) if err != nil { t.Fatalf("Failed to create a Net: %s", err) } err = r.AddNet(nw) if err != nil { t.Fatalf("Failed to add a Net to the router: %s", err) } a, err := NewAgent(&AgentConfig{ Net: nw, }) assert.NoError(t, err) localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) if len(localIPs) == 0 { t.Fatal("should have one local IP") } else if err != nil { t.Fatal(err) } for _, ip := range localIPs { if ip.IsLoopback() { t.Fatal("should not return loopback IP") } if !ipNet.Contains(ip) { t.Fatal("should be contained in the CIDR") } } assert.NoError(t, a.Close()) }) t.Run("listenUDP", func(t *testing.T) { r, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "1.2.3.0/24", LoggerFactory: loggerFactory, }) if err != nil { t.Fatalf("Failed to create a router: %s", err) } nw, err := vnet.NewNet(&vnet.NetConfig{}) if err != nil { t.Fatalf("Failed to create a Net: %s", err) } err = r.AddNet(nw) if err != nil { t.Fatalf("Failed to add a Net to the router: %s", err) } a, err := NewAgent(&AgentConfig{Net: nw}) if err != nil { t.Fatalf("Failed to create agent: %s", err) } localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) if len(localIPs) == 0 { t.Fatal("localInterfaces found no interfaces, unable to test") } else if err != nil { t.Fatal(err) } ip := localIPs[0] conn, err := listenUDPInPortRange(a.net, a.log, 0, 0, udp, &net.UDPAddr{IP: ip, Port: 0}) if err != nil { t.Fatalf("listenUDP error with no port restriction %v", err) } else if conn == nil { t.Fatalf("listenUDP error with no port restriction return a nil conn") } err = conn.Close() if err != nil { t.Fatalf("failed to close conn") } _, err = listenUDPInPortRange(a.net, a.log, 4999, 5000, udp, &net.UDPAddr{IP: ip, Port: 0}) if !errors.Is(err, ErrPort) { t.Fatal("listenUDP 
with invalid port range did not return ErrPort") } conn, err = listenUDPInPortRange(a.net, a.log, 5000, 5000, udp, &net.UDPAddr{IP: ip, Port: 0}) if err != nil { t.Fatalf("listenUDP error with no port restriction %v", err) } else if conn == nil { t.Fatalf("listenUDP error with no port restriction return a nil conn") } _, port, err := net.SplitHostPort(conn.LocalAddr().String()) if err != nil { t.Fatal(err) } else if port != "5000" { t.Fatalf("listenUDP with port restriction of 5000 listened on incorrect port (%s)", port) } assert.NoError(t, conn.Close()) assert.NoError(t, a.Close()) }) } func TestVNetGatherWithNAT1To1(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() log := loggerFactory.NewLogger("test") t.Run("gather 1:1 NAT external IPs as host candidates", func(t *testing.T) { externalIP0 := "1.2.3.4" externalIP1 := "1.2.3.5" localIP0 := "10.0.0.1" localIP1 := "10.0.0.2" map0 := fmt.Sprintf("%s/%s", externalIP0, localIP0) map1 := fmt.Sprintf("%s/%s", externalIP1, localIP1) wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "1.2.3.0/24", LoggerFactory: loggerFactory, }) assert.NoError(t, err, "should succeed") lan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "10.0.0.0/24", StaticIPs: []string{map0, map1}, NATType: &vnet.NATType{ Mode: vnet.NATModeNAT1To1, }, LoggerFactory: loggerFactory, }) assert.NoError(t, err, "should succeed") err = wan.AddRouter(lan) assert.NoError(t, err, "should succeed") nw, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{localIP0, localIP1}, }) if err != nil { t.Fatalf("Failed to create a Net: %s", err) } err = lan.AddNet(nw) assert.NoError(t, err, "should succeed") a, err := NewAgent(&AgentConfig{ NetworkTypes: []NetworkType{ NetworkTypeUDP4, }, NAT1To1IPs: []string{map0, map1}, Net: nw, }) assert.NoError(t, err, "should succeed") defer a.Close() // nolint:errcheck done := make(chan struct{}) err = a.OnCandidate(func(c Candidate) { if c == nil { close(done) } 
}) assert.NoError(t, err, "should succeed") err = a.GatherCandidates() assert.NoError(t, err, "should succeed") log.Debug("wait for gathering is done...") <-done log.Debug("gathering is done") candidates, err := a.GetLocalCandidates() assert.NoError(t, err, "should succeed") if len(candidates) != 2 { t.Fatal("There must be two candidates") } lAddr := [2]*net.UDPAddr{nil, nil} for i, candi := range candidates { lAddr[i] = candi.(*CandidateHost).conn.LocalAddr().(*net.UDPAddr) //nolint:forcetypeassert if candi.Port() != lAddr[i].Port { t.Fatalf("Unexpected candidate port: %d", candi.Port()) } } if candidates[0].Address() == externalIP0 { if candidates[1].Address() != externalIP1 { t.Fatalf("Unexpected candidate IP: %s", candidates[1].Address()) } if lAddr[0].IP.String() != localIP0 { t.Fatalf("Unexpected listen IP: %s", lAddr[0].IP.String()) } if lAddr[1].IP.String() != localIP1 { t.Fatalf("Unexpected listen IP: %s", lAddr[1].IP.String()) } } else if candidates[0].Address() == externalIP1 { if candidates[1].Address() != externalIP0 { t.Fatalf("Unexpected candidate IP: %s", candidates[1].Address()) } if lAddr[0].IP.String() != localIP1 { t.Fatalf("Unexpected listen IP: %s", lAddr[0].IP.String()) } if lAddr[1].IP.String() != localIP0 { t.Fatalf("Unexpected listen IP: %s", lAddr[1].IP.String()) } } }) t.Run("gather 1:1 NAT external IPs as srflx candidates", func(t *testing.T) { wan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "1.2.3.0/24", LoggerFactory: loggerFactory, }) assert.NoError(t, err, "should succeed") lan, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "10.0.0.0/24", StaticIPs: []string{ "1.2.3.4/10.0.0.1", }, NATType: &vnet.NATType{ Mode: vnet.NATModeNAT1To1, }, LoggerFactory: loggerFactory, }) assert.NoError(t, err, "should succeed") err = wan.AddRouter(lan) assert.NoError(t, err, "should succeed") nw, err := vnet.NewNet(&vnet.NetConfig{ StaticIPs: []string{ "10.0.0.1", }, }) if err != nil { t.Fatalf("Failed to create a Net: %s", err) } err = 
lan.AddNet(nw) assert.NoError(t, err, "should succeed") a, err := NewAgent(&AgentConfig{ NetworkTypes: []NetworkType{ NetworkTypeUDP4, }, NAT1To1IPs: []string{ "1.2.3.4", }, NAT1To1IPCandidateType: CandidateTypeServerReflexive, Net: nw, }) assert.NoError(t, err, "should succeed") defer a.Close() // nolint:errcheck done := make(chan struct{}) err = a.OnCandidate(func(c Candidate) { if c == nil { close(done) } }) assert.NoError(t, err, "should succeed") err = a.GatherCandidates() assert.NoError(t, err, "should succeed") log.Debug("wait for gathering is done...") <-done log.Debug("gathering is done") candidates, err := a.GetLocalCandidates() assert.NoError(t, err, "should succeed") if len(candidates) != 2 { t.Fatalf("Expected two candidates. actually %d", len(candidates)) } var candiHost *CandidateHost var candiSrflx *CandidateServerReflexive for _, candidate := range candidates { switch candi := candidate.(type) { case *CandidateHost: candiHost = candi case *CandidateServerReflexive: candiSrflx = candi default: t.Fatal("Unexpected candidate type") } } assert.NotNil(t, candiHost, "should not be nil") assert.Equal(t, "10.0.0.1", candiHost.Address(), "should match") assert.NotNil(t, candiSrflx, "should not be nil") assert.Equal(t, "1.2.3.4", candiSrflx.Address(), "should match") }) } func TestVNetGatherWithInterfaceFilter(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() r, err := vnet.NewRouter(&vnet.RouterConfig{ CIDR: "1.2.3.0/24", LoggerFactory: loggerFactory, }) if err != nil { t.Fatalf("Failed to create a router: %s", err) } nw, err := vnet.NewNet(&vnet.NetConfig{}) if err != nil { t.Fatalf("Failed to create a Net: %s", err) } if err = r.AddNet(nw); err != nil { t.Fatalf("Failed to add a Net to the router: %s", err) } t.Run("InterfaceFilter should exclude the interface", func(t *testing.T) { a, err := NewAgent(&AgentConfig{ Net: nw, InterfaceFilter: func(interfaceName string) bool { assert.Equal(t, 
"eth0", interfaceName) return false }, }) assert.NoError(t, err) localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) if err != nil { t.Fatal(err) } else if len(localIPs) != 0 { t.Fatal("InterfaceFilter should have excluded everything") } assert.NoError(t, a.Close()) }) t.Run("IPFilter should exclude the IP", func(t *testing.T) { a, err := NewAgent(&AgentConfig{ Net: nw, IPFilter: func(ip net.IP) bool { assert.Equal(t, net.IP{1, 2, 3, 1}, ip) return false }, }) assert.NoError(t, err) localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) if err != nil { t.Fatal(err) } else if len(localIPs) != 0 { t.Fatal("IPFilter should have excluded everything") } assert.NoError(t, a.Close()) }) t.Run("InterfaceFilter should not exclude the interface", func(t *testing.T) { a, err := NewAgent(&AgentConfig{ Net: nw, InterfaceFilter: func(interfaceName string) bool { assert.Equal(t, "eth0", interfaceName) return true }, }) assert.NoError(t, err) localIPs, err := localInterfaces(a.net, a.interfaceFilter, a.ipFilter, []NetworkType{NetworkTypeUDP4}, false) if err != nil { t.Fatal(err) } else if len(localIPs) == 0 { t.Fatal("InterfaceFilter should not have excluded anything") } assert.NoError(t, a.Close()) }) } func TestVNetGather_TURNConnectionLeak(t *testing.T) { report := test.CheckRoutines(t) defer report() turnServerURL := &URL{ Scheme: SchemeTypeTURN, Host: vnetSTUNServerIP, Port: vnetSTUNServerPort, Username: "user", Password: "pass", Proto: ProtoTypeUDP, } // buildVNet with a Symmetric NATs for both LANs natType := &vnet.NATType{ MappingBehavior: vnet.EndpointAddrPortDependent, FilteringBehavior: vnet.EndpointAddrPortDependent, } v, err := buildVNet(natType, natType) if !assert.NoError(t, err, "should succeed") { return } defer v.close() cfg0 := &AgentConfig{ Urls: []*URL{ turnServerURL, }, NetworkTypes: supportedNetworkTypes(), MulticastDNSMode: 
MulticastDNSModeDisabled, NAT1To1IPs: []string{vnetGlobalIPA}, Net: v.net0, } aAgent, err := NewAgent(cfg0) if !assert.NoError(t, err, "should succeed") { return } aAgent.gatherCandidatesRelay(context.Background(), []*URL{turnServerURL}) // Assert relay conn leak on close. assert.NoError(t, aAgent.Close()) } ice-2.3.1/go.mod000066400000000000000000000007331437620344400133750ustar00rootroot00000000000000module github.com/pion/ice/v2 go 1.13 require ( github.com/google/uuid v1.3.0 github.com/kr/pretty v0.1.0 // indirect github.com/pion/dtls/v2 v2.2.6 github.com/pion/logging v0.2.2 github.com/pion/mdns v0.0.7 github.com/pion/randutil v0.1.0 github.com/pion/stun v0.4.0 github.com/pion/transport/v2 v2.0.2 github.com/pion/turn/v2 v2.1.0 github.com/stretchr/testify v1.8.1 golang.org/x/net v0.7.0 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect ) ice-2.3.1/go.sum000066400000000000000000000161661437620344400134310ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pion/dtls/v2 v2.2.6 h1:yXMxKr0Skd+Ub6A8UqXTRLSywskx93ooMRHsQUtd+Z4= github.com/pion/dtls/v2 v2.2.6/go.mod h1:t8fWJCIquY5rlQZwA2yWxUS1+OCrAdXrhVKXB5oD/wY= github.com/pion/logging v0.2.2 
h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.7 h1:P0UB4Sr6xDWEox0kTVxF0LmQihtCbSAdW0H2nEgkA3U= github.com/pion/mdns v0.0.7/go.mod h1:4iP2UbeFhLI/vWju/bw6ZfwjJzk0z8DNValjGxR/dD8= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= github.com/pion/stun v0.4.0 h1:vgRrbBE2htWHy7l3Zsxckk7rkjnjOsSM7PHZnBwo8rk= github.com/pion/stun v0.4.0/go.mod h1:QPsh1/SbXASntw3zkkrIk3ZJVKz4saBY2G7S10P3wCw= github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc= github.com/pion/transport/v2 v2.0.2 h1:St+8o+1PEzPT51O9bv+tH/KYYLMNR5Vwm5Z3Qkjsywg= github.com/pion/transport/v2 v2.0.2/go.mod h1:vrz6bUbFr/cjdwbnxq8OdDDzHf7JJfGsIRkxfpZoTA0= github.com/pion/turn/v2 v2.1.0 h1:5wGHSgGhJhP/RpabkUb/T9PdsAjkGLS6toYz5HNzoSI= github.com/pion/turn/v2 v2.1.0/go.mod h1:yrT5XbXSGX1VFSF31A3c1kCNB5bBZgk/uu5LET162qs= github.com/pion/udp/v2 v2.0.1 h1:xP0z6WNux1zWEjhC7onRA3EwwSliXqu1ElUZAQhUP54= github.com/pion/udp/v2 v2.0.1/go.mod h1:B7uvTMP00lzWdyMr/1PVZXtV3wpPIxBRd4Wl6AksXn8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ice-2.3.1/ice.go000066400000000000000000000037221437620344400133570ustar00rootroot00000000000000package ice // ConnectionState is an enum showing the state of a ICE Connection type ConnectionState int // List of supported States const ( // ConnectionStateNew ICE agent is gathering addresses ConnectionStateNew = iota + 1 // ConnectionStateChecking ICE agent has been given local and remote candidates, and is attempting to find a match ConnectionStateChecking // ConnectionStateConnected ICE agent has a pairing, but is still checking other pairs ConnectionStateConnected // ConnectionStateCompleted ICE agent has finished ConnectionStateCompleted // ConnectionStateFailed ICE agent never could successfully connect ConnectionStateFailed // ConnectionStateDisconnected ICE agent connected successfully, but has entered a failed state ConnectionStateDisconnected // ConnectionStateClosed ICE agent has finished and is no longer handling requests ConnectionStateClosed ) func (c ConnectionState) String() string { switch c { case ConnectionStateNew: return "New" case ConnectionStateChecking: return "Checking" case ConnectionStateConnected: return "Connected" case ConnectionStateCompleted: return "Completed" case ConnectionStateFailed: return "Failed" case ConnectionStateDisconnected: return "Disconnected" case ConnectionStateClosed: return "Closed" default: return "Invalid" } } // GatheringState describes the state of the candidate gathering process type GatheringState int const ( // GatheringStateNew indicates candidate gathering is not yet started GatheringStateNew GatheringState = iota + 1 // GatheringStateGathering 
indicates candidate gathering is ongoing GatheringStateGathering // GatheringStateComplete indicates candidate gathering has been completed GatheringStateComplete ) func (t GatheringState) String() string { switch t { case GatheringStateNew: return "new" case GatheringStateGathering: return "gathering" case GatheringStateComplete: return "complete" default: return ErrUnknownType.Error() } } ice-2.3.1/ice_test.go000066400000000000000000000022211437620344400144070ustar00rootroot00000000000000package ice import ( "testing" "github.com/stretchr/testify/assert" ) func TestConnectedState_String(t *testing.T) { testCases := []struct { connectionState ConnectionState expectedString string }{ {ConnectionState(Unknown), "Invalid"}, {ConnectionStateNew, "New"}, {ConnectionStateChecking, "Checking"}, {ConnectionStateConnected, "Connected"}, {ConnectionStateCompleted, "Completed"}, {ConnectionStateFailed, "Failed"}, {ConnectionStateDisconnected, "Disconnected"}, {ConnectionStateClosed, "Closed"}, } for i, testCase := range testCases { assert.Equal(t, testCase.expectedString, testCase.connectionState.String(), "testCase: %d %v", i, testCase, ) } } func TestGatheringState_String(t *testing.T) { testCases := []struct { gatheringState GatheringState expectedString string }{ {GatheringState(Unknown), ErrUnknownType.Error()}, {GatheringStateNew, "new"}, {GatheringStateGathering, "gathering"}, {GatheringStateComplete, "complete"}, } for i, testCase := range testCases { assert.Equal(t, testCase.expectedString, testCase.gatheringState.String(), "testCase: %d %v", i, testCase, ) } } ice-2.3.1/icecontrol.go000066400000000000000000000047201437620344400147570ustar00rootroot00000000000000package ice import ( "encoding/binary" "github.com/pion/stun" ) // tiebreaker is common helper for ICE-{CONTROLLED,CONTROLLING} // and represents the so-called tiebreaker number. type tiebreaker uint64 const tiebreakerSize = 8 // 64 bit // AddToAs adds tiebreaker value to m as t attribute. 
func (a tiebreaker) AddToAs(m *stun.Message, t stun.AttrType) error { v := make([]byte, tiebreakerSize) binary.BigEndian.PutUint64(v, uint64(a)) m.Add(t, v) return nil } // GetFromAs decodes tiebreaker value in message getting it as for t type. func (a *tiebreaker) GetFromAs(m *stun.Message, t stun.AttrType) error { v, err := m.Get(t) if err != nil { return err } if err = stun.CheckSize(t, len(v), tiebreakerSize); err != nil { return err } *a = tiebreaker(binary.BigEndian.Uint64(v)) return nil } // AttrControlled represents ICE-CONTROLLED attribute. type AttrControlled uint64 // AddTo adds ICE-CONTROLLED to message. func (c AttrControlled) AddTo(m *stun.Message) error { return tiebreaker(c).AddToAs(m, stun.AttrICEControlled) } // GetFrom decodes ICE-CONTROLLED from message. func (c *AttrControlled) GetFrom(m *stun.Message) error { return (*tiebreaker)(c).GetFromAs(m, stun.AttrICEControlled) } // AttrControlling represents ICE-CONTROLLING attribute. type AttrControlling uint64 // AddTo adds ICE-CONTROLLING to message. func (c AttrControlling) AddTo(m *stun.Message) error { return tiebreaker(c).AddToAs(m, stun.AttrICEControlling) } // GetFrom decodes ICE-CONTROLLING from message. func (c *AttrControlling) GetFrom(m *stun.Message) error { return (*tiebreaker)(c).GetFromAs(m, stun.AttrICEControlling) } // AttrControl is helper that wraps ICE-{CONTROLLED,CONTROLLING}. type AttrControl struct { Role Role Tiebreaker uint64 } // AddTo adds ICE-CONTROLLED or ICE-CONTROLLING attribute depending on Role. func (c AttrControl) AddTo(m *stun.Message) error { if c.Role == Controlling { return tiebreaker(c.Tiebreaker).AddToAs(m, stun.AttrICEControlling) } return tiebreaker(c.Tiebreaker).AddToAs(m, stun.AttrICEControlled) } // GetFrom decodes Role and Tiebreaker value from message. 
func (c *AttrControl) GetFrom(m *stun.Message) error { if m.Contains(stun.AttrICEControlling) { c.Role = Controlling return (*tiebreaker)(&c.Tiebreaker).GetFromAs(m, stun.AttrICEControlling) } if m.Contains(stun.AttrICEControlled) { c.Role = Controlled return (*tiebreaker)(&c.Tiebreaker).GetFromAs(m, stun.AttrICEControlled) } return stun.ErrAttributeNotFound } ice-2.3.1/icecontrol_test.go000066400000000000000000000064611437620344400160220ustar00rootroot00000000000000package ice import ( "errors" "testing" "github.com/pion/stun" ) func TestControlled_GetFrom(t *testing.T) { //nolint:dupl m := new(stun.Message) var c AttrControlled if err := c.GetFrom(m); !errors.Is(err, stun.ErrAttributeNotFound) { t.Error("unexpected error") } if err := m.Build(stun.BindingRequest, &c); err != nil { t.Error(err) } m1 := new(stun.Message) if _, err := m1.Write(m.Raw); err != nil { t.Error(err) } var c1 AttrControlled if err := c1.GetFrom(m1); err != nil { t.Error(err) } if c1 != c { t.Error("not equal") } t.Run("IncorrectSize", func(t *testing.T) { m3 := new(stun.Message) m3.Add(stun.AttrICEControlled, make([]byte, 100)) var c2 AttrControlled if err := c2.GetFrom(m3); !stun.IsAttrSizeInvalid(err) { t.Error("should error") } }) } func TestControlling_GetFrom(t *testing.T) { //nolint:dupl m := new(stun.Message) var c AttrControlling if err := c.GetFrom(m); !errors.Is(err, stun.ErrAttributeNotFound) { t.Error("unexpected error") } if err := m.Build(stun.BindingRequest, &c); err != nil { t.Error(err) } m1 := new(stun.Message) if _, err := m1.Write(m.Raw); err != nil { t.Error(err) } var c1 AttrControlling if err := c1.GetFrom(m1); err != nil { t.Error(err) } if c1 != c { t.Error("not equal") } t.Run("IncorrectSize", func(t *testing.T) { m3 := new(stun.Message) m3.Add(stun.AttrICEControlling, make([]byte, 100)) var c2 AttrControlling if err := c2.GetFrom(m3); !stun.IsAttrSizeInvalid(err) { t.Error("should error") } }) } func TestControl_GetFrom(t *testing.T) { t.Run("Blank", func(t 
*testing.T) { m := new(stun.Message) var c AttrControl if err := c.GetFrom(m); !errors.Is(err, stun.ErrAttributeNotFound) { t.Error("unexpected error") } }) t.Run("Controlling", func(t *testing.T) { //nolint:dupl m := new(stun.Message) var c AttrControl if err := c.GetFrom(m); !errors.Is(err, stun.ErrAttributeNotFound) { t.Error("unexpected error") } c.Role = Controlling c.Tiebreaker = 4321 if err := m.Build(stun.BindingRequest, &c); err != nil { t.Error(err) } m1 := new(stun.Message) if _, err := m1.Write(m.Raw); err != nil { t.Error(err) } var c1 AttrControl if err := c1.GetFrom(m1); err != nil { t.Error(err) } if c1 != c { t.Error("not equal") } t.Run("IncorrectSize", func(t *testing.T) { m3 := new(stun.Message) m3.Add(stun.AttrICEControlling, make([]byte, 100)) var c2 AttrControl if err := c2.GetFrom(m3); !stun.IsAttrSizeInvalid(err) { t.Error("should error") } }) }) t.Run("Controlled", func(t *testing.T) { //nolint:dupl m := new(stun.Message) var c AttrControl if err := c.GetFrom(m); !errors.Is(err, stun.ErrAttributeNotFound) { t.Error("unexpected error") } c.Role = Controlled c.Tiebreaker = 1234 if err := m.Build(stun.BindingRequest, &c); err != nil { t.Error(err) } m1 := new(stun.Message) if _, err := m1.Write(m.Raw); err != nil { t.Error(err) } var c1 AttrControl if err := c1.GetFrom(m1); err != nil { t.Error(err) } if c1 != c { t.Error("not equal") } t.Run("IncorrectSize", func(t *testing.T) { m3 := new(stun.Message) m3.Add(stun.AttrICEControlling, make([]byte, 100)) var c2 AttrControl if err := c2.GetFrom(m3); !stun.IsAttrSizeInvalid(err) { t.Error("should error") } }) }) } ice-2.3.1/internal/000077500000000000000000000000001437620344400141005ustar00rootroot00000000000000ice-2.3.1/internal/atomic/000077500000000000000000000000001437620344400153545ustar00rootroot00000000000000ice-2.3.1/internal/atomic/atomic.go000066400000000000000000000006471437620344400171660ustar00rootroot00000000000000// Package atomic contains custom atomic types package atomic import 
"sync/atomic" // Error is an atomic error type Error struct { v atomic.Value } // Store updates the value of the atomic variable func (a *Error) Store(err error) { a.v.Store(struct{ error }{err}) } // Load retrieves the current value of the atomic variable func (a *Error) Load() error { err, _ := a.v.Load().(struct{ error }) return err.error } ice-2.3.1/internal/fakenet/000077500000000000000000000000001437620344400155155ustar00rootroot00000000000000ice-2.3.1/internal/fakenet/packet_conn.go000066400000000000000000000011161437620344400203270ustar00rootroot00000000000000// Package fakenet contains fake network abstractions package fakenet import ( "net" ) // Compile-time assertion var _ net.PacketConn = (*PacketConn)(nil) // PacketConn wraps a net.Conn and emulates net.PacketConn type PacketConn struct { net.Conn } // ReadFrom reads a packet from the connection, func (f *PacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { n, err = f.Conn.Read(p) addr = f.Conn.RemoteAddr() return } // WriteTo writes a packet with payload p to addr. func (f *PacketConn) WriteTo(p []byte, addr net.Addr) (int, error) { return f.Conn.Write(p) } ice-2.3.1/internal/internal.go000066400000000000000000000001341437620344400162410ustar00rootroot00000000000000// Package internal implements internal functionality for Pions ICE module package internal ice-2.3.1/internal/stun/000077500000000000000000000000001437620344400150715ustar00rootroot00000000000000ice-2.3.1/internal/stun/stun.go000066400000000000000000000034101437620344400164070ustar00rootroot00000000000000// Package stun contains ICE specific STUN code package stun import ( "errors" "fmt" "net" "time" "github.com/pion/stun" ) var ( errGetXorMappedAddrResponse = errors.New("failed to get XOR-MAPPED-ADDRESS response") errMismatchUsername = errors.New("username mismatch") ) // GetXORMappedAddr initiates a stun requests to serverAddr using conn, reads the response and returns // the XORMappedAddress returned by the STUN server. 
func GetXORMappedAddr(conn net.PacketConn, serverAddr net.Addr, timeout time.Duration) (*stun.XORMappedAddress, error) { if timeout > 0 { if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil { return nil, err } // Reset timeout after completion defer conn.SetReadDeadline(time.Time{}) //nolint:errcheck } req, err := stun.Build(stun.BindingRequest, stun.TransactionID) if err != nil { return nil, err } if _, err = conn.WriteTo(req.Raw, serverAddr); err != nil { return nil, err } const maxMessageSize = 1280 buf := make([]byte, maxMessageSize) n, _, err := conn.ReadFrom(buf) if err != nil { return nil, err } res := &stun.Message{Raw: buf[:n]} if err = res.Decode(); err != nil { return nil, err } var addr stun.XORMappedAddress if err = addr.GetFrom(res); err != nil { return nil, fmt.Errorf("%w: %v", errGetXorMappedAddrResponse, err) } return &addr, nil } // AssertUsername checks that the given STUN message m has a USERNAME attribute with a given value func AssertUsername(m *stun.Message, expectedUsername string) error { var username stun.Username if err := username.GetFrom(m); err != nil { return err } else if string(username) != expectedUsername { return fmt.Errorf("%w expected(%x) actual(%x)", errMismatchUsername, expectedUsername, string(username)) } return nil } ice-2.3.1/mdns.go000066400000000000000000000040131437620344400135520ustar00rootroot00000000000000package ice import ( "github.com/google/uuid" "github.com/pion/logging" "github.com/pion/mdns" "github.com/pion/transport/v2" "golang.org/x/net/ipv4" ) // MulticastDNSMode represents the different Multicast modes ICE can run in type MulticastDNSMode byte // MulticastDNSMode enum const ( // MulticastDNSModeDisabled means remote mDNS candidates will be discarded, and local host candidates will use IPs MulticastDNSModeDisabled MulticastDNSMode = iota + 1 // MulticastDNSModeQueryOnly means remote mDNS candidates will be accepted, and local host candidates will use IPs MulticastDNSModeQueryOnly // 
MulticastDNSModeQueryAndGather means remote mDNS candidates will be accepted, and local host candidates will use mDNS MulticastDNSModeQueryAndGather ) func generateMulticastDNSName() (string, error) { // https://tools.ietf.org/id/draft-ietf-rtcweb-mdns-ice-candidates-02.html#gathering // The unique name MUST consist of a version 4 UUID as defined in [RFC4122], followed by “.local”. u, err := uuid.NewRandom() return u.String() + ".local", err } func createMulticastDNS(n transport.Net, mDNSMode MulticastDNSMode, mDNSName string, log logging.LeveledLogger) (*mdns.Conn, MulticastDNSMode, error) { if mDNSMode == MulticastDNSModeDisabled { return nil, mDNSMode, nil } addr, mdnsErr := n.ResolveUDPAddr("udp4", mdns.DefaultAddress) if mdnsErr != nil { return nil, mDNSMode, mdnsErr } l, mdnsErr := n.ListenUDP("udp4", addr) if mdnsErr != nil { // If ICE fails to start MulticastDNS server just warn the user and continue log.Errorf("Failed to enable mDNS, continuing in mDNS disabled mode: (%s)", mdnsErr) return nil, MulticastDNSModeDisabled, nil } switch mDNSMode { case MulticastDNSModeQueryOnly: conn, err := mdns.Server(ipv4.NewPacketConn(l), &mdns.Config{}) return conn, mDNSMode, err case MulticastDNSModeQueryAndGather: conn, err := mdns.Server(ipv4.NewPacketConn(l), &mdns.Config{ LocalNames: []string{mDNSName}, }) return conn, mDNSMode, err default: return nil, mDNSMode, nil } } ice-2.3.1/mdns_test.go000066400000000000000000000067521437620344400146250ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "context" "regexp" "testing" "time" "github.com/pion/transport/v2/test" "github.com/stretchr/testify/assert" ) func TestMulticastDNSOnlyConnection(t *testing.T) { report := test.CheckRoutines(t) defer report() // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 30) defer lim.Stop() cfg := &AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4}, CandidateTypes: []CandidateType{CandidateTypeHost}, MulticastDNSMode: 
MulticastDNSModeQueryAndGather, } aAgent, err := NewAgent(cfg) if err != nil { t.Fatal(err) } aNotifier, aConnected := onConnected() if err = aAgent.OnConnectionStateChange(aNotifier); err != nil { t.Fatal(err) } bAgent, err := NewAgent(cfg) if err != nil { t.Fatal(err) } bNotifier, bConnected := onConnected() if err = bAgent.OnConnectionStateChange(bNotifier); err != nil { t.Fatal(err) } connect(aAgent, bAgent) <-aConnected <-bConnected assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } func TestMulticastDNSMixedConnection(t *testing.T) { report := test.CheckRoutines(t) defer report() // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 30) defer lim.Stop() aAgent, err := NewAgent(&AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4}, CandidateTypes: []CandidateType{CandidateTypeHost}, MulticastDNSMode: MulticastDNSModeQueryAndGather, }) if err != nil { t.Fatal(err) } aNotifier, aConnected := onConnected() if err = aAgent.OnConnectionStateChange(aNotifier); err != nil { t.Fatal(err) } bAgent, err := NewAgent(&AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4}, CandidateTypes: []CandidateType{CandidateTypeHost}, MulticastDNSMode: MulticastDNSModeQueryOnly, }) if err != nil { t.Fatal(err) } bNotifier, bConnected := onConnected() if err = bAgent.OnConnectionStateChange(bNotifier); err != nil { t.Fatal(err) } connect(aAgent, bAgent) <-aConnected <-bConnected assert.NoError(t, aAgent.Close()) assert.NoError(t, bAgent.Close()) } func TestMulticastDNSStaticHostName(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() _, err := NewAgent(&AgentConfig{ NetworkTypes: []NetworkType{NetworkTypeUDP4}, CandidateTypes: []CandidateType{CandidateTypeHost}, MulticastDNSMode: MulticastDNSModeQueryAndGather, MulticastDNSHostName: "invalidHostName", }) assert.Equal(t, err, ErrInvalidMulticastDNSHostName) agent, err := NewAgent(&AgentConfig{ NetworkTypes: 
[]NetworkType{NetworkTypeUDP4}, CandidateTypes: []CandidateType{CandidateTypeHost}, MulticastDNSMode: MulticastDNSModeQueryAndGather, MulticastDNSHostName: "validName.local", }) assert.NoError(t, err) correctHostName, resolveFunc := context.WithCancel(context.Background()) assert.NoError(t, agent.OnCandidate(func(c Candidate) { if c != nil && c.Address() == "validName.local" { resolveFunc() } })) assert.NoError(t, agent.GatherCandidates()) <-correctHostName.Done() assert.NoError(t, agent.Close()) } func TestGenerateMulticastDNSName(t *testing.T) { name, err := generateMulticastDNSName() if err != nil { t.Fatal(err) } isMDNSName := regexp.MustCompile( `^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}.local+$`, ).MatchString if !isMDNSName(name) { t.Fatalf("mDNS name must be UUID v4 + \".local\" suffix, got %s", name) } } ice-2.3.1/net.go000066400000000000000000000053121437620344400134020ustar00rootroot00000000000000package ice import ( "net" "github.com/pion/logging" "github.com/pion/transport/v2" ) // The conditions of invalidation written below are defined in // https://tools.ietf.org/html/rfc8445#section-5.1.1.1 func isSupportedIPv6(ip net.IP) bool { if len(ip) != net.IPv6len || isZeros(ip[0:12]) || // !(IPv4-compatible IPv6) ip[0] == 0xfe && ip[1]&0xc0 == 0xc0 || // !(IPv6 site-local unicast) ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() { return false } return true } func isZeros(ip net.IP) bool { for i := 0; i < len(ip); i++ { if ip[i] != 0 { return false } } return true } func localInterfaces(n transport.Net, interfaceFilter func(string) bool, ipFilter func(net.IP) bool, networkTypes []NetworkType, includeLoopback bool) ([]net.IP, error) { //nolint:gocognit ips := []net.IP{} ifaces, err := n.Interfaces() if err != nil { return ips, err } var IPv4Requested, IPv6Requested bool for _, typ := range networkTypes { if typ.IsIPv4() { IPv4Requested = true } if typ.IsIPv6() { IPv6Requested = true } } for _, iface := range 
ifaces { if iface.Flags&net.FlagUp == 0 { continue // interface down } if (iface.Flags&net.FlagLoopback != 0) && !includeLoopback { continue // loopback interface } if interfaceFilter != nil && !interfaceFilter(iface.Name) { continue } addrs, err := iface.Addrs() if err != nil { continue } for _, addr := range addrs { var ip net.IP switch addr := addr.(type) { case *net.IPNet: ip = addr.IP case *net.IPAddr: ip = addr.IP } if ip == nil || (ip.IsLoopback() && !includeLoopback) { continue } if ipv4 := ip.To4(); ipv4 == nil { if !IPv6Requested { continue } else if !isSupportedIPv6(ip) { continue } } else if !IPv4Requested { continue } if ipFilter != nil && !ipFilter(ip) { continue } ips = append(ips, ip) } } return ips, nil } func listenUDPInPortRange(n transport.Net, log logging.LeveledLogger, portMax, portMin int, network string, lAddr *net.UDPAddr) (transport.UDPConn, error) { if (lAddr.Port != 0) || ((portMin == 0) && (portMax == 0)) { return n.ListenUDP(network, lAddr) } var i, j int i = portMin if i == 0 { i = 1 } j = portMax if j == 0 { j = 0xFFFF } if i > j { return nil, ErrPort } portStart := globalMathRandomGenerator.Intn(j-i+1) + i portCurrent := portStart for { lAddr = &net.UDPAddr{IP: lAddr.IP, Port: portCurrent} c, e := n.ListenUDP(network, lAddr) if e == nil { return c, e //nolint:nilerr } log.Debugf("failed to listen %s: %v", lAddr.String(), e) portCurrent++ if portCurrent > j { portCurrent = i } if portCurrent == portStart { break } } return nil, ErrPort } ice-2.3.1/net_test.go000066400000000000000000000024531437620344400144440ustar00rootroot00000000000000package ice import ( "net" "testing" "github.com/stretchr/testify/assert" ) func TestIsSupportedIPv6(t *testing.T) { if isSupportedIPv6(net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}) { t.Errorf("isSupportedIPv6 return true with IPv4-compatible IPv6 address") } if isSupportedIPv6(net.ParseIP("fec0::2333")) { t.Errorf("isSupportedIPv6 return true with IPv6 site-local unicast address") } if 
isSupportedIPv6(net.ParseIP("fe80::2333")) { t.Errorf("isSupportedIPv6 return true with IPv6 link-local address") } if isSupportedIPv6(net.ParseIP("ff02::2333")) { t.Errorf("isSupportedIPv6 return true with IPv6 link-local multicast address") } if !isSupportedIPv6(net.ParseIP("2001::1")) { t.Errorf("isSupportedIPv6 return false with IPv6 global unicast address") } } func TestCreateAddr(t *testing.T) { ipv4 := net.IP{127, 0, 0, 1} ipv6 := net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} port := 9000 assert.Equal(t, &net.UDPAddr{IP: ipv4, Port: port}, createAddr(NetworkTypeUDP4, ipv4, port)) assert.Equal(t, &net.UDPAddr{IP: ipv6, Port: port}, createAddr(NetworkTypeUDP6, ipv6, port)) assert.Equal(t, &net.TCPAddr{IP: ipv4, Port: port}, createAddr(NetworkTypeTCP4, ipv4, port)) assert.Equal(t, &net.TCPAddr{IP: ipv6, Port: port}, createAddr(NetworkTypeTCP6, ipv6, port)) } ice-2.3.1/networktype.go000066400000000000000000000052741437620344400152160ustar00rootroot00000000000000package ice import ( "fmt" "net" "strings" ) const ( udp = "udp" tcp = "tcp" udp4 = "udp4" udp6 = "udp6" tcp4 = "tcp4" tcp6 = "tcp6" ) func supportedNetworkTypes() []NetworkType { return []NetworkType{ NetworkTypeUDP4, NetworkTypeUDP6, NetworkTypeTCP4, NetworkTypeTCP6, } } // NetworkType represents the type of network type NetworkType int const ( // NetworkTypeUDP4 indicates UDP over IPv4. NetworkTypeUDP4 NetworkType = iota + 1 // NetworkTypeUDP6 indicates UDP over IPv6. NetworkTypeUDP6 // NetworkTypeTCP4 indicates TCP over IPv4. NetworkTypeTCP4 // NetworkTypeTCP6 indicates TCP over IPv6. NetworkTypeTCP6 ) func (t NetworkType) String() string { switch t { case NetworkTypeUDP4: return udp4 case NetworkTypeUDP6: return udp6 case NetworkTypeTCP4: return tcp4 case NetworkTypeTCP6: return tcp6 default: return ErrUnknownType.Error() } } // IsUDP returns true when network is UDP4 or UDP6. 
func (t NetworkType) IsUDP() bool { return t == NetworkTypeUDP4 || t == NetworkTypeUDP6 } // IsTCP returns true when network is TCP4 or TCP6. func (t NetworkType) IsTCP() bool { return t == NetworkTypeTCP4 || t == NetworkTypeTCP6 } // NetworkShort returns the short network description func (t NetworkType) NetworkShort() string { switch t { case NetworkTypeUDP4, NetworkTypeUDP6: return udp case NetworkTypeTCP4, NetworkTypeTCP6: return tcp default: return ErrUnknownType.Error() } } // IsReliable returns true if the network is reliable func (t NetworkType) IsReliable() bool { switch t { case NetworkTypeUDP4, NetworkTypeUDP6: return false case NetworkTypeTCP4, NetworkTypeTCP6: return true } return false } // IsIPv4 returns whether the network type is IPv4 or not. func (t NetworkType) IsIPv4() bool { switch t { case NetworkTypeUDP4, NetworkTypeTCP4: return true case NetworkTypeUDP6, NetworkTypeTCP6: return false } return false } // IsIPv6 returns whether the network type is IPv6 or not. func (t NetworkType) IsIPv6() bool { switch t { case NetworkTypeUDP4, NetworkTypeTCP4: return false case NetworkTypeUDP6, NetworkTypeTCP6: return true } return false } // determineNetworkType determines the type of network based on // the short network string and an IP address. 
func determineNetworkType(network string, ip net.IP) (NetworkType, error) { ipv4 := ip.To4() != nil switch { case strings.HasPrefix(strings.ToLower(network), udp): if ipv4 { return NetworkTypeUDP4, nil } return NetworkTypeUDP6, nil case strings.HasPrefix(strings.ToLower(network), tcp): if ipv4 { return NetworkTypeTCP4, nil } return NetworkTypeTCP6, nil } return NetworkType(0), fmt.Errorf("%w from %s %s", ErrDetermineNetworkType, network, ip) } ice-2.3.1/networktype_test.go000066400000000000000000000034171437620344400162520ustar00rootroot00000000000000package ice import ( "net" "testing" "github.com/stretchr/testify/assert" ) func TestNetworkTypeParsing_Success(t *testing.T) { ipv4 := net.ParseIP("192.168.0.1") ipv6 := net.ParseIP("fe80::a3:6ff:fec4:5454") for _, test := range []struct { name string inNetwork string inIP net.IP expected NetworkType }{ { "lowercase UDP4", "udp", ipv4, NetworkTypeUDP4, }, { "uppercase UDP4", "UDP", ipv4, NetworkTypeUDP4, }, { "lowercase UDP6", "udp", ipv6, NetworkTypeUDP6, }, { "uppercase UDP6", "UDP", ipv6, NetworkTypeUDP6, }, } { actual, err := determineNetworkType(test.inNetwork, test.inIP) if err != nil { t.Errorf("NetworkTypeParsing failed: %v", err) } if actual != test.expected { t.Errorf("NetworkTypeParsing: '%s' -- input:%s expected:%s actual:%s", test.name, test.inNetwork, test.expected, actual) } } } func TestNetworkTypeParsing_Failure(t *testing.T) { ipv6 := net.ParseIP("fe80::a3:6ff:fec4:5454") for _, test := range []struct { name string inNetwork string inIP net.IP }{ { "invalid network", "junkNetwork", ipv6, }, } { actual, err := determineNetworkType(test.inNetwork, test.inIP) if err == nil { t.Errorf("NetworkTypeParsing should fail: '%s' -- input:%s actual:%s", test.name, test.inNetwork, actual) } } } func TestNetworkTypeIsUDP(t *testing.T) { assert.True(t, NetworkTypeUDP4.IsUDP()) assert.True(t, NetworkTypeUDP6.IsUDP()) assert.False(t, NetworkTypeUDP4.IsTCP()) assert.False(t, NetworkTypeUDP6.IsTCP()) } func 
TestNetworkTypeIsTCP(t *testing.T) { assert.True(t, NetworkTypeTCP4.IsTCP()) assert.True(t, NetworkTypeTCP6.IsTCP()) assert.False(t, NetworkTypeTCP4.IsUDP()) assert.False(t, NetworkTypeTCP6.IsUDP()) } ice-2.3.1/priority.go000066400000000000000000000013271437620344400144770ustar00rootroot00000000000000package ice import ( "encoding/binary" "github.com/pion/stun" ) // PriorityAttr represents PRIORITY attribute. type PriorityAttr uint32 const prioritySize = 4 // 32 bit // AddTo adds PRIORITY attribute to message. func (p PriorityAttr) AddTo(m *stun.Message) error { v := make([]byte, prioritySize) binary.BigEndian.PutUint32(v, uint32(p)) m.Add(stun.AttrPriority, v) return nil } // GetFrom decodes PRIORITY attribute from message. func (p *PriorityAttr) GetFrom(m *stun.Message) error { v, err := m.Get(stun.AttrPriority) if err != nil { return err } if err = stun.CheckSize(stun.AttrPriority, len(v), prioritySize); err != nil { return err } *p = PriorityAttr(binary.BigEndian.Uint32(v)) return nil } ice-2.3.1/priority_test.go000066400000000000000000000014301437620344400155310ustar00rootroot00000000000000package ice import ( "errors" "testing" "github.com/pion/stun" ) func TestPriority_GetFrom(t *testing.T) { //nolint:dupl m := new(stun.Message) var p PriorityAttr if err := p.GetFrom(m); !errors.Is(err, stun.ErrAttributeNotFound) { t.Error("unexpected error") } if err := m.Build(stun.BindingRequest, &p); err != nil { t.Error(err) } m1 := new(stun.Message) if _, err := m1.Write(m.Raw); err != nil { t.Error(err) } var p1 PriorityAttr if err := p1.GetFrom(m1); err != nil { t.Error(err) } if p1 != p { t.Error("not equal") } t.Run("IncorrectSize", func(t *testing.T) { m3 := new(stun.Message) m3.Add(stun.AttrPriority, make([]byte, 100)) var p2 PriorityAttr if err := p2.GetFrom(m3); !stun.IsAttrSizeInvalid(err) { t.Error("should error") } }) } ice-2.3.1/rand.go000066400000000000000000000033751437620344400135470ustar00rootroot00000000000000package ice import 
"github.com/pion/randutil" const ( runesAlpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" runesDigit = "0123456789" runesCandidateIDFoundation = runesAlpha + runesDigit + "+/" lenUFrag = 16 lenPwd = 32 ) // Seeding random generator each time limits number of generated sequence to 31-bits, // and causes collision on low time accuracy environments. // Use global random generator seeded by crypto grade random. var ( globalMathRandomGenerator = randutil.NewMathRandomGenerator() //nolint:gochecknoglobals globalCandidateIDGenerator = candidateIDGenerator{globalMathRandomGenerator} //nolint:gochecknoglobals ) // candidateIDGenerator is a random candidate ID generator. // Candidate ID is used in SDP and always shared to the other peer. // It doesn't require cryptographic random. type candidateIDGenerator struct { randutil.MathRandomGenerator } func newCandidateIDGenerator() *candidateIDGenerator { return &candidateIDGenerator{ randutil.NewMathRandomGenerator(), } } func (g *candidateIDGenerator) Generate() string { // https://tools.ietf.org/html/rfc5245#section-15.1 // candidate-id = "candidate" ":" foundation // foundation = 1*32ice-char // ice-char = ALPHA / DIGIT / "+" / "/" return "candidate:" + g.MathRandomGenerator.GenerateString(32, runesCandidateIDFoundation) } // generatePwd generates ICE pwd. // This internally uses generateCryptoRandomString. func generatePwd() (string, error) { return randutil.GenerateCryptoRandomString(lenPwd, runesAlpha) } // generateUFrag generates ICE user fragment. // This internally uses generateCryptoRandomString. 
func generateUFrag() (string, error) { return randutil.GenerateCryptoRandomString(lenUFrag, runesAlpha) } ice-2.3.1/rand_test.go000066400000000000000000000025241437620344400146010ustar00rootroot00000000000000package ice import ( "sync" "testing" ) func TestRandomGeneratorCollision(t *testing.T) { candidateIDGen := newCandidateIDGenerator() testCases := map[string]struct { gen func(t *testing.T) string }{ "CandidateID": { gen: func(t *testing.T) string { return candidateIDGen.Generate() }, }, "PWD": { gen: func(t *testing.T) string { s, err := generatePwd() if err != nil { t.Fatal(err) } return s }, }, "Ufrag": { gen: func(t *testing.T) string { s, err := generateUFrag() if err != nil { t.Fatal(err) } return s }, }, } const N = 100 const iteration = 100 for name, testCase := range testCases { testCase := testCase t.Run(name, func(t *testing.T) { for iter := 0; iter < iteration; iter++ { var wg sync.WaitGroup var mu sync.Mutex rands := make([]string, 0, N) for i := 0; i < N; i++ { wg.Add(1) go func() { r := testCase.gen(t) mu.Lock() rands = append(rands, r) mu.Unlock() wg.Done() }() } wg.Wait() if len(rands) != N { t.Fatal("Failed to generate randoms") } for i := 0; i < N; i++ { for j := i + 1; j < N; j++ { if rands[i] == rands[j] { t.Fatalf("generateRandString caused collision: %s == %s", rands[i], rands[j]) } } } } }) } } ice-2.3.1/renovate.json000066400000000000000000000001731437620344400150030ustar00rootroot00000000000000{ "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "github>pion/renovate-config" ] } ice-2.3.1/role.go000066400000000000000000000013761437620344400135630ustar00rootroot00000000000000package ice import ( "fmt" ) // Role represents ICE agent role, which can be controlling or controlled. type Role byte // Possible ICE agent roles. const ( Controlling Role = iota Controlled ) // UnmarshalText implements TextUnmarshaler. 
func (r *Role) UnmarshalText(text []byte) error { switch string(text) { case "controlling": *r = Controlling case "controlled": *r = Controlled default: return fmt.Errorf("%w %q", errUnknownRole, text) } return nil } // MarshalText implements TextMarshaler. func (r Role) MarshalText() (text []byte, err error) { return []byte(r.String()), nil } func (r Role) String() string { switch r { case Controlling: return "controlling" case Controlled: return "controlled" default: return "unknown" } } ice-2.3.1/selection.go000066400000000000000000000227171437620344400146110ustar00rootroot00000000000000package ice import ( "net" "time" "github.com/pion/logging" "github.com/pion/stun" ) type pairCandidateSelector interface { Start() ContactCandidates() PingCandidate(local, remote Candidate) HandleSuccessResponse(m *stun.Message, local, remote Candidate, remoteAddr net.Addr) HandleBindingRequest(m *stun.Message, local, remote Candidate) } type controllingSelector struct { startTime time.Time agent *Agent nominatedPair *CandidatePair log logging.LeveledLogger } func (s *controllingSelector) Start() { s.startTime = time.Now() s.nominatedPair = nil } func (s *controllingSelector) isNominatable(c Candidate) bool { switch { case c.Type() == CandidateTypeHost: return time.Since(s.startTime).Nanoseconds() > s.agent.hostAcceptanceMinWait.Nanoseconds() case c.Type() == CandidateTypeServerReflexive: return time.Since(s.startTime).Nanoseconds() > s.agent.srflxAcceptanceMinWait.Nanoseconds() case c.Type() == CandidateTypePeerReflexive: return time.Since(s.startTime).Nanoseconds() > s.agent.prflxAcceptanceMinWait.Nanoseconds() case c.Type() == CandidateTypeRelay: return time.Since(s.startTime).Nanoseconds() > s.agent.relayAcceptanceMinWait.Nanoseconds() } s.log.Errorf("isNominatable invalid candidate type %s", c.Type().String()) return false } func (s *controllingSelector) ContactCandidates() { switch { case s.agent.getSelectedPair() != nil: if s.agent.validateSelectedPair() { 
s.log.Trace("checking keepalive") s.agent.checkKeepalive() } case s.nominatedPair != nil: s.nominatePair(s.nominatedPair) default: p := s.agent.getBestValidCandidatePair() if p != nil && s.isNominatable(p.Local) && s.isNominatable(p.Remote) { s.log.Tracef("Nominatable pair found, nominating (%s, %s)", p.Local.String(), p.Remote.String()) p.nominated = true s.nominatedPair = p s.nominatePair(p) return } s.agent.pingAllCandidates() } } func (s *controllingSelector) nominatePair(pair *CandidatePair) { // The controlling agent MUST include the USE-CANDIDATE attribute in // order to nominate a candidate pair (Section 8.1.1). The controlled // agent MUST NOT include the USE-CANDIDATE attribute in a Binding // request. msg, err := stun.Build(stun.BindingRequest, stun.TransactionID, stun.NewUsername(s.agent.remoteUfrag+":"+s.agent.localUfrag), UseCandidate(), AttrControlling(s.agent.tieBreaker), PriorityAttr(pair.Local.Priority()), stun.NewShortTermIntegrity(s.agent.remotePwd), stun.Fingerprint, ) if err != nil { s.log.Error(err.Error()) return } s.log.Tracef("ping STUN (nominate candidate pair) from %s to %s", pair.Local.String(), pair.Remote.String()) s.agent.sendBindingRequest(msg, pair.Local, pair.Remote) } func (s *controllingSelector) HandleBindingRequest(m *stun.Message, local, remote Candidate) { s.agent.sendBindingSuccess(m, local, remote) p := s.agent.findPair(local, remote) if p == nil { s.agent.addPair(local, remote) return } if p.state == CandidatePairStateSucceeded && s.nominatedPair == nil && s.agent.getSelectedPair() == nil { bestPair := s.agent.getBestAvailableCandidatePair() if bestPair == nil { s.log.Tracef("No best pair available") } else if bestPair.equal(p) && s.isNominatable(p.Local) && s.isNominatable(p.Remote) { s.log.Tracef("The candidate (%s, %s) is the best candidate available, marking it as nominated", p.Local.String(), p.Remote.String()) s.nominatedPair = p s.nominatePair(p) } } } func (s *controllingSelector) HandleSuccessResponse(m 
*stun.Message, local, remote Candidate, remoteAddr net.Addr) { ok, pendingRequest := s.agent.handleInboundBindingSuccess(m.TransactionID) if !ok { s.log.Warnf("discard message from (%s), unknown TransactionID 0x%x", remote, m.TransactionID) return } transactionAddr := pendingRequest.destination // Assert that NAT is not symmetric // https://tools.ietf.org/html/rfc8445#section-7.2.5.2.1 if !addrEqual(transactionAddr, remoteAddr) { s.log.Debugf("discard message: transaction source and destination does not match expected(%s), actual(%s)", transactionAddr, remote) return } s.log.Tracef("inbound STUN (SuccessResponse) from %s to %s", remote.String(), local.String()) p := s.agent.findPair(local, remote) if p == nil { // This shouldn't happen s.log.Error("Success response from invalid candidate pair") return } p.state = CandidatePairStateSucceeded s.log.Tracef("Found valid candidate pair: %s", p) if pendingRequest.isUseCandidate && s.agent.getSelectedPair() == nil { s.agent.setSelectedPair(p) } } func (s *controllingSelector) PingCandidate(local, remote Candidate) { msg, err := stun.Build(stun.BindingRequest, stun.TransactionID, stun.NewUsername(s.agent.remoteUfrag+":"+s.agent.localUfrag), AttrControlling(s.agent.tieBreaker), PriorityAttr(local.Priority()), stun.NewShortTermIntegrity(s.agent.remotePwd), stun.Fingerprint, ) if err != nil { s.log.Error(err.Error()) return } s.agent.sendBindingRequest(msg, local, remote) } type controlledSelector struct { agent *Agent log logging.LeveledLogger } func (s *controlledSelector) Start() { } func (s *controlledSelector) ContactCandidates() { if s.agent.getSelectedPair() != nil { if s.agent.validateSelectedPair() { s.log.Trace("checking keepalive") s.agent.checkKeepalive() } } else { s.agent.pingAllCandidates() } } func (s *controlledSelector) PingCandidate(local, remote Candidate) { msg, err := stun.Build(stun.BindingRequest, stun.TransactionID, stun.NewUsername(s.agent.remoteUfrag+":"+s.agent.localUfrag), 
AttrControlled(s.agent.tieBreaker), PriorityAttr(local.Priority()), stun.NewShortTermIntegrity(s.agent.remotePwd), stun.Fingerprint, ) if err != nil { s.log.Error(err.Error()) return } s.agent.sendBindingRequest(msg, local, remote) } func (s *controlledSelector) HandleSuccessResponse(m *stun.Message, local, remote Candidate, remoteAddr net.Addr) { // nolint:godox // TODO according to the standard we should specifically answer a failed nomination: // https://tools.ietf.org/html/rfc8445#section-7.3.1.5 // If the controlled agent does not accept the request from the // controlling agent, the controlled agent MUST reject the nomination // request with an appropriate error code response (e.g., 400) // [RFC5389]. ok, pendingRequest := s.agent.handleInboundBindingSuccess(m.TransactionID) if !ok { s.log.Warnf("discard message from (%s), unknown TransactionID 0x%x", remote, m.TransactionID) return } transactionAddr := pendingRequest.destination // Assert that NAT is not symmetric // https://tools.ietf.org/html/rfc8445#section-7.2.5.2.1 if !addrEqual(transactionAddr, remoteAddr) { s.log.Debugf("discard message: transaction source and destination does not match expected(%s), actual(%s)", transactionAddr, remote) return } s.log.Tracef("inbound STUN (SuccessResponse) from %s to %s", remote.String(), local.String()) p := s.agent.findPair(local, remote) if p == nil { // This shouldn't happen s.log.Error("Success response from invalid candidate pair") return } p.state = CandidatePairStateSucceeded s.log.Tracef("Found valid candidate pair: %s", p) if p.nominateOnBindingSuccess { if selectedPair := s.agent.getSelectedPair(); selectedPair == nil || (selectedPair != p && selectedPair.priority() <= p.priority()) { s.agent.setSelectedPair(p) } else if selectedPair != p { s.log.Tracef("ignore nominate new pair %s, already nominated pair %s", p, selectedPair) } } } func (s *controlledSelector) HandleBindingRequest(m *stun.Message, local, remote Candidate) { useCandidate := 
m.Contains(stun.AttrUseCandidate) p := s.agent.findPair(local, remote) if p == nil { p = s.agent.addPair(local, remote) } if useCandidate { // https://tools.ietf.org/html/rfc8445#section-7.3.1.5 if p.state == CandidatePairStateSucceeded { // If the state of this pair is Succeeded, it means that the check // previously sent by this pair produced a successful response and // generated a valid pair (Section 7.2.5.3.2). The agent sets the // nominated flag value of the valid pair to true. if selectedPair := s.agent.getSelectedPair(); selectedPair == nil || (selectedPair != p && selectedPair.priority() <= p.priority()) { s.agent.setSelectedPair(p) } else if selectedPair != p { s.log.Tracef("ignore nominate new pair %s, already nominated pair %s", p, selectedPair) } } else { // If the received Binding request triggered a new check to be // enqueued in the triggered-check queue (Section 7.3.1.4), once the // check is sent and if it generates a successful response, and // generates a valid pair, the agent sets the nominated flag of the // pair to true. If the request fails (Section 7.2.5.2), the agent // MUST remove the candidate pair from the valid list, set the // candidate pair state to Failed, and set the checklist state to // Failed. p.nominateOnBindingSuccess = true } } s.agent.sendBindingSuccess(m, local, remote) s.PingCandidate(local, remote) } type liteSelector struct { pairCandidateSelector } // A lite selector should not contact candidates func (s *liteSelector) ContactCandidates() { if _, ok := s.pairCandidateSelector.(*controllingSelector); ok { // nolint:godox // pion/ice#96 // TODO: implement lite controlling agent. For now falling back to full agent. // This only happens if both peers are lite. 
See RFC 8445 S6.1.1 and S6.2 s.pairCandidateSelector.ContactCandidates() } else if v, ok := s.pairCandidateSelector.(*controlledSelector); ok { v.agent.validateSelectedPair() } } ice-2.3.1/stats.go000066400000000000000000000164001437620344400137520ustar00rootroot00000000000000package ice import ( "time" ) // CandidatePairStats contains ICE candidate pair statistics type CandidatePairStats struct { // Timestamp is the timestamp associated with this object. Timestamp time.Time // LocalCandidateID is the ID of the local candidate LocalCandidateID string // RemoteCandidateID is the ID of the remote candidate RemoteCandidateID string // State represents the state of the checklist for the local and remote // candidates in a pair. State CandidatePairState // Nominated is true when this valid pair that should be used for media // if it is the highest-priority one amongst those whose nominated flag is set Nominated bool // PacketsSent represents the total number of packets sent on this candidate pair. PacketsSent uint32 // PacketsReceived represents the total number of packets received on this candidate pair. PacketsReceived uint32 // BytesSent represents the total number of payload bytes sent on this candidate pair // not including headers or padding. BytesSent uint64 // BytesReceived represents the total number of payload bytes received on this candidate pair // not including headers or padding. BytesReceived uint64 // LastPacketSentTimestamp represents the timestamp at which the last packet was // sent on this particular candidate pair, excluding STUN packets. LastPacketSentTimestamp time.Time // LastPacketReceivedTimestamp represents the timestamp at which the last packet // was received on this particular candidate pair, excluding STUN packets. LastPacketReceivedTimestamp time.Time // FirstRequestTimestamp represents the timestamp at which the first STUN request // was sent on this particular candidate pair. 
FirstRequestTimestamp time.Time // LastRequestTimestamp represents the timestamp at which the last STUN request // was sent on this particular candidate pair. The average interval between two // consecutive connectivity checks sent can be calculated with // (LastRequestTimestamp - FirstRequestTimestamp) / RequestsSent. LastRequestTimestamp time.Time // LastResponseTimestamp represents the timestamp at which the last STUN response // was received on this particular candidate pair. LastResponseTimestamp time.Time // TotalRoundTripTime represents the sum of all round trip time measurements // in seconds since the beginning of the session, based on STUN connectivity // check responses (ResponsesReceived), including those that reply to requests // that are sent in order to verify consent. The average round trip time can // be computed from TotalRoundTripTime by dividing it by ResponsesReceived. TotalRoundTripTime float64 // CurrentRoundTripTime represents the latest round trip time measured in seconds, // computed from both STUN connectivity checks, including those that are sent // for consent verification. CurrentRoundTripTime float64 // AvailableOutgoingBitrate is calculated by the underlying congestion control // by combining the available bitrate for all the outgoing RTP streams using // this candidate pair. The bitrate measurement does not count the size of the // IP or other transport layers like TCP or UDP. It is similar to the TIAS defined // in RFC 3890, i.e., it is measured in bits per second and the bitrate is calculated // over a 1 second window. AvailableOutgoingBitrate float64 // AvailableIncomingBitrate is calculated by the underlying congestion control // by combining the available bitrate for all the incoming RTP streams using // this candidate pair. The bitrate measurement does not count the size of the // IP or other transport layers like TCP or UDP. 
It is similar to the TIAS defined // in RFC 3890, i.e., it is measured in bits per second and the bitrate is // calculated over a 1 second window. AvailableIncomingBitrate float64 // CircuitBreakerTriggerCount represents the number of times the circuit breaker // is triggered for this particular 5-tuple, ceasing transmission. CircuitBreakerTriggerCount uint32 // RequestsReceived represents the total number of connectivity check requests // received (including retransmissions). It is impossible for the receiver to // tell whether the request was sent in order to check connectivity or check // consent, so all connectivity checks requests are counted here. RequestsReceived uint64 // RequestsSent represents the total number of connectivity check requests // sent (not including retransmissions). RequestsSent uint64 // ResponsesReceived represents the total number of connectivity check responses received. ResponsesReceived uint64 // ResponsesSent represents the total number of connectivity check responses sent. // Since we cannot distinguish connectivity check requests and consent requests, // all responses are counted. ResponsesSent uint64 // RetransmissionsReceived represents the total number of connectivity check // request retransmissions received. RetransmissionsReceived uint64 // RetransmissionsSent represents the total number of connectivity check // request retransmissions sent. RetransmissionsSent uint64 // ConsentRequestsSent represents the total number of consent requests sent. ConsentRequestsSent uint64 // ConsentExpiredTimestamp represents the timestamp at which the latest valid // STUN binding response expired. ConsentExpiredTimestamp time.Time } // CandidateStats contains ICE candidate statistics related to the ICETransport objects. type CandidateStats struct { // Timestamp is the timestamp associated with this object. 
Timestamp time.Time // ID is the candidate ID ID string // NetworkType represents the type of network interface used by the base of a // local candidate (the address the ICE agent sends from). Only present for // local candidates; it's not possible to know what type of network interface // a remote candidate is using. // // Note: // This stat only tells you about the network interface used by the first "hop"; // it's possible that a connection will be bottlenecked by another type of network. // For example, when using Wi-Fi tethering, the networkType of the relevant candidate // would be "wifi", even when the next hop is over a cellular connection. NetworkType NetworkType // IP is the IP address of the candidate, allowing for IPv4 addresses and // IPv6 addresses, but fully qualified domain names (FQDNs) are not allowed. IP string // Port is the port number of the candidate. Port int // CandidateType is the "Type" field of the ICECandidate. CandidateType CandidateType // Priority is the "Priority" field of the ICECandidate. Priority uint32 // URL is the URL of the TURN or STUN server indicated in the that translated // this IP address. It is the URL address surfaced in an PeerConnectionICEEvent. URL string // RelayProtocol is the protocol used by the endpoint to communicate with the // TURN server. This is only present for local candidates. Valid values for // the TURN URL protocol is one of udp, tcp, or tls. RelayProtocol string // Deleted is true if the candidate has been deleted/freed. For host candidates, // this means that any network resources (typically a socket) associated with the // candidate have been released. For TURN candidates, this means the TURN allocation // is no longer active. // // Only defined for local candidates. For remote candidates, this property is not applicable. 
Deleted bool } ice-2.3.1/tcp_mux.go000066400000000000000000000253731437620344400143040ustar00rootroot00000000000000package ice import ( "encoding/binary" "errors" "io" "net" "strings" "sync" "github.com/pion/logging" "github.com/pion/stun" ) // ErrGetTransportAddress can't convert net.Addr to underlying type (UDPAddr or TCPAddr). var ErrGetTransportAddress = errors.New("failed to get local transport address") // TCPMux is allows grouping multiple TCP net.Conns and using them like UDP // net.PacketConns. The main implementation of this is TCPMuxDefault, and this // interface exists to: // 1. prevent SEGV panics when TCPMuxDefault is not initialized by using the // invalidTCPMux implementation, and // 2. allow mocking in tests. type TCPMux interface { io.Closer GetConnByUfrag(ufrag string, isIPv6 bool, local net.IP) (net.PacketConn, error) RemoveConnByUfrag(ufrag string) } // invalidTCPMux is an implementation of TCPMux that always returns ErrTCPMuxNotInitialized. type invalidTCPMux struct{} func newInvalidTCPMux() *invalidTCPMux { return &invalidTCPMux{} } // Close implements TCPMux interface. func (m *invalidTCPMux) Close() error { return ErrTCPMuxNotInitialized } // GetConnByUfrag implements TCPMux interface. func (m *invalidTCPMux) GetConnByUfrag(ufrag string, isIPv6 bool, local net.IP) (net.PacketConn, error) { return nil, ErrTCPMuxNotInitialized } // RemoveConnByUfrag implements TCPMux interface. func (m *invalidTCPMux) RemoveConnByUfrag(ufrag string) {} type ipAddr string // TCPMuxDefault muxes TCP net.Conns into net.PacketConns and groups them by // Ufrag. It is a default implementation of TCPMux interface. type TCPMuxDefault struct { params *TCPMuxParams closed bool // connsIPv4 and connsIPv6 are maps of all tcpPacketConns indexed by ufrag and local address connsIPv4, connsIPv6 map[string]map[ipAddr]*tcpPacketConn mu sync.Mutex wg sync.WaitGroup } // TCPMuxParams are parameters for TCPMux. 
type TCPMuxParams struct { Listener net.Listener Logger logging.LeveledLogger ReadBufferSize int // max buffer size for write op. 0 means no write buffer, the write op will block until the whole packet is written // if the write buffer is full, the subsequent write packet will be dropped until it has enough space. // a default 4MB is recommended. WriteBufferSize int } // NewTCPMuxDefault creates a new instance of TCPMuxDefault. func NewTCPMuxDefault(params TCPMuxParams) *TCPMuxDefault { if params.Logger == nil { params.Logger = logging.NewDefaultLoggerFactory().NewLogger("ice") } m := &TCPMuxDefault{ params: ¶ms, connsIPv4: map[string]map[ipAddr]*tcpPacketConn{}, connsIPv6: map[string]map[ipAddr]*tcpPacketConn{}, } m.wg.Add(1) go func() { defer m.wg.Done() m.start() }() return m } func (m *TCPMuxDefault) start() { m.params.Logger.Infof("Listening TCP on %s", m.params.Listener.Addr()) for { conn, err := m.params.Listener.Accept() if err != nil { m.params.Logger.Infof("Error accepting connection: %s", err) return } m.params.Logger.Debugf("Accepted connection from: %s to %s", conn.RemoteAddr(), conn.LocalAddr()) m.wg.Add(1) go func() { defer m.wg.Done() m.handleConn(conn) }() } } // LocalAddr returns the listening address of this TCPMuxDefault. func (m *TCPMuxDefault) LocalAddr() net.Addr { return m.params.Listener.Addr() } // GetConnByUfrag retrieves an existing or creates a new net.PacketConn. 
func (m *TCPMuxDefault) GetConnByUfrag(ufrag string, isIPv6 bool, local net.IP) (net.PacketConn, error) { m.mu.Lock() defer m.mu.Unlock() if m.closed { return nil, io.ErrClosedPipe } if conn, ok := m.getConn(ufrag, isIPv6, local); ok { return conn, nil } return m.createConn(ufrag, isIPv6, local) } func (m *TCPMuxDefault) createConn(ufrag string, isIPv6 bool, local net.IP) (*tcpPacketConn, error) { addr, ok := m.LocalAddr().(*net.TCPAddr) if !ok { return nil, ErrGetTransportAddress } localAddr := *addr localAddr.IP = local conn := newTCPPacketConn(tcpPacketParams{ ReadBuffer: m.params.ReadBufferSize, WriteBuffer: m.params.WriteBufferSize, LocalAddr: &localAddr, Logger: m.params.Logger, }) var conns map[ipAddr]*tcpPacketConn if isIPv6 { if conns, ok = m.connsIPv6[ufrag]; !ok { conns = make(map[ipAddr]*tcpPacketConn) m.connsIPv6[ufrag] = conns } } else { if conns, ok = m.connsIPv4[ufrag]; !ok { conns = make(map[ipAddr]*tcpPacketConn) m.connsIPv4[ufrag] = conns } } conns[ipAddr(local.String())] = conn m.wg.Add(1) go func() { defer m.wg.Done() <-conn.CloseChannel() m.removeConnByUfragAndLocalHost(ufrag, local) }() return conn, nil } func (m *TCPMuxDefault) closeAndLogError(closer io.Closer) { err := closer.Close() if err != nil { m.params.Logger.Warnf("Error closing connection: %s", err) } } func (m *TCPMuxDefault) handleConn(conn net.Conn) { buf := make([]byte, receiveMTU) n, err := readStreamingPacket(conn, buf) if err != nil { m.params.Logger.Warnf("Error reading first packet from %s: %s", conn.RemoteAddr().String(), err) return } buf = buf[:n] msg := &stun.Message{ Raw: make([]byte, len(buf)), } // Explicitly copy raw buffer so Message can own the memory. 
copy(msg.Raw, buf) if err = msg.Decode(); err != nil { m.closeAndLogError(conn) m.params.Logger.Warnf("Failed to handle decode ICE from %s to %s: %v", conn.RemoteAddr(), conn.LocalAddr(), err) return } if m == nil || msg.Type.Method != stun.MethodBinding { // not a stun m.closeAndLogError(conn) m.params.Logger.Warnf("Not a STUN message from %s to %s", conn.RemoteAddr(), conn.LocalAddr()) return } for _, attr := range msg.Attributes { m.params.Logger.Debugf("msg attr: %s", attr.String()) } attr, err := msg.Get(stun.AttrUsername) if err != nil { m.closeAndLogError(conn) m.params.Logger.Warnf("No Username attribute in STUN message from %s to %s", conn.RemoteAddr(), conn.LocalAddr()) return } ufrag := strings.Split(string(attr), ":")[0] m.params.Logger.Debugf("Ufrag: %s", ufrag) m.mu.Lock() defer m.mu.Unlock() host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) if err != nil { m.closeAndLogError(conn) m.params.Logger.Warnf("Failed to get host in STUN message from %s to %s", conn.RemoteAddr(), conn.LocalAddr()) return } isIPv6 := net.ParseIP(host).To4() == nil localAddr, ok := conn.LocalAddr().(*net.TCPAddr) if !ok { m.closeAndLogError(conn) m.params.Logger.Warnf("Failed to get local tcp address in STUN message from %s to %s", conn.RemoteAddr(), conn.LocalAddr()) return } packetConn, ok := m.getConn(ufrag, isIPv6, localAddr.IP) if !ok { packetConn, err = m.createConn(ufrag, isIPv6, localAddr.IP) if err != nil { m.closeAndLogError(conn) m.params.Logger.Warnf("Failed to create packetConn for STUN message from %s to %s", conn.RemoteAddr(), conn.LocalAddr()) return } } if err := packetConn.AddConn(conn, buf); err != nil { m.closeAndLogError(conn) m.params.Logger.Warnf("Error adding conn to tcpPacketConn from %s to %s: %s", conn.RemoteAddr(), conn.LocalAddr(), err) return } } // Close closes the listener and waits for all goroutines to exit. 
func (m *TCPMuxDefault) Close() error { m.mu.Lock() m.closed = true for _, conns := range m.connsIPv4 { for _, conn := range conns { m.closeAndLogError(conn) } } for _, conns := range m.connsIPv6 { for _, conn := range conns { m.closeAndLogError(conn) } } m.connsIPv4 = map[string]map[ipAddr]*tcpPacketConn{} m.connsIPv6 = map[string]map[ipAddr]*tcpPacketConn{} err := m.params.Listener.Close() m.mu.Unlock() m.wg.Wait() return err } // RemoveConnByUfrag closes and removes a net.PacketConn by Ufrag. func (m *TCPMuxDefault) RemoveConnByUfrag(ufrag string) { removedConns := make([]*tcpPacketConn, 0, 4) // Keep lock section small to avoid deadlock with conn lock m.mu.Lock() if conns, ok := m.connsIPv4[ufrag]; ok { delete(m.connsIPv4, ufrag) for _, conn := range conns { removedConns = append(removedConns, conn) } } if conns, ok := m.connsIPv6[ufrag]; ok { delete(m.connsIPv6, ufrag) for _, conn := range conns { removedConns = append(removedConns, conn) } } m.mu.Unlock() // Close the connections outside the critical section to avoid // deadlocking TCP mux if (*tcpPacketConn).Close() blocks. for _, conn := range removedConns { m.closeAndLogError(conn) } } func (m *TCPMuxDefault) removeConnByUfragAndLocalHost(ufrag string, local net.IP) { removedConns := make([]*tcpPacketConn, 0, 4) localIP := ipAddr(local.String()) // Keep lock section small to avoid deadlock with conn lock m.mu.Lock() if conns, ok := m.connsIPv4[ufrag]; ok { if conn, ok := conns[localIP]; ok { delete(conns, localIP) if len(conns) == 0 { delete(m.connsIPv4, ufrag) } removedConns = append(removedConns, conn) } } if conns, ok := m.connsIPv6[ufrag]; ok { if conn, ok := conns[localIP]; ok { delete(conns, localIP) if len(conns) == 0 { delete(m.connsIPv6, ufrag) } removedConns = append(removedConns, conn) } } m.mu.Unlock() // Close the connections outside the critical section to avoid // deadlocking TCP mux if (*tcpPacketConn).Close() blocks. 
for _, conn := range removedConns { m.closeAndLogError(conn) } } func (m *TCPMuxDefault) getConn(ufrag string, isIPv6 bool, local net.IP) (val *tcpPacketConn, ok bool) { var conns map[ipAddr]*tcpPacketConn if isIPv6 { conns, ok = m.connsIPv6[ufrag] } else { conns, ok = m.connsIPv4[ufrag] } if conns != nil { val, ok = conns[ipAddr(local.String())] } return } const streamingPacketHeaderLen = 2 // readStreamingPacket reads 1 packet from stream // read packet bytes https://tools.ietf.org/html/rfc4571#section-2 // 2-byte length header prepends each packet: // // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // ----------------------------------------------------------------- // | LENGTH | RTP or RTCP packet ... | // ----------------------------------------------------------------- func readStreamingPacket(conn net.Conn, buf []byte) (int, error) { header := make([]byte, streamingPacketHeaderLen) var bytesRead, n int var err error for bytesRead < streamingPacketHeaderLen { if n, err = conn.Read(header[bytesRead:streamingPacketHeaderLen]); err != nil { return 0, err } bytesRead += n } length := int(binary.BigEndian.Uint16(header)) if length > cap(buf) { return length, io.ErrShortBuffer } bytesRead = 0 for bytesRead < length { if n, err = conn.Read(buf[bytesRead:length]); err != nil { return 0, err } bytesRead += n } return bytesRead, nil } func writeStreamingPacket(conn net.Conn, buf []byte) (int, error) { bufCopy := make([]byte, streamingPacketHeaderLen+len(buf)) binary.BigEndian.PutUint16(bufCopy, uint16(len(buf))) copy(bufCopy[2:], buf) n, err := conn.Write(bufCopy) if err != nil { return 0, err } return n - streamingPacketHeaderLen, nil } ice-2.3.1/tcp_mux_multi.go000066400000000000000000000047561437620344400155200ustar00rootroot00000000000000// Package ice ... // //nolint:dupl package ice import "net" // AllConnsGetter allows multiple fixed TCP ports to be used, // each of which is multiplexed like TCPMux. 
AllConnsGetter also acts as // a TCPMux, in which case it will return a single connection for one // of the ports. type AllConnsGetter interface { GetAllConns(ufrag string, isIPv6 bool, localIP net.IP) ([]net.PacketConn, error) } // MultiTCPMuxDefault implements both TCPMux and AllConnsGetter, // allowing users to pass multiple TCPMux instances to the ICE agent // configuration. type MultiTCPMuxDefault struct { muxes []TCPMux } // NewMultiTCPMuxDefault creates an instance of MultiTCPMuxDefault that // uses the provided TCPMux instances. func NewMultiTCPMuxDefault(muxes ...TCPMux) *MultiTCPMuxDefault { return &MultiTCPMuxDefault{ muxes: muxes, } } // GetConnByUfrag returns a PacketConn given the connection's ufrag, network and local address // creates the connection if an existing one can't be found. This, unlike // GetAllConns, will only return a single PacketConn from the first mux that was // passed in to NewMultiTCPMuxDefault. func (m *MultiTCPMuxDefault) GetConnByUfrag(ufrag string, isIPv6 bool, local net.IP) (net.PacketConn, error) { // NOTE: We always use the first element here in order to maintain the // behavior of using an existing connection if one exists. if len(m.muxes) == 0 { return nil, errNoTCPMuxAvailable } return m.muxes[0].GetConnByUfrag(ufrag, isIPv6, local) } // RemoveConnByUfrag stops and removes the muxed packet connection // from all underlying TCPMux instances. func (m *MultiTCPMuxDefault) RemoveConnByUfrag(ufrag string) { for _, mux := range m.muxes { mux.RemoveConnByUfrag(ufrag) } } // GetAllConns returns a PacketConn for each underlying TCPMux func (m *MultiTCPMuxDefault) GetAllConns(ufrag string, isIPv6 bool, local net.IP) ([]net.PacketConn, error) { if len(m.muxes) == 0 { // Make sure that we either return at least one connection or an error. 
return nil, errNoTCPMuxAvailable } var conns []net.PacketConn for _, mux := range m.muxes { conn, err := mux.GetConnByUfrag(ufrag, isIPv6, local) if err != nil { // For now, this implementation is all or none. return nil, err } if conn != nil { conns = append(conns, conn) } } return conns, nil } // Close the multi mux, no further connections could be created func (m *MultiTCPMuxDefault) Close() error { var err error for _, mux := range m.muxes { if e := mux.Close(); e != nil { err = e } } return err } ice-2.3.1/tcp_mux_multi_test.go000066400000000000000000000073341437620344400165520ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "io" "net" "testing" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestMultiTCPMux_Recv(t *testing.T) { for name, bufSize := range map[string]int{ "no buffer": 0, "buffered 4MB": 4 * 1024 * 1024, } { bufSize := bufSize t.Run(name, func(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() var muxInstances []TCPMux for i := 0; i < 3; i++ { listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, Port: 0, }) require.NoError(t, err, "error starting listener") defer func() { _ = listener.Close() }() tcpMux := NewTCPMuxDefault(TCPMuxParams{ Listener: listener, Logger: loggerFactory.NewLogger("ice"), ReadBufferSize: 20, WriteBufferSize: bufSize, }) muxInstances = append(muxInstances, tcpMux) require.NotNil(t, tcpMux.LocalAddr(), "tcpMux.LocalAddr() is nil") } multiMux := NewMultiTCPMuxDefault(muxInstances...) 
defer func() { _ = multiMux.Close() }() pktConns, err := multiMux.GetAllConns("myufrag", false, net.IP{127, 0, 0, 1}) require.NoError(t, err, "error retrieving muxed connection for ufrag") for _, pktConn := range pktConns { defer func() { _ = pktConn.Close() }() conn, err := net.DialTCP("tcp", nil, pktConn.LocalAddr().(*net.TCPAddr)) require.NoError(t, err, "error dialing test tcp connection") msg := stun.New() msg.Type = stun.MessageType{Method: stun.MethodBinding, Class: stun.ClassRequest} msg.Add(stun.AttrUsername, []byte("myufrag:otherufrag")) msg.Encode() n, err := writeStreamingPacket(conn, msg.Raw) require.NoError(t, err, "error writing tcp stun packet") recv := make([]byte, n) n2, rAddr, err := pktConn.ReadFrom(recv) require.NoError(t, err, "error receiving data") assert.Equal(t, conn.LocalAddr(), rAddr, "remote tcp address mismatch") assert.Equal(t, n, n2, "received byte size mismatch") assert.Equal(t, msg.Raw, recv, "received bytes mismatch") // check echo response n, err = pktConn.WriteTo(recv, conn.LocalAddr()) require.NoError(t, err, "error writing echo stun packet") recvEcho := make([]byte, n) n3, err := readStreamingPacket(conn, recvEcho) require.NoError(t, err, "error receiving echo data") assert.Equal(t, n2, n3, "received byte size mismatch") assert.Equal(t, msg.Raw, recvEcho, "received bytes mismatch") } }) } } func TestMultiTCPMux_NoDeadlockWhenClosingUnusedPacketConn(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() var tcpMuxInstances []TCPMux for i := 0; i < 3; i++ { listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, Port: 0, }) require.NoError(t, err, "error starting listener") defer func() { _ = listener.Close() }() tcpMux := NewTCPMuxDefault(TCPMuxParams{ Listener: listener, Logger: loggerFactory.NewLogger("ice"), ReadBufferSize: 20, }) tcpMuxInstances = append(tcpMuxInstances, tcpMux) } muxMulti := NewMultiTCPMuxDefault(tcpMuxInstances...) 
_, err := muxMulti.GetAllConns("test", false, net.IP{127, 0, 0, 1}) require.NoError(t, err, "error getting conn by ufrag") require.NoError(t, muxMulti.Close(), "error closing tcpMux") conn, err := muxMulti.GetAllConns("test", false, net.IP{127, 0, 0, 1}) assert.Nil(t, conn, "should receive nil because mux is closed") assert.Equal(t, io.ErrClosedPipe, err, "should receive error because mux is closed") } ice-2.3.1/tcp_mux_test.go000066400000000000000000000065611437620344400153410ustar00rootroot00000000000000package ice import ( "io" "net" "testing" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( _ TCPMux = &TCPMuxDefault{} _ TCPMux = &invalidTCPMux{} ) func TestTCPMux_Recv(t *testing.T) { for name, bufSize := range map[string]int{ "no buffer": 0, "buffered 4MB": 4 * 1024 * 1024, } { bufSize := bufSize t.Run(name, func(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, Port: 0, }) require.NoError(t, err, "error starting listener") defer func() { _ = listener.Close() }() tcpMux := NewTCPMuxDefault(TCPMuxParams{ Listener: listener, Logger: loggerFactory.NewLogger("ice"), ReadBufferSize: 20, WriteBufferSize: bufSize, }) defer func() { _ = tcpMux.Close() }() require.NotNil(t, tcpMux.LocalAddr(), "tcpMux.LocalAddr() is nil") conn, err := net.DialTCP("tcp", nil, tcpMux.LocalAddr().(*net.TCPAddr)) require.NoError(t, err, "error dialing test tcp connection") msg := stun.New() msg.Type = stun.MessageType{Method: stun.MethodBinding, Class: stun.ClassRequest} msg.Add(stun.AttrUsername, []byte("myufrag:otherufrag")) msg.Encode() n, err := writeStreamingPacket(conn, msg.Raw) require.NoError(t, err, "error writing tcp stun packet") pktConn, err := tcpMux.GetConnByUfrag("myufrag", false, listener.Addr().(*net.TCPAddr).IP) 
require.NoError(t, err, "error retrieving muxed connection for ufrag") defer func() { _ = pktConn.Close() }() recv := make([]byte, n) n2, rAddr, err := pktConn.ReadFrom(recv) require.NoError(t, err, "error receiving data") assert.Equal(t, conn.LocalAddr(), rAddr, "remote tcp address mismatch") assert.Equal(t, n, n2, "received byte size mismatch") assert.Equal(t, msg.Raw, recv, "received bytes mismatch") // check echo response n, err = pktConn.WriteTo(recv, conn.LocalAddr()) require.NoError(t, err, "error writing echo stun packet") recvEcho := make([]byte, n) n3, err := readStreamingPacket(conn, recvEcho) require.NoError(t, err, "error receiving echo data") assert.Equal(t, n2, n3, "received byte size mismatch") assert.Equal(t, msg.Raw, recvEcho, "received bytes mismatch") }) } } func TestTCPMux_NoDeadlockWhenClosingUnusedPacketConn(t *testing.T) { report := test.CheckRoutines(t) defer report() loggerFactory := logging.NewDefaultLoggerFactory() listener, err := net.ListenTCP("tcp", &net.TCPAddr{ IP: net.IP{127, 0, 0, 1}, Port: 0, }) require.NoError(t, err, "error starting listener") defer func() { _ = listener.Close() }() tcpMux := NewTCPMuxDefault(TCPMuxParams{ Listener: listener, Logger: loggerFactory.NewLogger("ice"), ReadBufferSize: 20, }) _, err = tcpMux.GetConnByUfrag("test", false, listener.Addr().(*net.TCPAddr).IP) require.NoError(t, err, "error getting conn by ufrag") require.NoError(t, tcpMux.Close(), "error closing tcpMux") conn, err := tcpMux.GetConnByUfrag("test", false, listener.Addr().(*net.TCPAddr).IP) assert.Nil(t, conn, "should receive nil because mux is closed") assert.Equal(t, io.ErrClosedPipe, err, "should receive error because mux is closed") } ice-2.3.1/tcp_packet_conn.go000066400000000000000000000140141437620344400157450ustar00rootroot00000000000000package ice import ( "errors" "fmt" "io" "net" "sync" "sync/atomic" "time" "github.com/pion/logging" "github.com/pion/transport/v2/packetio" ) type bufferedConn struct { net.Conn buf 
*packetio.Buffer logger logging.LeveledLogger closed int32 } func newBufferedConn(conn net.Conn, bufSize int, logger logging.LeveledLogger) net.Conn { buf := packetio.NewBuffer() if bufSize > 0 { buf.SetLimitSize(bufSize) } bc := &bufferedConn{ Conn: conn, buf: buf, logger: logger, } go bc.writeProcess() return bc } func (bc *bufferedConn) Write(b []byte) (int, error) { n, err := bc.buf.Write(b) if err != nil { return n, err } return n, nil } func (bc *bufferedConn) writeProcess() { pktBuf := make([]byte, receiveMTU) for atomic.LoadInt32(&bc.closed) == 0 { n, err := bc.buf.Read(pktBuf) if errors.Is(err, io.EOF) { return } if err != nil { bc.logger.Warnf("read buffer error: %s", err) continue } if _, err := bc.Conn.Write(pktBuf[:n]); err != nil { bc.logger.Warnf("write error: %s", err) continue } } } func (bc *bufferedConn) Close() error { atomic.StoreInt32(&bc.closed, 1) _ = bc.buf.Close() return bc.Conn.Close() } type tcpPacketConn struct { params *tcpPacketParams // conns is a map of net.Conns indexed by remote net.Addr.String() conns map[string]net.Conn recvChan chan streamingPacket mu sync.Mutex wg sync.WaitGroup closedChan chan struct{} closeOnce sync.Once } type streamingPacket struct { Data []byte RAddr net.Addr Err error } type tcpPacketParams struct { ReadBuffer int LocalAddr net.Addr Logger logging.LeveledLogger WriteBuffer int } func newTCPPacketConn(params tcpPacketParams) *tcpPacketConn { p := &tcpPacketConn{ params: ¶ms, conns: map[string]net.Conn{}, recvChan: make(chan streamingPacket, params.ReadBuffer), closedChan: make(chan struct{}), } return p } func (t *tcpPacketConn) AddConn(conn net.Conn, firstPacketData []byte) error { t.params.Logger.Infof("AddConn: %s remote %s to local %s", conn.RemoteAddr().Network(), conn.RemoteAddr(), conn.LocalAddr()) t.mu.Lock() defer t.mu.Unlock() select { case <-t.closedChan: return io.ErrClosedPipe default: } if _, ok := t.conns[conn.RemoteAddr().String()]; ok { return fmt.Errorf("%w: %s", 
errConnectionAddrAlreadyExist, conn.RemoteAddr().String()) } if t.params.WriteBuffer > 0 { conn = newBufferedConn(conn, t.params.WriteBuffer, t.params.Logger) } t.conns[conn.RemoteAddr().String()] = conn t.wg.Add(1) go func() { defer t.wg.Done() if firstPacketData != nil { select { case <-t.closedChan: // NOTE: recvChan can fill up and never drain in edge // cases while closing a connection, which can cause the // packetConn to never finish closing. Bail out early // here to prevent that. return case t.recvChan <- streamingPacket{firstPacketData, conn.RemoteAddr(), nil}: } } t.startReading(conn) }() return nil } func (t *tcpPacketConn) startReading(conn net.Conn) { buf := make([]byte, receiveMTU) for { n, err := readStreamingPacket(conn, buf) // t.params.Logger.Infof("readStreamingPacket read %d bytes", n) if err != nil { t.params.Logger.Infof("%v: %s", errReadingStreamingPacket, err) t.handleRecv(streamingPacket{nil, conn.RemoteAddr(), err}) t.removeConn(conn) return } data := make([]byte, n) copy(data, buf[:n]) // t.params.Logger.Infof("Writing read streaming packet to recvChan: %d bytes", len(data)) t.handleRecv(streamingPacket{data, conn.RemoteAddr(), nil}) } } func (t *tcpPacketConn) handleRecv(pkt streamingPacket) { t.mu.Lock() recvChan := t.recvChan if t.isClosed() { recvChan = nil } t.mu.Unlock() select { case recvChan <- pkt: case <-t.closedChan: } } func (t *tcpPacketConn) isClosed() bool { select { case <-t.closedChan: return true default: return false } } // WriteTo is for passive and s-o candidates. func (t *tcpPacketConn) ReadFrom(b []byte) (n int, rAddr net.Addr, err error) { pkt, ok := <-t.recvChan if !ok { return 0, nil, io.ErrClosedPipe } if pkt.Err != nil { return 0, pkt.RAddr, pkt.Err } if cap(b) < len(pkt.Data) { return 0, pkt.RAddr, io.ErrShortBuffer } n = len(pkt.Data) copy(b, pkt.Data[:n]) return n, pkt.RAddr, err } // WriteTo is for active and s-o candidates. 
func (t *tcpPacketConn) WriteTo(buf []byte, rAddr net.Addr) (n int, err error) { t.mu.Lock() conn, ok := t.conns[rAddr.String()] t.mu.Unlock() if !ok { return 0, io.ErrClosedPipe // conn, err := net.DialTCP(tcp, nil, rAddr.(*net.TCPAddr)) // if err != nil { // t.params.Logger.Tracef("DialTCP error: %s", err) // return 0, err // } // go t.startReading(conn) // t.conns[rAddr.String()] = conn } n, err = writeStreamingPacket(conn, buf) if err != nil { t.params.Logger.Tracef("%w %s", errWriting, rAddr) return n, err } return n, err } func (t *tcpPacketConn) closeAndLogError(closer io.Closer) { err := closer.Close() if err != nil { t.params.Logger.Warnf("%v: %s", errClosingConnection, err) } } func (t *tcpPacketConn) removeConn(conn net.Conn) { t.mu.Lock() defer t.mu.Unlock() t.closeAndLogError(conn) delete(t.conns, conn.RemoteAddr().String()) } func (t *tcpPacketConn) Close() error { t.mu.Lock() var shouldCloseRecvChan bool t.closeOnce.Do(func() { close(t.closedChan) shouldCloseRecvChan = true }) for _, conn := range t.conns { t.closeAndLogError(conn) delete(t.conns, conn.RemoteAddr().String()) } t.mu.Unlock() t.wg.Wait() if shouldCloseRecvChan { close(t.recvChan) } return nil } func (t *tcpPacketConn) LocalAddr() net.Addr { return t.params.LocalAddr } func (t *tcpPacketConn) SetDeadline(tm time.Time) error { return nil } func (t *tcpPacketConn) SetReadDeadline(tm time.Time) error { return nil } func (t *tcpPacketConn) SetWriteDeadline(tm time.Time) error { return nil } func (t *tcpPacketConn) CloseChannel() <-chan struct{} { return t.closedChan } func (t *tcpPacketConn) String() string { return fmt.Sprintf("tcpPacketConn{LocalAddr: %s}", t.params.LocalAddr) } ice-2.3.1/tcptype.go000066400000000000000000000021561437620344400143070ustar00rootroot00000000000000package ice import "strings" // TCPType is the type of ICE TCP candidate as described in // https://tools.ietf.org/html/rfc6544#section-4.5 type TCPType int const ( // TCPTypeUnspecified is the default value. 
For example UDP candidates do not // need this field. TCPTypeUnspecified TCPType = iota // TCPTypeActive is active TCP candidate, which initiates TCP connections. TCPTypeActive // TCPTypePassive is passive TCP candidate, only accepts TCP connections. TCPTypePassive // TCPTypeSimultaneousOpen is like active and passive at the same time. TCPTypeSimultaneousOpen ) // NewTCPType creates a new TCPType from string. func NewTCPType(value string) TCPType { switch strings.ToLower(value) { case "active": return TCPTypeActive case "passive": return TCPTypePassive case "so": return TCPTypeSimultaneousOpen default: return TCPTypeUnspecified } } func (t TCPType) String() string { switch t { case TCPTypeUnspecified: return "" case TCPTypeActive: return "active" case TCPTypePassive: return "passive" case TCPTypeSimultaneousOpen: return "so" default: return ErrUnknownType.Error() } } ice-2.3.1/tcptype_test.go000066400000000000000000000012451437620344400153440ustar00rootroot00000000000000package ice import ( "testing" "github.com/stretchr/testify/assert" ) func TestTCPType(t *testing.T) { var tcpType TCPType assert.Equal(t, TCPTypeUnspecified, tcpType) assert.Equal(t, TCPTypeActive, NewTCPType("active")) assert.Equal(t, TCPTypePassive, NewTCPType("passive")) assert.Equal(t, TCPTypeSimultaneousOpen, NewTCPType("so")) assert.Equal(t, TCPTypeUnspecified, NewTCPType("something else")) assert.Equal(t, "", TCPTypeUnspecified.String()) assert.Equal(t, "active", TCPTypeActive.String()) assert.Equal(t, "passive", TCPTypePassive.String()) assert.Equal(t, "so", TCPTypeSimultaneousOpen.String()) assert.Equal(t, "Unknown", TCPType(-1).String()) } ice-2.3.1/transport.go000066400000000000000000000064771437620344400146650ustar00rootroot00000000000000package ice import ( "context" "net" "sync/atomic" "time" "github.com/pion/stun" ) // Dial connects to the remote agent, acting as the controlling ice agent. // Dial blocks until at least one ice candidate pair has successfully connected. 
// connect performs the work shared by Dial and Accept: it validates the
// agent, starts connectivity checks against the remote credentials, and
// then blocks until a candidate pair connects, the agent shuts down, or
// ctx is canceled.
func (a *Agent) connect(ctx context.Context, isControlling bool, remoteUfrag, remotePwd string) (*Conn, error) {
	err := a.ok()
	if err != nil {
		return nil, err
	}
	err = a.startConnectivityChecks(isControlling, remoteUfrag, remotePwd) //nolint:contextcheck
	if err != nil {
		return nil, err
	}

	// block until pair selected
	select {
	case <-a.done:
		// Agent was closed before any pair connected; surface its error.
		return nil, a.getErr()
	case <-ctx.Done():
		return nil, ErrCanceledByCaller
	case <-a.onConnected:
		// A candidate pair connected; fall through and return a Conn.
	}

	return &Conn{
		agent: a,
	}, nil
}
func (c *Conn) Write(p []byte) (int, error) {
	err := c.agent.ok()
	if err != nil {
		return 0, err
	}

	// Raw STUN traffic must not be sent over the data connection; STUN
	// is handled by the agent itself.
	if stun.IsMessage(p) {
		return 0, errICEWriteSTUNMessage
	}

	pair := c.agent.getSelectedPair()
	if pair == nil {
		// No pair has been nominated yet; fall back to the best valid
		// pair known to the agent, queried on the agent's run loop.
		if err = c.agent.run(c.agent.context(), func(ctx context.Context, a *Agent) {
			pair = a.getBestValidCandidatePair()
		}); err != nil {
			return 0, err
		}

		if pair == nil {
			// NOTE(review): err is nil here, so this returns (0, nil)
			// without writing anything — that violates the io.Writer
			// contract (n < len(p) requires a non-nil error). Confirm
			// callers tolerate this before changing it.
			return 0, err
		}
	}

	atomic.AddUint64(&c.bytesSent, uint64(len(p)))
	return pair.Write(p)
}
time.Millisecond // allow 20msec error in time ticker := time.NewTicker(pollRate) defer func() { ticker.Stop() err := c.Close() if err != nil { t.Error(err) } }() startedAt := time.Now() for cnt := time.Duration(0); cnt <= timeout+defaultKeepaliveInterval+pollRate; cnt += pollRate { <-ticker.C var cs ConnectionState err := c.agent.run(context.Background(), func(ctx context.Context, agent *Agent) { cs = agent.connectionState }) if err != nil { // we should never get here. panic(err) } if cs != ConnectionStateConnected { elapsed := time.Since(startedAt) if elapsed+margin < timeout { t.Fatalf("Connection timed out %f msec early", elapsed.Seconds()*1000) } else { t.Logf("Connection timed out in %f msec", elapsed.Seconds()*1000) return } } } t.Fatalf("Connection failed to time out in time. (expected timeout: %v)", timeout) } func TestTimeout(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } // Check for leaking routines report := test.CheckRoutines(t) defer report() // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 20) defer lim.Stop() t.Run("WithoutDisconnectTimeout", func(t *testing.T) { ca, cb := pipe(nil) err := cb.Close() if err != nil { // we should never get here. panic(err) } testTimeout(t, ca, defaultDisconnectedTimeout) }) t.Run("WithDisconnectTimeout", func(t *testing.T) { ca, cb := pipeWithTimeout(5*time.Second, 3*time.Second) err := cb.Close() if err != nil { // we should never get here. panic(err) } testTimeout(t, ca, 5*time.Second) }) } func TestReadClosed(t *testing.T) { // Check for leaking routines report := test.CheckRoutines(t) defer report() // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 20) defer lim.Stop() ca, cb := pipe(nil) err := ca.Close() if err != nil { // we should never get here. panic(err) } err = cb.Close() if err != nil { // we should never get here. 
// check is a test helper that panics when err is non-nil.
func check(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// randomPort binds an ephemeral UDP port on the IPv4 loopback interface
// and returns its port number, failing the test if the bind or the
// address type assertion does not succeed. The socket is closed before
// returning, so the port is only probabilistically free afterwards.
func randomPort(t testing.TB) int {
	t.Helper()

	conn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("failed to pickPort: %v", err)
	}
	defer func() {
		_ = conn.Close()
	}()

	udpAddr, ok := conn.LocalAddr().(*net.UDPAddr)
	if !ok {
		t.Fatalf("unknown addr type %T", conn.LocalAddr())
		return 0
	}
	return udpAddr.Port
}
ca.Write(make([]byte, 10)); err != nil { t.Fatal("unexpected error trying to write") } var wg sync.WaitGroup wg.Add(1) go func() { buf := make([]byte, 10) if _, err := cb.Read(buf); err != nil { panic(errRead) } wg.Done() }() wg.Wait() if ca.BytesSent() != 10 { t.Fatal("bytes sent don't match") } if cb.BytesReceived() != 10 { t.Fatal("bytes received don't match") } err := ca.Close() if err != nil { // we should never get here. panic(err) } err = cb.Close() if err != nil { // we should never get here. panic(err) } } ice-2.3.1/transport_vnet_test.go000066400000000000000000000043601437620344400167450ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "fmt" "net" "testing" "time" "github.com/pion/transport/v2/test" "github.com/pion/transport/v2/vnet" "github.com/stretchr/testify/assert" ) func TestRemoteLocalAddr(t *testing.T) { // Check for leaking routines report := test.CheckRoutines(t) defer report() // Limit runtime in case of deadlocks lim := test.TimeOut(time.Second * 20) defer lim.Stop() // Agent0 is behind 1:1 NAT natType0 := &vnet.NATType{Mode: vnet.NATModeNAT1To1} // Agent1 is behind 1:1 NAT natType1 := &vnet.NATType{Mode: vnet.NATModeNAT1To1} v, errVnet := buildVNet(natType0, natType1) if !assert.NoError(t, errVnet, "should succeed") { return } defer v.close() stunServerURL := &URL{ Scheme: SchemeTypeSTUN, Host: vnetSTUNServerIP, Port: vnetSTUNServerPort, Proto: ProtoTypeUDP, } t.Run("Disconnected Returns nil", func(t *testing.T) { disconnectedAgent, err := NewAgent(&AgentConfig{}) assert.NoError(t, err) disconnectedConn := Conn{agent: disconnectedAgent} assert.Nil(t, disconnectedConn.RemoteAddr()) assert.Nil(t, disconnectedConn.LocalAddr()) assert.NoError(t, disconnectedConn.Close()) }) t.Run("Remote/Local Pair Match between Agents", func(t *testing.T) { ca, cb := pipeWithVNet(v, &agentTestConfig{ urls: []*URL{stunServerURL}, }, &agentTestConfig{ urls: []*URL{stunServerURL}, }, ) aRAddr := ca.RemoteAddr() aLAddr := ca.LocalAddr() 
bRAddr := cb.RemoteAddr() bLAddr := cb.LocalAddr() // Assert that nothing is nil assert.NotNil(t, aRAddr) assert.NotNil(t, aLAddr) assert.NotNil(t, bRAddr) assert.NotNil(t, bLAddr) // Assert addresses assert.Equal(t, aLAddr.String(), fmt.Sprintf("%s:%d", vnetLocalIPA, bRAddr.(*net.UDPAddr).Port), //nolint:forcetypeassert ) assert.Equal(t, bLAddr.String(), fmt.Sprintf("%s:%d", vnetLocalIPB, aRAddr.(*net.UDPAddr).Port), //nolint:forcetypeassert ) assert.Equal(t, aRAddr.String(), fmt.Sprintf("%s:%d", vnetGlobalIPB, bLAddr.(*net.UDPAddr).Port), //nolint:forcetypeassert ) assert.Equal(t, bRAddr.String(), fmt.Sprintf("%s:%d", vnetGlobalIPA, aLAddr.(*net.UDPAddr).Port), //nolint:forcetypeassert ) // Close assert.NoError(t, ca.Close()) assert.NoError(t, cb.Close()) }) } ice-2.3.1/udp_mux.go000066400000000000000000000213501437620344400142750ustar00rootroot00000000000000package ice import ( "errors" "io" "net" "os" "strings" "sync" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2" "github.com/pion/transport/v2/stdnet" ) // UDPMux allows multiple connections to go over a single UDP port type UDPMux interface { io.Closer GetConn(ufrag string, addr net.Addr) (net.PacketConn, error) RemoveConnByUfrag(ufrag string) GetListenAddresses() []net.Addr } // UDPMuxDefault is an implementation of the interface type UDPMuxDefault struct { params UDPMuxParams closedChan chan struct{} closeOnce sync.Once // connsIPv4 and connsIPv6 are maps of all udpMuxedConn indexed by ufrag|network|candidateType connsIPv4, connsIPv6 map[string]*udpMuxedConn addressMapMu sync.RWMutex addressMap map[string]*udpMuxedConn // buffer pool to recycle buffers for net.UDPAddr encodes/decodes pool *sync.Pool mu sync.Mutex // for UDP connection listen at unspecified address localAddrsForUnspecified []net.Addr } const maxAddrSize = 512 // UDPMuxParams are parameters for UDPMux. 
type UDPMuxParams struct {
	Logger  logging.LeveledLogger
	UDPConn net.PacketConn

	// Net is required for gathering local addresses in case a UDPConn
	// is passed which does not bind to a specific local address.
	Net transport.Net
}
closedChan: make(chan struct{}, 1), pool: &sync.Pool{ New: func() interface{} { // big enough buffer to fit both packet and address return newBufferHolder(receiveMTU + maxAddrSize) }, }, localAddrsForUnspecified: localAddrsForUnspecified, } go m.connWorker() return m } // LocalAddr returns the listening address of this UDPMuxDefault func (m *UDPMuxDefault) LocalAddr() net.Addr { return m.params.UDPConn.LocalAddr() } // GetListenAddresses returns the list of addresses that this mux is listening on func (m *UDPMuxDefault) GetListenAddresses() []net.Addr { if len(m.localAddrsForUnspecified) > 0 { return m.localAddrsForUnspecified } return []net.Addr{m.LocalAddr()} } // GetConn returns a PacketConn given the connection's ufrag and network address // creates the connection if an existing one can't be found func (m *UDPMuxDefault) GetConn(ufrag string, addr net.Addr) (net.PacketConn, error) { // don't check addr for mux using unspecified address if len(m.localAddrsForUnspecified) == 0 && m.params.UDPConn.LocalAddr().String() != addr.String() { return nil, errInvalidAddress } var isIPv6 bool if udpAddr, _ := addr.(*net.UDPAddr); udpAddr != nil && udpAddr.IP.To4() == nil { isIPv6 = true } m.mu.Lock() defer m.mu.Unlock() if m.IsClosed() { return nil, io.ErrClosedPipe } if conn, ok := m.getConn(ufrag, isIPv6); ok { return conn, nil } c := m.createMuxedConn(ufrag) go func() { <-c.CloseChannel() m.RemoveConnByUfrag(ufrag) }() if isIPv6 { m.connsIPv6[ufrag] = c } else { m.connsIPv4[ufrag] = c } return c, nil } // RemoveConnByUfrag stops and removes the muxed packet connection func (m *UDPMuxDefault) RemoveConnByUfrag(ufrag string) { removedConns := make([]*udpMuxedConn, 0, 2) // Keep lock section small to avoid deadlock with conn lock m.mu.Lock() if c, ok := m.connsIPv4[ufrag]; ok { delete(m.connsIPv4, ufrag) removedConns = append(removedConns, c) } if c, ok := m.connsIPv6[ufrag]; ok { delete(m.connsIPv6, ufrag) removedConns = append(removedConns, c) } m.mu.Unlock() if 
len(removedConns) == 0 { // No need to lock if no connection was found return } m.addressMapMu.Lock() defer m.addressMapMu.Unlock() for _, c := range removedConns { addresses := c.getAddresses() for _, addr := range addresses { delete(m.addressMap, addr) } } } // IsClosed returns true if the mux had been closed func (m *UDPMuxDefault) IsClosed() bool { select { case <-m.closedChan: return true default: return false } } // Close the mux, no further connections could be created func (m *UDPMuxDefault) Close() error { var err error m.closeOnce.Do(func() { m.mu.Lock() defer m.mu.Unlock() for _, c := range m.connsIPv4 { _ = c.Close() } for _, c := range m.connsIPv6 { _ = c.Close() } m.connsIPv4 = make(map[string]*udpMuxedConn) m.connsIPv6 = make(map[string]*udpMuxedConn) close(m.closedChan) _ = m.params.UDPConn.Close() }) return err } func (m *UDPMuxDefault) writeTo(buf []byte, rAddr net.Addr) (n int, err error) { return m.params.UDPConn.WriteTo(buf, rAddr) } func (m *UDPMuxDefault) registerConnForAddress(conn *udpMuxedConn, addr string) { if m.IsClosed() { return } m.addressMapMu.Lock() defer m.addressMapMu.Unlock() existing, ok := m.addressMap[addr] if ok { existing.removeAddress(addr) } m.addressMap[addr] = conn m.params.Logger.Debugf("Registered %s for %s", addr, conn.params.Key) } func (m *UDPMuxDefault) createMuxedConn(key string) *udpMuxedConn { c := newUDPMuxedConn(&udpMuxedConnParams{ Mux: m, Key: key, AddrPool: m.pool, LocalAddr: m.LocalAddr(), Logger: m.params.Logger, }) return c } func (m *UDPMuxDefault) connWorker() { logger := m.params.Logger defer func() { _ = m.Close() }() buf := make([]byte, receiveMTU) for { n, addr, err := m.params.UDPConn.ReadFrom(buf) if m.IsClosed() { return } else if err != nil { if os.IsTimeout(err) { continue } else if !errors.Is(err, io.EOF) { logger.Errorf("could not read udp packet: %v", err) } return } udpAddr, ok := addr.(*net.UDPAddr) if !ok { logger.Errorf("underlying PacketConn did not return a UDPAddr") return } // If 
we have already seen this address dispatch to the appropriate destination m.addressMapMu.Lock() destinationConn := m.addressMap[addr.String()] m.addressMapMu.Unlock() // If we haven't seen this address before but is a STUN packet lookup by ufrag if destinationConn == nil && stun.IsMessage(buf[:n]) { msg := &stun.Message{ Raw: append([]byte{}, buf[:n]...), } if err = msg.Decode(); err != nil { m.params.Logger.Warnf("Failed to handle decode ICE from %s: %v", addr.String(), err) continue } attr, stunAttrErr := msg.Get(stun.AttrUsername) if stunAttrErr != nil { m.params.Logger.Warnf("No Username attribute in STUN message from %s", addr.String()) continue } ufrag := strings.Split(string(attr), ":")[0] isIPv6 := udpAddr.IP.To4() == nil m.mu.Lock() destinationConn, _ = m.getConn(ufrag, isIPv6) m.mu.Unlock() } if destinationConn == nil { m.params.Logger.Tracef("dropping packet from %s, addr: %s", udpAddr.String(), addr.String()) continue } if err = destinationConn.writePacket(buf[:n], udpAddr); err != nil { m.params.Logger.Errorf("could not write packet: %v", err) } } } func (m *UDPMuxDefault) getConn(ufrag string, isIPv6 bool) (val *udpMuxedConn, ok bool) { if isIPv6 { val, ok = m.connsIPv6[ufrag] } else { val, ok = m.connsIPv4[ufrag] } return } type bufferHolder struct { buf []byte } func newBufferHolder(size int) *bufferHolder { return &bufferHolder{ buf: make([]byte, size), } } ice-2.3.1/udp_mux_multi.go000066400000000000000000000137131437620344400155130ustar00rootroot00000000000000// Package ice ... // //nolint:dupl package ice import ( "fmt" "net" "github.com/pion/logging" "github.com/pion/transport/v2" "github.com/pion/transport/v2/stdnet" ) // MultiUDPMuxDefault implements both UDPMux and AllConnsGetter, // allowing users to pass multiple UDPMux instances to the ICE agent // configuration. 
type MultiUDPMuxDefault struct { muxes []UDPMux localAddrToMux map[string]UDPMux } // NewMultiUDPMuxDefault creates an instance of MultiUDPMuxDefault that // uses the provided UDPMux instances. func NewMultiUDPMuxDefault(muxes ...UDPMux) *MultiUDPMuxDefault { addrToMux := make(map[string]UDPMux) for _, mux := range muxes { for _, addr := range mux.GetListenAddresses() { addrToMux[addr.String()] = mux } } return &MultiUDPMuxDefault{ muxes: muxes, localAddrToMux: addrToMux, } } // GetConn returns a PacketConn given the connection's ufrag and network // creates the connection if an existing one can't be found. func (m *MultiUDPMuxDefault) GetConn(ufrag string, addr net.Addr) (net.PacketConn, error) { mux, ok := m.localAddrToMux[addr.String()] if !ok { return nil, errNoUDPMuxAvailable } return mux.GetConn(ufrag, addr) } // RemoveConnByUfrag stops and removes the muxed packet connection // from all underlying UDPMux instances. func (m *MultiUDPMuxDefault) RemoveConnByUfrag(ufrag string) { for _, mux := range m.muxes { mux.RemoveConnByUfrag(ufrag) } } // Close the multi mux, no further connections could be created func (m *MultiUDPMuxDefault) Close() error { var err error for _, mux := range m.muxes { if e := mux.Close(); e != nil { err = e } } return err } // GetListenAddresses returns the list of addresses that this mux is listening on func (m *MultiUDPMuxDefault) GetListenAddresses() []net.Addr { addrs := make([]net.Addr, 0, len(m.localAddrToMux)) for _, mux := range m.muxes { addrs = append(addrs, mux.GetListenAddresses()...) } return addrs } // NewMultiUDPMuxFromPort creates an instance of MultiUDPMuxDefault that // listen all interfaces on the provided port. 
func NewMultiUDPMuxFromPort(port int, opts ...UDPMuxFromPortOption) (*MultiUDPMuxDefault, error) { params := multiUDPMuxFromPortParam{ networks: []NetworkType{NetworkTypeUDP4, NetworkTypeUDP6}, } for _, opt := range opts { opt.apply(¶ms) } if params.net == nil { var err error if params.net, err = stdnet.NewNet(); err != nil { return nil, fmt.Errorf("failed to get create network: %w", err) } } ips, err := localInterfaces(params.net, params.ifFilter, params.ipFilter, params.networks, params.includeLoopback) if err != nil { return nil, err } conns := make([]net.PacketConn, 0, len(ips)) for _, ip := range ips { conn, listenErr := params.net.ListenUDP("udp", &net.UDPAddr{IP: ip, Port: port}) if listenErr != nil { err = listenErr break } if params.readBufferSize > 0 { _ = conn.SetReadBuffer(params.readBufferSize) } if params.writeBufferSize > 0 { _ = conn.SetWriteBuffer(params.writeBufferSize) } conns = append(conns, conn) } if err != nil { for _, conn := range conns { _ = conn.Close() } return nil, err } muxes := make([]UDPMux, 0, len(conns)) for _, conn := range conns { mux := NewUDPMuxDefault(UDPMuxParams{ Logger: params.logger, UDPConn: conn, Net: params.net, }) muxes = append(muxes, mux) } return NewMultiUDPMuxDefault(muxes...), nil } // UDPMuxFromPortOption provide options for NewMultiUDPMuxFromPort type UDPMuxFromPortOption interface { apply(*multiUDPMuxFromPortParam) } type multiUDPMuxFromPortParam struct { ifFilter func(string) bool ipFilter func(ip net.IP) bool networks []NetworkType readBufferSize int writeBufferSize int logger logging.LeveledLogger includeLoopback bool net transport.Net } type udpMuxFromPortOption struct { f func(*multiUDPMuxFromPortParam) } func (o *udpMuxFromPortOption) apply(p *multiUDPMuxFromPortParam) { o.f(p) } // UDPMuxFromPortWithInterfaceFilter set the filter to filter out interfaces that should not be used func UDPMuxFromPortWithInterfaceFilter(f func(string) bool) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p 
*multiUDPMuxFromPortParam) { p.ifFilter = f }, } } // UDPMuxFromPortWithIPFilter set the filter to filter out IP addresses that should not be used func UDPMuxFromPortWithIPFilter(f func(ip net.IP) bool) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.ipFilter = f }, } } // UDPMuxFromPortWithNetworks set the networks that should be used. default is both IPv4 and IPv6 func UDPMuxFromPortWithNetworks(networks ...NetworkType) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.networks = networks }, } } // UDPMuxFromPortWithReadBufferSize set the UDP connection read buffer size func UDPMuxFromPortWithReadBufferSize(size int) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.readBufferSize = size }, } } // UDPMuxFromPortWithWriteBufferSize set the UDP connection write buffer size func UDPMuxFromPortWithWriteBufferSize(size int) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.writeBufferSize = size }, } } // UDPMuxFromPortWithLogger set the logger for the created UDPMux func UDPMuxFromPortWithLogger(logger logging.LeveledLogger) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.logger = logger }, } } // UDPMuxFromPortWithLoopback set loopback interface should be included func UDPMuxFromPortWithLoopback() UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.includeLoopback = true }, } } // UDPMuxFromPortWithNet sets the network transport to use. 
func UDPMuxFromPortWithNet(n transport.Net) UDPMuxFromPortOption { return &udpMuxFromPortOption{ f: func(p *multiUDPMuxFromPortParam) { p.net = n }, } } ice-2.3.1/udp_mux_multi_test.go000066400000000000000000000072321437620344400165510ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "net" "strings" "sync" "testing" "time" "github.com/pion/transport/v2/test" "github.com/stretchr/testify/require" ) func TestMultiUDPMux(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() conn1, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}) require.NoError(t, err) conn2, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}) require.NoError(t, err) conn3, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv6loopback}) if err != nil { // ipv6 is not supported on this machine t.Log("ipv6 is not supported on this machine") } muxes := []UDPMux{} muxV41 := NewUDPMuxDefault(UDPMuxParams{UDPConn: conn1}) muxes = append(muxes, muxV41) muxV42 := NewUDPMuxDefault(UDPMuxParams{UDPConn: conn2}) muxes = append(muxes, muxV42) if conn3 != nil { muxV6 := NewUDPMuxDefault(UDPMuxParams{UDPConn: conn3}) muxes = append(muxes, muxV6) } udpMuxMulti := NewMultiUDPMuxDefault(muxes...) 
defer func() { _ = udpMuxMulti.Close() _ = conn1.Close() _ = conn2.Close() }() wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() testMultiUDPMuxConnections(t, udpMuxMulti, "ufrag1", udp) }() wg.Add(1) go func() { defer wg.Done() testMultiUDPMuxConnections(t, udpMuxMulti, "ufrag2", udp4) }() // skip ipv6 test on i386 const ptrSize = 32 << (^uintptr(0) >> 63) if ptrSize != 32 { testMultiUDPMuxConnections(t, udpMuxMulti, "ufrag3", udp6) } wg.Wait() require.NoError(t, udpMuxMulti.Close()) // can't create more connections _, err = udpMuxMulti.GetConn("failufrag", conn1.LocalAddr()) require.Error(t, err) } func testMultiUDPMuxConnections(t *testing.T, udpMuxMulti *MultiUDPMuxDefault, ufrag string, network string) { addrs := udpMuxMulti.GetListenAddresses() pktConns := make([]net.PacketConn, 0, len(addrs)) for _, addr := range addrs { udpAddr, ok := addr.(*net.UDPAddr) require.True(t, ok) if network == udp4 && udpAddr.IP.To4() == nil { continue } else if network == udp6 && udpAddr.IP.To4() != nil { continue } c, err := udpMuxMulti.GetConn(ufrag, addr) require.NoError(t, err, "error retrieving muxed connection for ufrag") pktConns = append(pktConns, c) } defer func() { for _, c := range pktConns { _ = c.Close() } }() // Try talking with each PacketConn for _, pktConn := range pktConns { remoteConn, err := net.DialUDP(network, nil, pktConn.LocalAddr().(*net.UDPAddr)) require.NoError(t, err, "error dialing test udp connection") testMuxConnectionPair(t, pktConn, remoteConn, ufrag) } } func TestUnspecifiedUDPMux(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() muxPort := 7778 udpMuxMulti, err := NewMultiUDPMuxFromPort(muxPort, UDPMuxFromPortWithInterfaceFilter(func(s string) bool { defaultDockerBridgeNetwork := strings.Contains(s, "docker") customDockerBridgeNetwork := strings.Contains(s, "br-") return !defaultDockerBridgeNetwork && !customDockerBridgeNetwork })) require.NoError(t, err) 
require.GreaterOrEqual(t, len(udpMuxMulti.muxes), 1, "at least have 1 muxes") defer func() { _ = udpMuxMulti.Close() }() wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() testMultiUDPMuxConnections(t, udpMuxMulti, "ufrag1", udp) }() wg.Add(1) go func() { defer wg.Done() testMultiUDPMuxConnections(t, udpMuxMulti, "ufrag2", udp4) }() // skip ipv6 test on i386 const ptrSize = 32 << (^uintptr(0) >> 63) if ptrSize != 32 { testMultiUDPMuxConnections(t, udpMuxMulti, "ufrag3", udp6) } wg.Wait() require.NoError(t, udpMuxMulti.Close()) } ice-2.3.1/udp_mux_test.go000066400000000000000000000164401437620344400153400ustar00rootroot00000000000000//go:build !js // +build !js package ice //nolint:gosec import ( "crypto/rand" "crypto/sha1" "encoding/binary" "net" "sync" "testing" "time" "github.com/pion/stun" "github.com/pion/transport/v2/test" "github.com/stretchr/testify/require" ) func TestUDPMux(t *testing.T) { report := test.CheckRoutines(t) defer report() lim := test.TimeOut(time.Second * 30) defer lim.Stop() conn4, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}) require.NoError(t, err) conn6, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv6loopback}) if err != nil { t.Log("IPv6 is not supported on this machine") } connUnspecified, err := net.ListenUDP(udp, nil) require.NoError(t, err) conn4Unspecified, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv4zero}) require.NoError(t, err) conn6Unspecified, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv6unspecified}) if err != nil { t.Log("IPv6 is not supported on this machine") } type testCase struct { name string conn net.PacketConn network string } for _, subTest := range []testCase{ {name: "IPv4loopback", conn: conn4, network: udp4}, {name: "IPv6loopback", conn: conn6, network: udp6}, {name: "Unspecified", conn: connUnspecified, network: udp}, {name: "IPv4Unspecified", conn: conn4Unspecified, network: udp4}, {name: "IPv6Unspecified", conn: conn6Unspecified, network: udp6}, } { network, conn 
:= subTest.network, subTest.conn if udpConn, ok := conn.(*net.UDPConn); !ok || udpConn == nil { continue } t.Run(subTest.name, func(t *testing.T) { udpMux := NewUDPMuxDefault(UDPMuxParams{ Logger: nil, UDPConn: conn, }) defer func() { _ = udpMux.Close() _ = conn.Close() }() require.NotNil(t, udpMux.LocalAddr(), "udpMux.LocalAddr() is nil") wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() testMuxConnection(t, udpMux, "ufrag1", udp) }() const ptrSize = 32 << (^uintptr(0) >> 63) if network == udp { wg.Add(1) go func() { defer wg.Done() testMuxConnection(t, udpMux, "ufrag2", udp4) }() // skip ipv6 test on i386 if ptrSize != 32 { testMuxConnection(t, udpMux, "ufrag3", udp6) } } else if ptrSize != 32 || network != udp6 { testMuxConnection(t, udpMux, "ufrag2", network) } wg.Wait() require.NoError(t, udpMux.Close()) // can't create more connections _, err = udpMux.GetConn("failufrag", udpMux.LocalAddr()) require.Error(t, err) }) } } func TestAddressEncoding(t *testing.T) { cases := []struct { name string addr net.UDPAddr }{ { name: "empty address", }, { name: "ipv4", addr: net.UDPAddr{ IP: net.IPv4(244, 120, 0, 5), Port: 6000, Zone: "", }, }, { name: "ipv6", addr: net.UDPAddr{ IP: net.IPv6loopback, Port: 2500, Zone: "zone", }, }, } for _, c := range cases { addr := c.addr t.Run(c.name, func(t *testing.T) { buf := make([]byte, maxAddrSize) n, err := encodeUDPAddr(&addr, buf) require.NoError(t, err) parsedAddr, err := decodeUDPAddr(buf[:n]) require.NoError(t, err) require.EqualValues(t, &addr, parsedAddr) }) } } func testMuxConnection(t *testing.T, udpMux *UDPMuxDefault, ufrag string, network string) { pktConn, err := udpMux.GetConn(ufrag, udpMux.LocalAddr()) require.NoError(t, err, "error retrieving muxed connection for ufrag") defer func() { _ = pktConn.Close() }() addr, ok := pktConn.LocalAddr().(*net.UDPAddr) require.True(t, ok, "pktConn.LocalAddr() is not a net.UDPAddr") if addr.IP.IsUnspecified() { addr = &net.UDPAddr{Port: addr.Port} } remoteConn, err := 
net.DialUDP(network, nil, addr) require.NoError(t, err, "error dialing test udp connection") testMuxConnectionPair(t, pktConn, remoteConn, ufrag) } func testMuxConnectionPair(t *testing.T, pktConn net.PacketConn, remoteConn *net.UDPConn, ufrag string) { // initial messages are dropped _, err := remoteConn.Write([]byte("dropped bytes")) require.NoError(t, err) // wait for packet to be consumed time.Sleep(time.Millisecond) // write out to establish connection msg := stun.New() msg.Type = stun.MessageType{Method: stun.MethodBinding, Class: stun.ClassRequest} msg.Add(stun.AttrUsername, []byte(ufrag+":otherufrag")) msg.Encode() _, err = pktConn.WriteTo(msg.Raw, remoteConn.LocalAddr()) require.NoError(t, err) // ensure received buf := make([]byte, receiveMTU) n, err := remoteConn.Read(buf) require.NoError(t, err) require.Equal(t, msg.Raw, buf[:n]) // start writing packets through mux targetSize := 1 * 1024 * 1024 readDone := make(chan struct{}, 1) remoteReadDone := make(chan struct{}, 1) // read packets from the muxed side go func() { defer func() { t.Logf("closing read chan for: %s", ufrag) close(readDone) }() readBuf := make([]byte, receiveMTU) nextSeq := uint32(0) for read := 0; read < targetSize; { n, _, err := pktConn.ReadFrom(readBuf) require.NoError(t, err) require.Equal(t, receiveMTU, n) verifyPacket(t, readBuf[:n], nextSeq) // write it back to sender _, err = pktConn.WriteTo(readBuf[:n], remoteConn.LocalAddr()) require.NoError(t, err) read += n nextSeq++ } }() go func() { defer func() { close(remoteReadDone) }() readBuf := make([]byte, receiveMTU) nextSeq := uint32(0) for read := 0; read < targetSize; { n, _, err := remoteConn.ReadFrom(readBuf) require.NoError(t, err) require.Equal(t, receiveMTU, n) verifyPacket(t, readBuf[:n], nextSeq) read += n nextSeq++ } }() sequence := 0 for written := 0; written < targetSize; { buf := make([]byte, receiveMTU) // byte0-4: sequence // bytes4-24: sha1 checksum // bytes24-mtu: random data _, err := rand.Read(buf[24:]) 
require.NoError(t, err) h := sha1.Sum(buf[24:]) //nolint:gosec copy(buf[4:24], h[:]) binary.LittleEndian.PutUint32(buf[0:4], uint32(sequence)) _, err = remoteConn.Write(buf) require.NoError(t, err) written += len(buf) sequence++ time.Sleep(time.Millisecond) } <-readDone <-remoteReadDone } func verifyPacket(t *testing.T, b []byte, nextSeq uint32) { readSeq := binary.LittleEndian.Uint32(b[0:4]) require.Equal(t, nextSeq, readSeq) h := sha1.Sum(b[24:]) //nolint:gosec require.Equal(t, h[:], b[4:24]) } func TestUDPMux_Agent_Restart(t *testing.T) { oneSecond := time.Second connA, connB := pipe(&AgentConfig{ DisconnectedTimeout: &oneSecond, FailedTimeout: &oneSecond, }) aNotifier, aConnected := onConnected() require.NoError(t, connA.agent.OnConnectionStateChange(aNotifier)) bNotifier, bConnected := onConnected() require.NoError(t, connB.agent.OnConnectionStateChange(bNotifier)) // Maintain Credentials across restarts ufragA, pwdA, err := connA.agent.GetLocalUserCredentials() require.NoError(t, err) ufragB, pwdB, err := connB.agent.GetLocalUserCredentials() require.NoError(t, err) require.NoError(t, err) // Restart and Re-Signal require.NoError(t, connA.agent.Restart(ufragA, pwdA)) require.NoError(t, connB.agent.Restart(ufragB, pwdB)) require.NoError(t, connA.agent.SetRemoteCredentials(ufragB, pwdB)) require.NoError(t, connB.agent.SetRemoteCredentials(ufragA, pwdA)) gatherAndExchangeCandidates(connA.agent, connB.agent) // Wait until both have gone back to connected <-aConnected <-bConnected require.NoError(t, connA.agent.Close()) require.NoError(t, connB.agent.Close()) } ice-2.3.1/udp_mux_universal.go000066400000000000000000000200441437620344400163640ustar00rootroot00000000000000package ice import ( "fmt" "net" "time" "github.com/pion/logging" "github.com/pion/stun" "github.com/pion/transport/v2" ) // UniversalUDPMux allows multiple connections to go over a single UDP port for // host, server reflexive and relayed candidates. 
// Actual connection muxing is happening in the UDPMux. type UniversalUDPMux interface { UDPMux GetXORMappedAddr(stunAddr net.Addr, deadline time.Duration) (*stun.XORMappedAddress, error) GetRelayedAddr(turnAddr net.Addr, deadline time.Duration) (*net.Addr, error) GetConnForURL(ufrag string, url string, addr net.Addr) (net.PacketConn, error) } // UniversalUDPMuxDefault handles STUN and TURN servers packets by wrapping the original UDPConn overriding ReadFrom. // It the passes packets to the UDPMux that does the actual connection muxing. type UniversalUDPMuxDefault struct { *UDPMuxDefault params UniversalUDPMuxParams // since we have a shared socket, for srflx candidates it makes sense to have a shared mapped address across all the agents // stun.XORMappedAddress indexed by the STUN server addr xorMappedMap map[string]*xorMapped } // UniversalUDPMuxParams are parameters for UniversalUDPMux server reflexive. type UniversalUDPMuxParams struct { Logger logging.LeveledLogger UDPConn net.PacketConn XORMappedAddrCacheTTL time.Duration Net transport.Net } // NewUniversalUDPMuxDefault creates an implementation of UniversalUDPMux embedding UDPMux func NewUniversalUDPMuxDefault(params UniversalUDPMuxParams) *UniversalUDPMuxDefault { if params.Logger == nil { params.Logger = logging.NewDefaultLoggerFactory().NewLogger("ice") } if params.XORMappedAddrCacheTTL == 0 { params.XORMappedAddrCacheTTL = time.Second * 25 } m := &UniversalUDPMuxDefault{ params: params, xorMappedMap: make(map[string]*xorMapped), } // wrap UDP connection, process server reflexive messages // before they are passed to the UDPMux connection handler (connWorker) m.params.UDPConn = &udpConn{ PacketConn: params.UDPConn, mux: m, logger: params.Logger, } // embed UDPMux udpMuxParams := UDPMuxParams{ Logger: params.Logger, UDPConn: m.params.UDPConn, Net: m.params.Net, } m.UDPMuxDefault = NewUDPMuxDefault(udpMuxParams) return m } // udpConn is a wrapper around UDPMux conn that overrides ReadFrom and handles 
STUN/TURN packets type udpConn struct { net.PacketConn mux *UniversalUDPMuxDefault logger logging.LeveledLogger } // GetRelayedAddr creates relayed connection to the given TURN service and returns the relayed addr. // Not implemented yet. func (m *UniversalUDPMuxDefault) GetRelayedAddr(turnAddr net.Addr, deadline time.Duration) (*net.Addr, error) { return nil, errNotImplemented } // GetConnForURL add uniques to the muxed connection by concatenating ufrag and URL (e.g. STUN URL) to be able to support multiple STUN/TURN servers // and return a unique connection per server. func (m *UniversalUDPMuxDefault) GetConnForURL(ufrag string, url string, addr net.Addr) (net.PacketConn, error) { return m.UDPMuxDefault.GetConn(fmt.Sprintf("%s%s", ufrag, url), addr) } // ReadFrom is called by UDPMux connWorker and handles packets coming from the STUN server discovering a mapped address. // It passes processed packets further to the UDPMux (maybe this is not really necessary). func (c *udpConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { n, addr, err = c.PacketConn.ReadFrom(p) if err != nil { return } if stun.IsMessage(p[:n]) { msg := &stun.Message{ Raw: append([]byte{}, p[:n]...), } if err = msg.Decode(); err != nil { c.logger.Warnf("Failed to handle decode ICE from %s: %v", addr.String(), err) err = nil return } udpAddr, ok := addr.(*net.UDPAddr) if !ok { // message about this err will be logged in the UDPMux return } if c.mux.isXORMappedResponse(msg, udpAddr.String()) { err = c.mux.handleXORMappedResponse(udpAddr, msg) if err != nil { c.logger.Debugf("%w: %v", errGetXorMappedAddrResponse, err) err = nil } return } } return n, addr, err } // isXORMappedResponse indicates whether the message is a XORMappedAddress and is coming from the known STUN server. 
func (m *UniversalUDPMuxDefault) isXORMappedResponse(msg *stun.Message, stunAddr string) bool { m.mu.Lock() defer m.mu.Unlock() // check first if it is a STUN server address because remote peer can also send similar messages but as a BindingSuccess _, ok := m.xorMappedMap[stunAddr] _, err := msg.Get(stun.AttrXORMappedAddress) return err == nil && ok } // handleXORMappedResponse parses response from the STUN server, extracts XORMappedAddress attribute // and set the mapped address for the server func (m *UniversalUDPMuxDefault) handleXORMappedResponse(stunAddr *net.UDPAddr, msg *stun.Message) error { m.mu.Lock() defer m.mu.Unlock() mappedAddr, ok := m.xorMappedMap[stunAddr.String()] if !ok { return errNoXorAddrMapping } var addr stun.XORMappedAddress if err := addr.GetFrom(msg); err != nil { return err } m.xorMappedMap[stunAddr.String()] = mappedAddr mappedAddr.SetAddr(&addr) return nil } // GetXORMappedAddr returns *stun.XORMappedAddress if already present for a given STUN server. // Makes a STUN binding request to discover mapped address otherwise. // Blocks until the stun.XORMappedAddress has been discovered or deadline. // Method is safe for concurrent use. 
func (m *UniversalUDPMuxDefault) GetXORMappedAddr(serverAddr net.Addr, deadline time.Duration) (*stun.XORMappedAddress, error) { m.mu.Lock() mappedAddr, ok := m.xorMappedMap[serverAddr.String()] // if we already have a mapping for this STUN server (address already received) // and if it is not too old we return it without making a new request to STUN server if ok { if mappedAddr.expired() { mappedAddr.closeWaiters() delete(m.xorMappedMap, serverAddr.String()) ok = false } else if mappedAddr.pending() { ok = false } } m.mu.Unlock() if ok { return mappedAddr.addr, nil } // otherwise, make a STUN request to discover the address // or wait for already sent request to complete waitAddrReceived, err := m.sendStun(serverAddr) if err != nil { return nil, fmt.Errorf("%w: %s", errSendSTUNPacket, err) } // block until response was handled by the connWorker routine and XORMappedAddress was updated select { case <-waitAddrReceived: // when channel closed, addr was obtained m.mu.Lock() mappedAddr := *m.xorMappedMap[serverAddr.String()] m.mu.Unlock() if mappedAddr.addr == nil { return nil, errNoXorAddrMapping } return mappedAddr.addr, nil case <-time.After(deadline): return nil, errXORMappedAddrTimeout } } // sendStun sends a STUN request via UDP conn. // // The returned channel is closed when the STUN response has been received. // Method is safe for concurrent use. 
func (m *UniversalUDPMuxDefault) sendStun(serverAddr net.Addr) (chan struct{}, error) { m.mu.Lock() defer m.mu.Unlock() // if record present in the map, we already sent a STUN request, // just wait when waitAddrReceived will be closed addrMap, ok := m.xorMappedMap[serverAddr.String()] if !ok { addrMap = &xorMapped{ expiresAt: time.Now().Add(m.params.XORMappedAddrCacheTTL), waitAddrReceived: make(chan struct{}), } m.xorMappedMap[serverAddr.String()] = addrMap } req, err := stun.Build(stun.BindingRequest, stun.TransactionID) if err != nil { return nil, err } if _, err = m.params.UDPConn.WriteTo(req.Raw, serverAddr); err != nil { return nil, err } return addrMap.waitAddrReceived, nil } type xorMapped struct { addr *stun.XORMappedAddress waitAddrReceived chan struct{} expiresAt time.Time } func (a *xorMapped) closeWaiters() { select { case <-a.waitAddrReceived: // notify was close, ok, that means we received duplicate response // just exit break default: // notify tha twe have a new addr close(a.waitAddrReceived) } } func (a *xorMapped) pending() bool { return a.addr == nil } func (a *xorMapped) expired() bool { return a.expiresAt.Before(time.Now()) } func (a *xorMapped) SetAddr(addr *stun.XORMappedAddress) { a.addr = addr a.closeWaiters() } ice-2.3.1/udp_mux_universal_test.go000066400000000000000000000064061437620344400174310ustar00rootroot00000000000000//go:build !js // +build !js package ice import ( "net" "sync" "testing" "time" "github.com/pion/stun" "github.com/stretchr/testify/require" ) func TestUniversalUDPMux(t *testing.T) { conn, err := net.ListenUDP(udp, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}) require.NoError(t, err) udpMux := NewUniversalUDPMuxDefault(UniversalUDPMuxParams{ Logger: nil, UDPConn: conn, }) defer func() { _ = udpMux.Close() _ = conn.Close() }() require.NotNil(t, udpMux.LocalAddr(), "tcpMux.LocalAddr() is nil") wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() testMuxSrflxConnection(t, udpMux, "ufrag4", udp) }() wg.Wait() } func 
testMuxSrflxConnection(t *testing.T, udpMux *UniversalUDPMuxDefault, ufrag string, network string) { pktConn, err := udpMux.GetConn(ufrag, udpMux.LocalAddr()) require.NoError(t, err, "error retrieving muxed connection for ufrag") defer func() { _ = pktConn.Close() }() remoteConn, err := net.DialUDP(network, nil, &net.UDPAddr{ Port: udpMux.LocalAddr().(*net.UDPAddr).Port, }) require.NoError(t, err, "error dialing test udp connection") defer func() { _ = remoteConn.Close() }() // use small value for TTL to check expiration of the address udpMux.params.XORMappedAddrCacheTTL = time.Millisecond * 20 testXORIP := net.ParseIP("213.141.156.236") testXORPort := 21254 wg := sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() address, e := udpMux.GetXORMappedAddr(remoteConn.LocalAddr(), time.Second) require.NoError(t, e) require.NotNil(t, address) require.True(t, address.IP.Equal(testXORIP)) require.Equal(t, address.Port, testXORPort) }() // wait until GetXORMappedAddr calls sendStun method time.Sleep(time.Millisecond) // check that mapped address filled correctly after sent stun udpMux.mu.Lock() mappedAddr, ok := udpMux.xorMappedMap[remoteConn.LocalAddr().String()] require.True(t, ok) require.NotNil(t, mappedAddr) require.True(t, mappedAddr.pending()) require.False(t, mappedAddr.expired()) udpMux.mu.Unlock() // clean receiver read buffer buf := make([]byte, receiveMTU) _, err = remoteConn.Read(buf) require.NoError(t, err) // write back to udpMux XOR message with address msg := stun.New() msg.Type = stun.MessageType{Method: stun.MethodBinding, Class: stun.ClassRequest} msg.Add(stun.AttrUsername, []byte(ufrag+":otherufrag")) addr := &stun.XORMappedAddress{ IP: testXORIP, Port: testXORPort, } err = addr.AddTo(msg) require.NoError(t, err) msg.Encode() _, err = remoteConn.Write(msg.Raw) require.NoError(t, err) // wait for the packet to be consumed and parsed by udpMux wg.Wait() // we should get address immediately from the cached map address, err := 
udpMux.GetXORMappedAddr(remoteConn.LocalAddr(), time.Second) require.NoError(t, err) require.NotNil(t, address) udpMux.mu.Lock() // check mappedAddr is not pending, we didn't send stun twice require.False(t, mappedAddr.pending()) // check expiration by TTL time.Sleep(time.Millisecond * 21) require.True(t, mappedAddr.expired()) udpMux.mu.Unlock() // after expire, we send stun request again // but we not receive response in 5 milliseconds and should get error here address, err = udpMux.GetXORMappedAddr(remoteConn.LocalAddr(), time.Millisecond*5) require.NotNil(t, err) require.Nil(t, address) } ice-2.3.1/udp_muxed_conn.go000066400000000000000000000123031437620344400156210ustar00rootroot00000000000000package ice import ( "encoding/binary" "io" "net" "sync" "time" "github.com/pion/logging" "github.com/pion/transport/v2/packetio" ) type udpMuxedConnParams struct { Mux *UDPMuxDefault AddrPool *sync.Pool Key string LocalAddr net.Addr Logger logging.LeveledLogger } // udpMuxedConn represents a logical packet conn for a single remote as identified by ufrag type udpMuxedConn struct { params *udpMuxedConnParams // remote addresses that we have sent to on this conn addresses []string // channel holding incoming packets buf *packetio.Buffer closedChan chan struct{} closeOnce sync.Once mu sync.Mutex } func newUDPMuxedConn(params *udpMuxedConnParams) *udpMuxedConn { p := &udpMuxedConn{ params: params, buf: packetio.NewBuffer(), closedChan: make(chan struct{}), } return p } func (c *udpMuxedConn) ReadFrom(b []byte) (n int, rAddr net.Addr, err error) { buf := c.params.AddrPool.Get().(*bufferHolder) //nolint:forcetypeassert defer c.params.AddrPool.Put(buf) // read address total, err := c.buf.Read(buf.buf) if err != nil { return 0, nil, err } dataLen := int(binary.LittleEndian.Uint16(buf.buf[:2])) if dataLen > total || dataLen > len(b) { return 0, nil, io.ErrShortBuffer } // read data and then address offset := 2 copy(b, buf.buf[offset:offset+dataLen]) offset += dataLen // read 
address len & decode address addrLen := int(binary.LittleEndian.Uint16(buf.buf[offset : offset+2])) offset += 2 if rAddr, err = decodeUDPAddr(buf.buf[offset : offset+addrLen]); err != nil { return 0, nil, err } return dataLen, rAddr, nil } func (c *udpMuxedConn) WriteTo(buf []byte, rAddr net.Addr) (n int, err error) { if c.isClosed() { return 0, io.ErrClosedPipe } // each time we write to a new address, we'll register it with the mux addr := rAddr.String() if !c.containsAddress(addr) { c.addAddress(addr) } return c.params.Mux.writeTo(buf, rAddr) } func (c *udpMuxedConn) LocalAddr() net.Addr { return c.params.LocalAddr } func (c *udpMuxedConn) SetDeadline(tm time.Time) error { return nil } func (c *udpMuxedConn) SetReadDeadline(tm time.Time) error { return nil } func (c *udpMuxedConn) SetWriteDeadline(tm time.Time) error { return nil } func (c *udpMuxedConn) CloseChannel() <-chan struct{} { return c.closedChan } func (c *udpMuxedConn) Close() error { var err error c.closeOnce.Do(func() { err = c.buf.Close() close(c.closedChan) }) return err } func (c *udpMuxedConn) isClosed() bool { select { case <-c.closedChan: return true default: return false } } func (c *udpMuxedConn) getAddresses() []string { c.mu.Lock() defer c.mu.Unlock() addresses := make([]string, len(c.addresses)) copy(addresses, c.addresses) return addresses } func (c *udpMuxedConn) addAddress(addr string) { c.mu.Lock() c.addresses = append(c.addresses, addr) c.mu.Unlock() // map it on mux c.params.Mux.registerConnForAddress(c, addr) } func (c *udpMuxedConn) removeAddress(addr string) { c.mu.Lock() defer c.mu.Unlock() newAddresses := make([]string, 0, len(c.addresses)) for _, a := range c.addresses { if a != addr { newAddresses = append(newAddresses, a) } } c.addresses = newAddresses } func (c *udpMuxedConn) containsAddress(addr string) bool { c.mu.Lock() defer c.mu.Unlock() for _, a := range c.addresses { if addr == a { return true } } return false } func (c *udpMuxedConn) writePacket(data []byte, addr 
*net.UDPAddr) error { // write two packets, address and data buf := c.params.AddrPool.Get().(*bufferHolder) //nolint:forcetypeassert defer c.params.AddrPool.Put(buf) // format of buffer | data len | data bytes | addr len | addr bytes | if len(buf.buf) < len(data)+maxAddrSize { return io.ErrShortBuffer } // data len binary.LittleEndian.PutUint16(buf.buf, uint16(len(data))) offset := 2 // data copy(buf.buf[offset:], data) offset += len(data) // write address first, leaving room for its length n, err := encodeUDPAddr(addr, buf.buf[offset+2:]) if err != nil { return err } total := offset + n + 2 // address len binary.LittleEndian.PutUint16(buf.buf[offset:], uint16(n)) if _, err := c.buf.Write(buf.buf[:total]); err != nil { return err } return nil } func encodeUDPAddr(addr *net.UDPAddr, buf []byte) (int, error) { ipData, err := addr.IP.MarshalText() if err != nil { return 0, err } total := 2 + len(ipData) + 2 + len(addr.Zone) if total > len(buf) { return 0, io.ErrShortBuffer } binary.LittleEndian.PutUint16(buf, uint16(len(ipData))) offset := 2 n := copy(buf[offset:], ipData) offset += n binary.LittleEndian.PutUint16(buf[offset:], uint16(addr.Port)) offset += 2 copy(buf[offset:], addr.Zone) return total, nil } func decodeUDPAddr(buf []byte) (*net.UDPAddr, error) { addr := net.UDPAddr{} offset := 0 ipLen := int(binary.LittleEndian.Uint16(buf[:2])) offset += 2 // basic bounds checking if ipLen+offset > len(buf) { return nil, io.ErrShortBuffer } if err := addr.IP.UnmarshalText(buf[offset : offset+ipLen]); err != nil { return nil, err } offset += ipLen addr.Port = int(binary.LittleEndian.Uint16(buf[offset : offset+2])) offset += 2 zone := make([]byte, len(buf[offset:])) copy(zone, buf[offset:]) addr.Zone = string(zone) return &addr, nil } ice-2.3.1/url.go000066400000000000000000000120601437620344400134140ustar00rootroot00000000000000package ice import ( "errors" "net" "net/url" "strconv" ) // SchemeType indicates the type of server used in the ice.URL structure. 
type SchemeType int // Unknown defines default public constant to use for "enum" like struct // comparisons when no value was defined. const Unknown = iota const ( // SchemeTypeSTUN indicates the URL represents a STUN server. SchemeTypeSTUN SchemeType = iota + 1 // SchemeTypeSTUNS indicates the URL represents a STUNS (secure) server. SchemeTypeSTUNS // SchemeTypeTURN indicates the URL represents a TURN server. SchemeTypeTURN // SchemeTypeTURNS indicates the URL represents a TURNS (secure) server. SchemeTypeTURNS ) // NewSchemeType defines a procedure for creating a new SchemeType from a raw // string naming the scheme type. func NewSchemeType(raw string) SchemeType { switch raw { case "stun": return SchemeTypeSTUN case "stuns": return SchemeTypeSTUNS case "turn": return SchemeTypeTURN case "turns": return SchemeTypeTURNS default: return SchemeType(Unknown) } } func (t SchemeType) String() string { switch t { case SchemeTypeSTUN: return "stun" case SchemeTypeSTUNS: return "stuns" case SchemeTypeTURN: return "turn" case SchemeTypeTURNS: return "turns" default: return ErrUnknownType.Error() } } // ProtoType indicates the transport protocol type that is used in the ice.URL // structure. type ProtoType int const ( // ProtoTypeUDP indicates the URL uses a UDP transport. ProtoTypeUDP ProtoType = iota + 1 // ProtoTypeTCP indicates the URL uses a TCP transport. ProtoTypeTCP ) // NewProtoType defines a procedure for creating a new ProtoType from a raw // string naming the transport protocol type. 
func NewProtoType(raw string) ProtoType { switch raw { case "udp": return ProtoTypeUDP case "tcp": return ProtoTypeTCP default: return ProtoType(Unknown) } } func (t ProtoType) String() string { switch t { case ProtoTypeUDP: return "udp" case ProtoTypeTCP: return "tcp" default: return ErrUnknownType.Error() } } // URL represents a STUN (rfc7064) or TURN (rfc7065) URL type URL struct { Scheme SchemeType Host string Port int Username string Password string Proto ProtoType } // ParseURL parses a STUN or TURN urls following the ABNF syntax described in // https://tools.ietf.org/html/rfc7064 and https://tools.ietf.org/html/rfc7065 // respectively. func ParseURL(raw string) (*URL, error) { //nolint:gocognit rawParts, err := url.Parse(raw) if err != nil { return nil, err } var u URL u.Scheme = NewSchemeType(rawParts.Scheme) if u.Scheme == SchemeType(Unknown) { return nil, ErrSchemeType } var rawPort string if u.Host, rawPort, err = net.SplitHostPort(rawParts.Opaque); err != nil { var e *net.AddrError if errors.As(err, &e) { if e.Err == "missing port in address" { nextRawURL := u.Scheme.String() + ":" + rawParts.Opaque switch { case u.Scheme == SchemeTypeSTUN || u.Scheme == SchemeTypeTURN: nextRawURL += ":3478" if rawParts.RawQuery != "" { nextRawURL += "?" + rawParts.RawQuery } return ParseURL(nextRawURL) case u.Scheme == SchemeTypeSTUNS || u.Scheme == SchemeTypeTURNS: nextRawURL += ":5349" if rawParts.RawQuery != "" { nextRawURL += "?" 
+ rawParts.RawQuery } return ParseURL(nextRawURL) } } } return nil, err } if u.Host == "" { return nil, ErrHost } if u.Port, err = strconv.Atoi(rawPort); err != nil { return nil, ErrPort } switch u.Scheme { case SchemeTypeSTUN: qArgs, err := url.ParseQuery(rawParts.RawQuery) if err != nil || len(qArgs) > 0 { return nil, ErrSTUNQuery } u.Proto = ProtoTypeUDP case SchemeTypeSTUNS: qArgs, err := url.ParseQuery(rawParts.RawQuery) if err != nil || len(qArgs) > 0 { return nil, ErrSTUNQuery } u.Proto = ProtoTypeTCP case SchemeTypeTURN: proto, err := parseProto(rawParts.RawQuery) if err != nil { return nil, err } u.Proto = proto if u.Proto == ProtoType(Unknown) { u.Proto = ProtoTypeUDP } case SchemeTypeTURNS: proto, err := parseProto(rawParts.RawQuery) if err != nil { return nil, err } u.Proto = proto if u.Proto == ProtoType(Unknown) { u.Proto = ProtoTypeTCP } } return &u, nil } func parseProto(raw string) (ProtoType, error) { qArgs, err := url.ParseQuery(raw) if err != nil || len(qArgs) > 1 { return ProtoType(Unknown), ErrInvalidQuery } var proto ProtoType if rawProto := qArgs.Get("transport"); rawProto != "" { if proto = NewProtoType(rawProto); proto == ProtoType(0) { return ProtoType(Unknown), ErrProtoType } return proto, nil } if len(qArgs) > 0 { return ProtoType(Unknown), ErrInvalidQuery } return proto, nil } func (u URL) String() string { rawURL := u.Scheme.String() + ":" + net.JoinHostPort(u.Host, strconv.Itoa(u.Port)) if u.Scheme == SchemeTypeTURN || u.Scheme == SchemeTypeTURNS { rawURL += "?transport=" + u.Proto.String() } return rawURL } // IsSecure returns whether the this URL's scheme describes secure scheme or not. 
func (u URL) IsSecure() bool { return u.Scheme == SchemeTypeSTUNS || u.Scheme == SchemeTypeTURNS } ice-2.3.1/url_test.go000066400000000000000000000060371437620344400144620ustar00rootroot00000000000000package ice import ( "errors" "fmt" "net" "net/url" "testing" "github.com/stretchr/testify/assert" ) func TestParseURL(t *testing.T) { t.Run("Success", func(t *testing.T) { testCases := []struct { rawURL string expectedURLString string expectedScheme SchemeType expectedSecure bool expectedHost string expectedPort int expectedProto ProtoType }{ {"stun:google.de", "stun:google.de:3478", SchemeTypeSTUN, false, "google.de", 3478, ProtoTypeUDP}, {"stun:google.de:1234", "stun:google.de:1234", SchemeTypeSTUN, false, "google.de", 1234, ProtoTypeUDP}, {"stuns:google.de", "stuns:google.de:5349", SchemeTypeSTUNS, true, "google.de", 5349, ProtoTypeTCP}, {"stun:[::1]:123", "stun:[::1]:123", SchemeTypeSTUN, false, "::1", 123, ProtoTypeUDP}, {"turn:google.de", "turn:google.de:3478?transport=udp", SchemeTypeTURN, false, "google.de", 3478, ProtoTypeUDP}, {"turns:google.de", "turns:google.de:5349?transport=tcp", SchemeTypeTURNS, true, "google.de", 5349, ProtoTypeTCP}, {"turn:google.de?transport=udp", "turn:google.de:3478?transport=udp", SchemeTypeTURN, false, "google.de", 3478, ProtoTypeUDP}, {"turns:google.de?transport=tcp", "turns:google.de:5349?transport=tcp", SchemeTypeTURNS, true, "google.de", 5349, ProtoTypeTCP}, } for i, testCase := range testCases { url, err := ParseURL(testCase.rawURL) assert.Nil(t, err, "testCase: %d %v", i, testCase) if err != nil { return } assert.Equal(t, testCase.expectedScheme, url.Scheme, "testCase: %d %v", i, testCase) assert.Equal(t, testCase.expectedURLString, url.String(), "testCase: %d %v", i, testCase) assert.Equal(t, testCase.expectedSecure, url.IsSecure(), "testCase: %d %v", i, testCase) assert.Equal(t, testCase.expectedHost, url.Host, "testCase: %d %v", i, testCase) assert.Equal(t, testCase.expectedPort, url.Port, "testCase: %d %v", i, testCase) 
assert.Equal(t, testCase.expectedProto, url.Proto, "testCase: %d %v", i, testCase) } }) t.Run("Failure", func(t *testing.T) { testCases := []struct { rawURL string expectedErr error }{ {"", ErrSchemeType}, {":::", errMissingProtocolScheme}, {"stun:[::1]:123:", errTooManyColonsAddr}, {"stun:[::1]:123a", ErrPort}, {"google.de", ErrSchemeType}, {"stun:", ErrHost}, {"stun:google.de:abc", ErrPort}, {"stun:google.de?transport=udp", ErrSTUNQuery}, {"stuns:google.de?transport=udp", ErrSTUNQuery}, {"turn:google.de?trans=udp", ErrInvalidQuery}, {"turns:google.de?trans=udp", ErrInvalidQuery}, {"turns:google.de?transport=udp&another=1", ErrInvalidQuery}, {"turn:google.de?transport=ip", ErrProtoType}, } for i, testCase := range testCases { _, err := ParseURL(testCase.rawURL) var ( urlError *url.Error addrError *net.AddrError ) switch { case errors.As(err, &urlError): err = urlError.Err case errors.As(err, &addrError): err = fmt.Errorf(addrError.Err) //nolint:goerr113 } assert.EqualError(t, err, testCase.expectedErr.Error(), "testCase: %d %v", i, testCase) } }) } ice-2.3.1/usecandidate.go000066400000000000000000000011031437620344400152370ustar00rootroot00000000000000package ice import "github.com/pion/stun" // UseCandidateAttr represents USE-CANDIDATE attribute. type UseCandidateAttr struct{} // AddTo adds USE-CANDIDATE attribute to message. func (UseCandidateAttr) AddTo(m *stun.Message) error { m.Add(stun.AttrUseCandidate, nil) return nil } // IsSet returns true if USE-CANDIDATE attribute is set. func (UseCandidateAttr) IsSet(m *stun.Message) bool { _, err := m.Get(stun.AttrUseCandidate) return err == nil } // UseCandidate is shorthand for UseCandidateAttr. 
func UseCandidate() UseCandidateAttr { return UseCandidateAttr{} } ice-2.3.1/usecandidate_test.go000066400000000000000000000006641437620344400163110ustar00rootroot00000000000000package ice import ( "testing" "github.com/pion/stun" ) func TestUseCandidateAttr_AddTo(t *testing.T) { m := new(stun.Message) if UseCandidate().IsSet(m) { t.Error("should not be set") } if err := m.Build(stun.BindingRequest, UseCandidate()); err != nil { t.Error(err) } m1 := new(stun.Message) if _, err := m1.Write(m.Raw); err != nil { t.Error(err) } if !UseCandidate().IsSet(m1) { t.Error("should be set") } }