pax_global_header00006660000000000000000000000064146032160040014506gustar00rootroot0000000000000052 comment=b2c6bd320b3846205d7aaa8a59fa4270410213ef nextcloud-spreed-signaling-1.2.4/000077500000000000000000000000001460321600400167505ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/.dockerignore000066400000000000000000000000561460321600400214250ustar00rootroot00000000000000/bin /docker/*/Dockerfile /docker-compose.yml nextcloud-spreed-signaling-1.2.4/.github/000077500000000000000000000000001460321600400203105ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/.github/dependabot.yml000066400000000000000000000013461460321600400231440ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "docker" directory: "/docker/janus" schedule: interval: "daily" - package-ecosystem: "docker" directory: "/docker/proxy" schedule: interval: "daily" - package-ecosystem: "docker" directory: "/docker/server" schedule: interval: "daily" - package-ecosystem: gomod directory: / schedule: interval: daily groups: etcd: patterns: - "go.etcd.io*" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" groups: artifacts: patterns: - "actions/*-artifact" - package-ecosystem: "pip" directory: "/docs" schedule: interval: "daily" nextcloud-spreed-signaling-1.2.4/.github/workflows/000077500000000000000000000000001460321600400223455ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/.github/workflows/check-continentmap.yml000066400000000000000000000010671460321600400266460ustar00rootroot00000000000000name: check-continentmap on: push: branches: [ master ] paths: - '.github/workflows/check-continentmap.yml' - 'scripts/get_continent_map.py' - 'Makefile' pull_request: branches: [ master ] paths: - '.github/workflows/check-continentmap.yml' - 'scripts/get_continent_map.py' - 'Makefile' schedule: - cron: "0 2 * * SUN" permissions: contents: read jobs: check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Check 
continentmap run: make check-continentmap nextcloud-spreed-signaling-1.2.4/.github/workflows/codeql-analysis.yml000066400000000000000000000016371460321600400261670ustar00rootroot00000000000000name: "CodeQL" on: push: branches: [ master ] paths: - '.github/workflows/codeql-analysis.yml' - '**.go' - 'go.*' pull_request: branches: [ master ] paths: - '.github/workflows/codeql-analysis.yml' - '**.go' - 'go.*' schedule: - cron: '28 2 * * 5' permissions: contents: read jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: - go steps: - name: Checkout repository uses: actions/checkout@v4 - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} - name: Autobuild uses: github/codeql-action/autobuild@v3 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 nextcloud-spreed-signaling-1.2.4/.github/workflows/command-rebase.yml000066400000000000000000000027121460321600400257470ustar00rootroot00000000000000# This workflow is provided via the organization template repository # # https://github.com/nextcloud/.github # https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization name: Rebase command on: issue_comment: types: created permissions: contents: read jobs: rebase: runs-on: ubuntu-latest permissions: contents: none # On pull requests and if the comment starts with `/rebase` if: github.event.issue.pull_request != '' && startsWith(github.event.comment.body, '/rebase') steps: - name: Add reaction on start uses: peter-evans/create-or-update-comment@v4 with: token: ${{ secrets.COMMAND_BOT_PAT }} repository: ${{ github.event.repository.full_name }} comment-id: ${{ github.event.comment.id }} reaction-type: "+1" - name: Checkout the latest code uses: actions/checkout@v4 with: fetch-depth: 0 token: ${{ secrets.COMMAND_BOT_PAT }} - name: Automatic Rebase uses: 
cirrus-actions/rebase@1.8 env: GITHUB_TOKEN: ${{ secrets.COMMAND_BOT_PAT }} - name: Add reaction on failure uses: peter-evans/create-or-update-comment@v4 if: failure() with: token: ${{ secrets.COMMAND_BOT_PAT }} repository: ${{ github.event.repository.full_name }} comment-id: ${{ github.event.comment.id }} reaction-type: "-1" nextcloud-spreed-signaling-1.2.4/.github/workflows/deploydocker.yml000066400000000000000000000112171460321600400255560ustar00rootroot00000000000000name: Deploy to Docker Hub / GHCR on: pull_request: branches: [ master ] paths: - '.github/workflows/deploydocker.yml' - '**.go' - 'go.*' - 'Makefile' - '*.conf.in' - 'docker/server/*' - 'docker/proxy/*' push: branches: - master tags: - "v*.*.*" permissions: contents: read packages: write jobs: server: runs-on: ubuntu-latest steps: - name: Check Out Repo uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Generate Docker metadata id: meta uses: docker/metadata-action@v5 with: images: | strukturag/nextcloud-spreed-signaling ghcr.io/strukturag/nextcloud-spreed-signaling tags: | type=ref,event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} type=sha,prefix= - name: Cache Docker layers uses: actions/cache@v4 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} restore-keys: | ${{ runner.os }}-buildx- - name: Login to Docker Hub if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - name: Login to GHCR if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v3 - name: Build and push id: docker_build uses: docker/build-push-action@v5 with: context: . 
file: ./docker/server/Dockerfile platforms: linux/amd64,linux/arm64 push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} proxy: runs-on: ubuntu-latest steps: - name: Check Out Repo uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Generate Docker metadata id: meta uses: docker/metadata-action@v5 with: images: | strukturag/nextcloud-spreed-signaling ghcr.io/strukturag/nextcloud-spreed-signaling labels: | org.opencontainers.image.title=nextcloud-spreed-signaling-proxy org.opencontainers.image.description=Signaling proxy for the standalone signaling server for Nextcloud Talk. flavor: | suffix=-proxy,onlatest=true tags: | type=ref,event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} type=sha,prefix= - name: Cache Docker layers uses: actions/cache@v4 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} restore-keys: | ${{ runner.os }}-buildx- - name: Login to Docker Hub if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - name: Login to GHCR if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v3 - name: Build and push id: docker_build uses: docker/build-push-action@v5 with: context: . 
file: ./docker/proxy/Dockerfile platforms: linux/amd64,linux/arm64 push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} nextcloud-spreed-signaling-1.2.4/.github/workflows/docker-compose.yml000066400000000000000000000022261460321600400260040ustar00rootroot00000000000000name: docker-compose on: pull_request: branches: [ master ] paths: - '.github/workflows/docker-compose.yml' - '**/docker-compose.yml' - 'docker/server/Dockerfile' push: branches: [ master ] paths: - '.github/workflows/docker-compose.yml' - '**/docker-compose.yml' - 'docker/server/Dockerfile' permissions: contents: read jobs: pull: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Update docker-compose run: | curl -SL https://github.com/docker/compose/releases/download/v2.15.1/docker-compose-linux-x86_64 -o docker-compose chmod a+x docker-compose - name: Pull Docker images run: ./docker-compose -f docker/docker-compose.yml pull build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Update docker-compose run: | curl -SL https://github.com/docker/compose/releases/download/v2.15.1/docker-compose-linux-x86_64 -o docker-compose chmod a+x docker-compose - name: Build Docker images run: ./docker-compose -f docker/docker-compose.yml build nextcloud-spreed-signaling-1.2.4/.github/workflows/docker-janus.yml000066400000000000000000000015271460321600400254620ustar00rootroot00000000000000name: Janus Docker image on: pull_request: branches: [ master ] paths: - '.github/workflows/docker-janus.yml' - 'docker/janus/Dockerfile' push: branches: [ master ] paths: - '.github/workflows/docker-janus.yml' - 'docker/janus/Dockerfile' permissions: contents: read env: TEST_TAG: strukturag/nextcloud-spreed-signaling:janus-test jobs: build: runs-on: ubuntu-latest 
steps: - uses: actions/checkout@v4 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Build Docker image uses: docker/build-push-action@v5 with: context: docker/janus load: true tags: ${{ env.TEST_TAG }} - name: Test Docker image run: | docker run --rm ${{ env.TEST_TAG }} /usr/local/bin/janus --version nextcloud-spreed-signaling-1.2.4/.github/workflows/docker.yml000066400000000000000000000025641460321600400243460ustar00rootroot00000000000000name: Docker image on: pull_request: branches: [ master ] paths: - '.github/workflows/docker.yml' - '**.go' - 'go.*' - 'Makefile' - '*.conf.in' - 'docker/server/*' - 'docker/proxy/*' push: branches: [ master ] paths: - '.github/workflows/docker.yml' - '**.go' - 'go.*' - 'Makefile' - '*.conf.in' - 'docker/server/*' - 'docker/proxy/*' permissions: contents: read env: TEST_TAG: strukturag/nextcloud-spreed-signaling:test jobs: server: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Build Docker image uses: docker/build-push-action@v5 with: context: . file: docker/server/Dockerfile platforms: linux/amd64,linux/arm64 proxy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Build Docker image uses: docker/build-push-action@v5 with: context: . 
file: docker/proxy/Dockerfile platforms: linux/amd64,linux/arm64 nextcloud-spreed-signaling-1.2.4/.github/workflows/licensecheck.yml000066400000000000000000000016601460321600400255130ustar00rootroot00000000000000name: licensecheck on: push: branches: [ master ] paths: - '.github/workflows/licensecheck.yml' - '**.go' pull_request: branches: [ master ] paths: - '.github/workflows/licensecheck.yml' - '**.go' permissions: contents: read jobs: golang: name: golang runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install licensecheck run: | sudo apt-get -y update sudo apt-get -y install licensecheck - id: licensecheck name: Check licenses run: | { echo 'CHECK_RESULT<> "$GITHUB_ENV" - name: Check for missing licenses run: | MISSING=$(echo "$CHECK_RESULT" | grep UNKNOWN || true) if [ -n "$MISSING" ]; then \ echo "$MISSING"; \ exit 1; \ fi nextcloud-spreed-signaling-1.2.4/.github/workflows/lint.yml000066400000000000000000000025231460321600400240400ustar00rootroot00000000000000name: lint on: push: branches: [ master ] paths: - '.github/workflows/lint.yml' - '.golangci.yml' - '**.go' - 'go.*' pull_request: branches: [ master ] paths: - '.github/workflows/lint.yml' - '.golangci.yml' - '**.go' - 'go.*' permissions: contents: read jobs: lint: name: golang runs-on: ubuntu-latest continue-on-error: true steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: "1.20" - name: Install dependencies run: | sudo apt -y update && sudo apt -y install protobuf-compiler make common - name: lint uses: golangci/golangci-lint-action@v4.0.0 with: version: latest args: --timeout=2m0s skip-cache: true skip-pkg-cache: true skip-build-cache: true dependencies: name: dependencies runs-on: ubuntu-latest continue-on-error: true steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: "stable" - name: Check minimum supported version of Go run: | go mod tidy -go=1.20 -compat=1.20 - name: Check go.mod / go.sum run: | git add go.* git diff 
--cached --exit-code go.* nextcloud-spreed-signaling-1.2.4/.github/workflows/tarball.yml000066400000000000000000000036761460321600400245250ustar00rootroot00000000000000name: tarball on: push: branches: [ master ] paths: - '.github/workflows/tarball.yml' - '**.go' - 'go.*' - 'Makefile' pull_request: branches: [ master ] paths: - '.github/workflows/tarball.yml' - '**.go' - 'go.*' - 'Makefile' permissions: contents: read jobs: create: strategy: matrix: go-version: - "1.20" - "1.21" - "1.22" runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} - name: Install dependencies run: | sudo apt -y update && sudo apt -y install protobuf-compiler - name: Create tarball run: | echo "Building with $(nproc) threads" make tarball - name: Upload tarball uses: actions/upload-artifact@v4 with: name: tarball-${{ matrix.go-version }} path: nextcloud-spreed-signaling*.tar.gz test: strategy: matrix: go-version: - "1.20" - "1.21" - "1.22" runs-on: ubuntu-latest needs: [create] steps: - uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} - name: Install dependencies run: | sudo apt -y update && sudo apt -y install protobuf-compiler - name: Download tarball uses: actions/download-artifact@v4 with: name: tarball-${{ matrix.go-version }} - name: Extract tarball run: | mkdir -p tmp tar xvf nextcloud-spreed-signaling*.tar.gz --strip-components=1 -C tmp [ -d "tmp/vendor" ] || exit 1 - name: Build env: GOPROXY: off run: | echo "Building with $(nproc) threads" make -C tmp build -j$(nproc) - name: Run tests env: GOPROXY: off USE_DB_IP_GEOIP_DATABASE: "1" run: | make -C tmp test TIMEOUT=120s nextcloud-spreed-signaling-1.2.4/.github/workflows/test.yml000066400000000000000000000035551460321600400240570ustar00rootroot00000000000000name: test on: push: branches: [ master ] paths: - '.github/workflows/test.yml' - '**.go' - 'go.*' - 'Makefile' pull_request: branches: [ master ] paths: - '.github/workflows/test.yml' 
- '**.go' - 'go.*' - 'Makefile' permissions: contents: read jobs: go: env: MAXMIND_GEOLITE2_LICENSE: ${{ secrets.MAXMIND_GEOLITE2_LICENSE }} USE_DB_IP_GEOIP_DATABASE: "1" strategy: matrix: go-version: - "1.20" - "1.21" - "1.22" runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} - name: Install dependencies run: | sudo apt -y update && sudo apt -y install protobuf-compiler - name: Build applications run: | echo "Building with $(nproc) threads" make client -j$(nproc) make proxy -j$(nproc) make server -j$(nproc) - name: Run tests run: | make test TIMEOUT=120s - name: Generate coverage report run: | make cover TIMEOUT=120s echo "GOROOT=$(go env GOROOT)" >> $GITHUB_ENV - name: Convert coverage to lcov uses: jandelgado/gcov2lcov-action@v1.0.9 with: infile: cover.out outfile: cover.lcov - name: Coveralls Parallel uses: coverallsapp/github-action@v2.2.3 env: COVERALLS_FLAG_NAME: run-${{ matrix.go-version }} with: path-to-lcov: cover.lcov github-token: ${{ secrets.github_token }} parallel: true finish: permissions: contents: none needs: go runs-on: ubuntu-latest steps: - name: Coveralls Finished uses: coverallsapp/github-action@v2.2.3 with: github-token: ${{ secrets.github_token }} parallel-finished: true nextcloud-spreed-signaling-1.2.4/.gitignore000066400000000000000000000001451460321600400207400ustar00rootroot00000000000000bin/ vendor/ *_easyjson.go *.pem *.pb.go *.prof *.socket *.tar.gz cover.out proxy.conf server.conf nextcloud-spreed-signaling-1.2.4/.golangci.yml000066400000000000000000000014721460321600400213400ustar00rootroot00000000000000linters: enable: - gofmt - revive linters-settings: revive: ignoreGeneratedHeader: true severity: warning rules: - name: blank-imports - name: context-as-argument - name: context-keys-type - name: dot-imports - name: error-return #- name: error-strings - name: error-naming - name: exported - name: if-return - name: increment-decrement #- name: var-naming 
- name: var-declaration - name: package-comments - name: range - name: receiver-naming - name: time-naming - name: unexported-return #- name: indent-error-flow - name: errorf - name: empty-block - name: superfluous-else #- name: unused-parameter - name: unreachable-code - name: redefines-builtin-id nextcloud-spreed-signaling-1.2.4/.readthedocs.yaml000066400000000000000000000004501460321600400221760ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details version: 2 build: os: ubuntu-20.04 tools: python: "3.9" mkdocs: configuration: mkdocs.yml python: install: - requirements: docs/requirements.txt nextcloud-spreed-signaling-1.2.4/CHANGELOG.md000066400000000000000000001651561460321600400205770ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. ## 1.2.4 - 2024-04-03 ### Added - Add metrics for current number of HTTP client connections. [#668](https://github.com/strukturag/nextcloud-spreed-signaling/pull/668) - Support getting GeoIP DB from db-ip.com for tests. [#689](https://github.com/strukturag/nextcloud-spreed-signaling/pull/689) - Use fsnotify to detect file changes [#680](https://github.com/strukturag/nextcloud-spreed-signaling/pull/680) - CI: Check dependencies for minimum supported version. 
[#692](https://github.com/strukturag/nextcloud-spreed-signaling/pull/692) ### Changed - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.9 to 2.10.10 [#650](https://github.com/strukturag/nextcloud-spreed-signaling/pull/650) - CI: Also test with Golang 1.22 [#651](https://github.com/strukturag/nextcloud-spreed-signaling/pull/651) - build(deps): Bump the etcd group with 4 updates [#649](https://github.com/strukturag/nextcloud-spreed-signaling/pull/649) - Improve Makefile [#653](https://github.com/strukturag/nextcloud-spreed-signaling/pull/653) - build(deps): Bump google.golang.org/grpc from 1.61.0 to 1.61.1 [#659](https://github.com/strukturag/nextcloud-spreed-signaling/pull/659) - build(deps): Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0 [#658](https://github.com/strukturag/nextcloud-spreed-signaling/pull/658) - Minor improvements to DNS monitor [#663](https://github.com/strukturag/nextcloud-spreed-signaling/pull/663) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.10 to 2.10.11 [#662](https://github.com/strukturag/nextcloud-spreed-signaling/pull/662) - build(deps): Bump google.golang.org/grpc from 1.61.1 to 1.62.0 [#664](https://github.com/strukturag/nextcloud-spreed-signaling/pull/664) - Support ports in full URLs for DNS monitor. [#667](https://github.com/strukturag/nextcloud-spreed-signaling/pull/667) - Calculate proxy load based on maximum bandwidth. [#670](https://github.com/strukturag/nextcloud-spreed-signaling/pull/670) - build(deps): Bump github.com/nats-io/nats.go from 1.32.0 to 1.33.1 [#661](https://github.com/strukturag/nextcloud-spreed-signaling/pull/661) - build(deps): Bump golang from 1.21-alpine to 1.22-alpine in /docker/server [#655](https://github.com/strukturag/nextcloud-spreed-signaling/pull/655) - build(deps): Bump golang from 1.21-alpine to 1.22-alpine in /docker/proxy [#656](https://github.com/strukturag/nextcloud-spreed-signaling/pull/656) - docker: Update Janus from 0.11.8 to 0.14.1. 
[#672](https://github.com/strukturag/nextcloud-spreed-signaling/pull/672) - build(deps): Bump alpine from 3.18 to 3.19 in /docker/janus [#613](https://github.com/strukturag/nextcloud-spreed-signaling/pull/613) - Reuse backoff waiting code where possible [#673](https://github.com/strukturag/nextcloud-spreed-signaling/pull/673) - build(deps): Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0 [#674](https://github.com/strukturag/nextcloud-spreed-signaling/pull/674) - Docker improvements [#675](https://github.com/strukturag/nextcloud-spreed-signaling/pull/675) - make: Don't update dependencies but use pinned versions. [#679](https://github.com/strukturag/nextcloud-spreed-signaling/pull/679) - build(deps): Bump github.com/pion/sdp/v3 from 3.0.6 to 3.0.7 [#678](https://github.com/strukturag/nextcloud-spreed-signaling/pull/678) - build(deps): Bump google.golang.org/grpc from 1.62.0 to 1.62.1 [#677](https://github.com/strukturag/nextcloud-spreed-signaling/pull/677) - build(deps): Bump google.golang.org/protobuf from 1.32.0 to 1.33.0 [#676](https://github.com/strukturag/nextcloud-spreed-signaling/pull/676) - build(deps): Bump github.com/pion/sdp/v3 from 3.0.7 to 3.0.8 [#681](https://github.com/strukturag/nextcloud-spreed-signaling/pull/681) - Update source of continentmap to original CSV file. 
[#682](https://github.com/strukturag/nextcloud-spreed-signaling/pull/682) - build(deps): Bump markdown from 3.5.2 to 3.6 in /docs [#684](https://github.com/strukturag/nextcloud-spreed-signaling/pull/684) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.11 to 2.10.12 [#683](https://github.com/strukturag/nextcloud-spreed-signaling/pull/683) - build(deps): Bump github.com/pion/sdp/v3 from 3.0.8 to 3.0.9 [#687](https://github.com/strukturag/nextcloud-spreed-signaling/pull/687) - build(deps): Bump the etcd group with 4 updates [#686](https://github.com/strukturag/nextcloud-spreed-signaling/pull/686) - build(deps): Bump github.com/nats-io/nats.go from 1.33.1 to 1.34.0 [#685](https://github.com/strukturag/nextcloud-spreed-signaling/pull/685) - Revert "build(deps): Bump the etcd group with 4 updates" [#691](https://github.com/strukturag/nextcloud-spreed-signaling/pull/691) - CI: Limit when to run Docker build jobs. [#695](https://github.com/strukturag/nextcloud-spreed-signaling/pull/695) - Remove deprecated section on multiple signaling servers from README. [#696](https://github.com/strukturag/nextcloud-spreed-signaling/pull/696) ### Fixed - Fix race condition when accessing "expected" in proxy_config tests. [#652](https://github.com/strukturag/nextcloud-spreed-signaling/pull/652) - Fix deadlock when entry is removed while receiver holds lock in lookup. [#654](https://github.com/strukturag/nextcloud-spreed-signaling/pull/654) - Fix flaky "TestProxyConfigStaticDNS". [#671](https://github.com/strukturag/nextcloud-spreed-signaling/pull/671) - Fix flaky DnsMonitor test. [#690](https://github.com/strukturag/nextcloud-spreed-signaling/pull/690) ## 1.2.3 - 2024-01-31 ### Added - CI: Check license headers. [#627](https://github.com/strukturag/nextcloud-spreed-signaling/pull/627) - Add "welcome" endpoint to proxy. 
[#644](https://github.com/strukturag/nextcloud-spreed-signaling/pull/644) ### Changed - build(deps): Bump github/codeql-action from 2 to 3 [#619](https://github.com/strukturag/nextcloud-spreed-signaling/pull/619) - build(deps): Bump github.com/google/uuid from 1.4.0 to 1.5.0 [#618](https://github.com/strukturag/nextcloud-spreed-signaling/pull/618) - build(deps): Bump google.golang.org/grpc from 1.59.0 to 1.60.0 [#617](https://github.com/strukturag/nextcloud-spreed-signaling/pull/617) - build(deps): Bump the artifacts group with 2 updates [#622](https://github.com/strukturag/nextcloud-spreed-signaling/pull/622) - build(deps): Bump golang.org/x/crypto from 0.16.0 to 0.17.0 [#623](https://github.com/strukturag/nextcloud-spreed-signaling/pull/623) - build(deps): Bump google.golang.org/grpc from 1.60.0 to 1.60.1 [#624](https://github.com/strukturag/nextcloud-spreed-signaling/pull/624) - Refactor proxy config [#606](https://github.com/strukturag/nextcloud-spreed-signaling/pull/606) - build(deps): Bump google.golang.org/protobuf from 1.31.0 to 1.32.0 [#629](https://github.com/strukturag/nextcloud-spreed-signaling/pull/629) - build(deps): Bump github.com/prometheus/client_golang from 1.17.0 to 1.18.0 [#630](https://github.com/strukturag/nextcloud-spreed-signaling/pull/630) - build(deps): Bump jinja2 from 3.1.2 to 3.1.3 in /docs [#632](https://github.com/strukturag/nextcloud-spreed-signaling/pull/632) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.7 to 2.10.9 [#633](https://github.com/strukturag/nextcloud-spreed-signaling/pull/633) - build(deps): Bump markdown from 3.5.1 to 3.5.2 in /docs [#631](https://github.com/strukturag/nextcloud-spreed-signaling/pull/631) - build(deps): Bump github.com/nats-io/nats.go from 1.31.0 to 1.32.0 [#634](https://github.com/strukturag/nextcloud-spreed-signaling/pull/634) - build(deps): Bump readthedocs-sphinx-search from 0.3.1 to 0.3.2 in /docs [#635](https://github.com/strukturag/nextcloud-spreed-signaling/pull/635) - 
build(deps): Bump actions/cache from 3 to 4 [#638](https://github.com/strukturag/nextcloud-spreed-signaling/pull/638) - build(deps): Bump github.com/google/uuid from 1.5.0 to 1.6.0 [#643](https://github.com/strukturag/nextcloud-spreed-signaling/pull/643) - build(deps): Bump google.golang.org/grpc from 1.60.1 to 1.61.0 [#645](https://github.com/strukturag/nextcloud-spreed-signaling/pull/645) - build(deps): Bump peter-evans/create-or-update-comment from 3 to 4 [#646](https://github.com/strukturag/nextcloud-spreed-signaling/pull/646) - CI: No longer need to manually cache Go modules. [#648](https://github.com/strukturag/nextcloud-spreed-signaling/pull/648) - CI: Disable cache for linter to bring back annotations. [#647](https://github.com/strukturag/nextcloud-spreed-signaling/pull/647) - Refactor DNS monitoring [#648](https://github.com/strukturag/nextcloud-spreed-signaling/pull/648) ### Fixed - Fix link to NATS install docs [#637](https://github.com/strukturag/nextcloud-spreed-signaling/pull/637) - docker: Always need to set proxy token id / key for server. [#641](https://github.com/strukturag/nextcloud-spreed-signaling/pull/641) ## 1.2.2 - 2023-12-11 ### Added - Include "~docker" in version if built on Docker. [#602](https://github.com/strukturag/nextcloud-spreed-signaling/pull/602) ### Changed - CI: No need to build docker images for testing, done internally. 
[#603](https://github.com/strukturag/nextcloud-spreed-signaling/pull/603) - build(deps): Bump sphinx-rtd-theme from 1.3.0 to 2.0.0 in /docs [#604](https://github.com/strukturag/nextcloud-spreed-signaling/pull/604) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.5 to 2.10.6 [#605](https://github.com/strukturag/nextcloud-spreed-signaling/pull/605) - build(deps): Bump actions/setup-go from 4 to 5 [#608](https://github.com/strukturag/nextcloud-spreed-signaling/pull/608) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.6 to 2.10.7 [#612](https://github.com/strukturag/nextcloud-spreed-signaling/pull/612) - build(deps): Bump the etcd group with 4 updates [#611](https://github.com/strukturag/nextcloud-spreed-signaling/pull/611) ### Fixed - Skip options from default section when parsing "geoip-overrides". [#609](https://github.com/strukturag/nextcloud-spreed-signaling/pull/609) - Hangup virtual session if it gets disinvited. [#610](https://github.com/strukturag/nextcloud-spreed-signaling/pull/610) ## 1.2.1 - 2023-11-15 ### Added - feat(scripts): Add a script to simplify the logs to make it more easily to trace a user/session [#480](https://github.com/strukturag/nextcloud-spreed-signaling/pull/480) ### Changed - build(deps): Bump markdown from 3.5 to 3.5.1 in /docs [#594](https://github.com/strukturag/nextcloud-spreed-signaling/pull/594) - build(deps): Bump github.com/gorilla/websocket from 1.5.0 to 1.5.1 [#595](https://github.com/strukturag/nextcloud-spreed-signaling/pull/595) - build(deps): Bump github.com/gorilla/securecookie from 1.1.1 to 1.1.2 [#597](https://github.com/strukturag/nextcloud-spreed-signaling/pull/597) - build(deps): Bump github.com/gorilla/mux from 1.8.0 to 1.8.1 [#596](https://github.com/strukturag/nextcloud-spreed-signaling/pull/596) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.4 to 2.10.5 [#599](https://github.com/strukturag/nextcloud-spreed-signaling/pull/599) - Improve support for multiple backends 
with dialouts [#592](https://github.com/strukturag/nextcloud-spreed-signaling/pull/592) - build(deps): Bump go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc from 0.25.0 to 0.46.0 [#600](https://github.com/strukturag/nextcloud-spreed-signaling/pull/600) ## 1.2.0 - 2023-10-30 ### Added - Use GeoIP overrides if no GeoIP database is configured. [#532](https://github.com/strukturag/nextcloud-spreed-signaling/pull/532) - Log warning if no (static) backends have been configured. [#533](https://github.com/strukturag/nextcloud-spreed-signaling/pull/533) - Fallback to common shared secret if none is set for backends. [#534](https://github.com/strukturag/nextcloud-spreed-signaling/pull/534) - CI: Test with Golang 1.21 [#536](https://github.com/strukturag/nextcloud-spreed-signaling/pull/536) - Return response if session tries to join room again. [#547](https://github.com/strukturag/nextcloud-spreed-signaling/pull/547) - Support TTL for transient data. [#575](https://github.com/strukturag/nextcloud-spreed-signaling/pull/575) - Implement message handler for dialout support. [#563](https://github.com/strukturag/nextcloud-spreed-signaling/pull/563) - No longer support Golang 1.19. 
[#580](https://github.com/strukturag/nextcloud-spreed-signaling/pull/580) ### Changed - build(deps): Bump google.golang.org/grpc from 1.56.1 to 1.57.0 [#520](https://github.com/strukturag/nextcloud-spreed-signaling/pull/520) - build(deps): Bump coverallsapp/github-action from 2.2.0 to 2.2.1 [#514](https://github.com/strukturag/nextcloud-spreed-signaling/pull/514) - build(deps): Bump github.com/nats-io/nats.go from 1.27.1 to 1.28.0 [#515](https://github.com/strukturag/nextcloud-spreed-signaling/pull/515) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.19 to 2.9.20 [#513](https://github.com/strukturag/nextcloud-spreed-signaling/pull/513) - build(deps): Bump mkdocs from 1.4.3 to 1.5.1 in /docs [#523](https://github.com/strukturag/nextcloud-spreed-signaling/pull/523) - build(deps): Bump markdown from 3.3.7 to 3.4.4 in /docs [#519](https://github.com/strukturag/nextcloud-spreed-signaling/pull/519) - build(deps): Bump mkdocs from 1.5.1 to 1.5.2 in /docs [#525](https://github.com/strukturag/nextcloud-spreed-signaling/pull/525) - build(deps): Bump github.com/oschwald/maxminddb-golang from 1.11.0 to 1.12.0 [#524](https://github.com/strukturag/nextcloud-spreed-signaling/pull/524) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.20 to 2.9.21 [#530](https://github.com/strukturag/nextcloud-spreed-signaling/pull/530) - build(deps): Bump sphinx from 6.2.1 to 7.2.4 in /docs [#542](https://github.com/strukturag/nextcloud-spreed-signaling/pull/542) - build(deps): Bump github.com/google/uuid from 1.3.0 to 1.3.1 [#539](https://github.com/strukturag/nextcloud-spreed-signaling/pull/539) - build(deps): Bump sphinx from 7.2.4 to 7.2.5 in /docs [#544](https://github.com/strukturag/nextcloud-spreed-signaling/pull/544) - build(deps): Bump coverallsapp/github-action from 2.2.1 to 2.2.2 [#546](https://github.com/strukturag/nextcloud-spreed-signaling/pull/546) - build(deps): Bump actions/checkout from 3 to 4 
[#545](https://github.com/strukturag/nextcloud-spreed-signaling/pull/545) - build(deps): Bump google.golang.org/grpc from 1.57.0 to 1.58.0 [#549](https://github.com/strukturag/nextcloud-spreed-signaling/pull/549) - build(deps): Bump docker/metadata-action from 4 to 5 [#552](https://github.com/strukturag/nextcloud-spreed-signaling/pull/552) - build(deps): Bump docker/setup-qemu-action from 2 to 3 [#553](https://github.com/strukturag/nextcloud-spreed-signaling/pull/553) - build(deps): Bump docker/login-action from 2 to 3 [#554](https://github.com/strukturag/nextcloud-spreed-signaling/pull/554) - build(deps): Bump docker/setup-buildx-action from 2 to 3 [#555](https://github.com/strukturag/nextcloud-spreed-signaling/pull/555) - build(deps): Bump coverallsapp/github-action from 2.2.2 to 2.2.3 [#551](https://github.com/strukturag/nextcloud-spreed-signaling/pull/551) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.21 to 2.9.22 [#550](https://github.com/strukturag/nextcloud-spreed-signaling/pull/550) - build(deps): Bump docker/build-push-action from 4 to 5 [#557](https://github.com/strukturag/nextcloud-spreed-signaling/pull/557) - build(deps): Bump github.com/nats-io/nats.go from 1.28.0 to 1.29.0 [#558](https://github.com/strukturag/nextcloud-spreed-signaling/pull/558) - build(deps): Bump google.golang.org/grpc from 1.58.0 to 1.58.1 [#559](https://github.com/strukturag/nextcloud-spreed-signaling/pull/559) - build(deps): Bump sphinx from 7.2.5 to 7.2.6 in /docs [#560](https://github.com/strukturag/nextcloud-spreed-signaling/pull/560) - build(deps): Bump mkdocs from 1.5.2 to 1.5.3 in /docs [#561](https://github.com/strukturag/nextcloud-spreed-signaling/pull/561) - build(deps): Bump markdown from 3.4.4 to 3.5 in /docs [#570](https://github.com/strukturag/nextcloud-spreed-signaling/pull/570) - build(deps): Bump google.golang.org/grpc from 1.58.1 to 1.58.3 [#573](https://github.com/strukturag/nextcloud-spreed-signaling/pull/573) - build(deps): Bump 
github.com/prometheus/client_golang from 1.16.0 to 1.17.0 [#569](https://github.com/strukturag/nextcloud-spreed-signaling/pull/569) - build(deps): Bump golang.org/x/net from 0.12.0 to 0.17.0 [#574](https://github.com/strukturag/nextcloud-spreed-signaling/pull/574) - build(deps): Bump github.com/nats-io/nats.go from 1.29.0 to 1.30.2 [#568](https://github.com/strukturag/nextcloud-spreed-signaling/pull/568) - build(deps): Bump google.golang.org/grpc from 1.58.3 to 1.59.0 [#578](https://github.com/strukturag/nextcloud-spreed-signaling/pull/578) - build(deps): Bump github.com/nats-io/nats.go from 1.30.2 to 1.31.0 [#577](https://github.com/strukturag/nextcloud-spreed-signaling/pull/577) - dependabot: Check for updates in docker files. - build(deps): Bump golang from 1.20-alpine to 1.21-alpine in /docker/proxy [#581](https://github.com/strukturag/nextcloud-spreed-signaling/pull/581) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.22 to 2.10.3 [#576](https://github.com/strukturag/nextcloud-spreed-signaling/pull/576) - build(deps): Bump alpine from 3.14 to 3.18 in /docker/janus [#582](https://github.com/strukturag/nextcloud-spreed-signaling/pull/582) - build(deps): Bump golang from 1.20-alpine to 1.21-alpine in /docker/server [#583](https://github.com/strukturag/nextcloud-spreed-signaling/pull/583) - Improve get-version.sh [#584](https://github.com/strukturag/nextcloud-spreed-signaling/pull/584) - build(deps): Bump go.etcd.io/etcd/client/pkg/v3 from 3.5.9 to 3.5.10 [#588](https://github.com/strukturag/nextcloud-spreed-signaling/pull/588) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.3 to 2.10.4 [#586](https://github.com/strukturag/nextcloud-spreed-signaling/pull/586) - build(deps): Bump github.com/google/uuid from 1.3.1 to 1.4.0 [#585](https://github.com/strukturag/nextcloud-spreed-signaling/pull/585) - dependabot: Group etcd updates. 
- build(deps): Bump the etcd group with 3 updates [#590](https://github.com/strukturag/nextcloud-spreed-signaling/pull/590) - Switch to atomic types from Go 1.19 [#500](https://github.com/strukturag/nextcloud-spreed-signaling/pull/500) - Move common flags code to own struct. [#591](https://github.com/strukturag/nextcloud-spreed-signaling/pull/591) ## 1.1.3 - 2023-07-05 ### Added - stats: Support configuring subnets for allowed IPs. [#448](https://github.com/strukturag/nextcloud-spreed-signaling/pull/448) - Add common code to handle allowed IPs. [#450](https://github.com/strukturag/nextcloud-spreed-signaling/pull/450) - Add allowall to docker image [#488](https://github.com/strukturag/nextcloud-spreed-signaling/pull/488) - Follow the Go release policy by supporting only the last two versions. This drops support for Golang 1.18. [#499](https://github.com/strukturag/nextcloud-spreed-signaling/pull/499) ### Changed - build(deps): Bump google.golang.org/protobuf from 1.29.0 to 1.29.1 [#446](https://github.com/strukturag/nextcloud-spreed-signaling/pull/446) - build(deps): Bump actions/setup-go from 3 to 4 [#447](https://github.com/strukturag/nextcloud-spreed-signaling/pull/447) - build(deps): Bump google.golang.org/protobuf from 1.29.1 to 1.30.0 [#449](https://github.com/strukturag/nextcloud-spreed-signaling/pull/449) - build(deps): Bump coverallsapp/github-action from 1.2.4 to 2.0.0 [#451](https://github.com/strukturag/nextcloud-spreed-signaling/pull/451) - build(deps): Bump readthedocs-sphinx-search from 0.2.0 to 0.3.1 in /docs [#456](https://github.com/strukturag/nextcloud-spreed-signaling/pull/456) - build(deps): Bump coverallsapp/github-action from 2.0.0 to 2.1.0 [#460](https://github.com/strukturag/nextcloud-spreed-signaling/pull/460) - build(deps): Bump peter-evans/create-or-update-comment from 2 to 3 [#459](https://github.com/strukturag/nextcloud-spreed-signaling/pull/459) - build(deps): Bump sphinx from 6.1.3 to 6.2.1 in /docs 
[#468](https://github.com/strukturag/nextcloud-spreed-signaling/pull/468) - build(deps): Bump mkdocs from 1.4.2 to 1.4.3 in /docs [#471](https://github.com/strukturag/nextcloud-spreed-signaling/pull/471) - build(deps): Bump sphinx-rtd-theme from 1.2.0 to 1.2.1 in /docs [#479](https://github.com/strukturag/nextcloud-spreed-signaling/pull/479) - build(deps): Bump coverallsapp/github-action from 2.1.0 to 2.1.2 [#466](https://github.com/strukturag/nextcloud-spreed-signaling/pull/466) - build(deps): Bump golangci/golangci-lint-action from 3.4.0 to 3.5.0 [#481](https://github.com/strukturag/nextcloud-spreed-signaling/pull/481) - Simplify vendoring. [#482](https://github.com/strukturag/nextcloud-spreed-signaling/pull/482) - build(deps): Bump sphinx-rtd-theme from 1.2.1 to 1.2.2 in /docs [#485](https://github.com/strukturag/nextcloud-spreed-signaling/pull/485) - build(deps): Bump coverallsapp/github-action from 2.1.2 to 2.2.0 [#484](https://github.com/strukturag/nextcloud-spreed-signaling/pull/484) - build(deps): Bump google.golang.org/grpc from 1.53.0 to 1.55.0 [#472](https://github.com/strukturag/nextcloud-spreed-signaling/pull/472) - build(deps): Bump go.etcd.io/etcd/client/v3 from 3.5.7 to 3.5.9 [#473](https://github.com/strukturag/nextcloud-spreed-signaling/pull/473) - build(deps): Bump github.com/nats-io/nats.go from 1.24.0 to 1.26.0 [#478](https://github.com/strukturag/nextcloud-spreed-signaling/pull/478) - build(deps): Bump golangci/golangci-lint-action from 3.5.0 to 3.6.0 [#492](https://github.com/strukturag/nextcloud-spreed-signaling/pull/492) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.15 to 2.9.17 [#495](https://github.com/strukturag/nextcloud-spreed-signaling/pull/495) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.17 to 2.9.18 [#496](https://github.com/strukturag/nextcloud-spreed-signaling/pull/496) - build(deps): Bump github.com/prometheus/client_golang from 1.14.0 to 1.15.1 
[#493](https://github.com/strukturag/nextcloud-spreed-signaling/pull/493) - docker: Don't build concurrently. [#498](https://github.com/strukturag/nextcloud-spreed-signaling/pull/498) - Use "struct{}" channel if only used as signaling mechanism. [#491](https://github.com/strukturag/nextcloud-spreed-signaling/pull/491) - build(deps): Bump google.golang.org/grpc from 1.55.0 to 1.56.0 [#502](https://github.com/strukturag/nextcloud-spreed-signaling/pull/502) - build(deps): Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 [#501](https://github.com/strukturag/nextcloud-spreed-signaling/pull/501) - build(deps): Bump github.com/oschwald/maxminddb-golang from 1.10.0 to 1.11.0 [#503](https://github.com/strukturag/nextcloud-spreed-signaling/pull/503) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.18 to 2.9.19 [#504](https://github.com/strukturag/nextcloud-spreed-signaling/pull/504) - build(deps): Bump google.golang.org/grpc from 1.56.0 to 1.56.1 [#505](https://github.com/strukturag/nextcloud-spreed-signaling/pull/505) - build(deps): Bump github.com/nats-io/nats.go from 1.27.0 to 1.27.1 [#506](https://github.com/strukturag/nextcloud-spreed-signaling/pull/506) - build(deps): Bump google.golang.org/protobuf from 1.30.0 to 1.31.0 [#507](https://github.com/strukturag/nextcloud-spreed-signaling/pull/507) ### Fixed - CI: Make sure proxy Docker image is never tagged as "latest". [#445](https://github.com/strukturag/nextcloud-spreed-signaling/pull/445) - Write backends comma-separated to config [#487](https://github.com/strukturag/nextcloud-spreed-signaling/pull/487) - Fix duplicate join events [#490](https://github.com/strukturag/nextcloud-spreed-signaling/pull/490) - Add missing lock for "roomSessionId" to avoid potential races. [#497](https://github.com/strukturag/nextcloud-spreed-signaling/pull/497) ## 1.1.2 - 2023-03-13 ### Added - Allow SKIP_VERIFY in docker image. 
[#430](https://github.com/strukturag/nextcloud-spreed-signaling/pull/430) ### Changed - Keep Docker images alpine based. [#427](https://github.com/strukturag/nextcloud-spreed-signaling/pull/427) - build(deps): Bump coverallsapp/github-action from 1.1.3 to 1.2.0 [#433](https://github.com/strukturag/nextcloud-spreed-signaling/pull/433) - build(deps): Bump coverallsapp/github-action from 1.2.0 to 1.2.2 [#435](https://github.com/strukturag/nextcloud-spreed-signaling/pull/435) - build(deps): Bump coverallsapp/github-action from 1.2.2 to 1.2.3 [#436](https://github.com/strukturag/nextcloud-spreed-signaling/pull/436) - build(deps): Bump coverallsapp/github-action from 1.2.3 to 1.2.4 [#437](https://github.com/strukturag/nextcloud-spreed-signaling/pull/437) - build(deps): Bump github.com/nats-io/nats.go from 1.23.0 to 1.24.0 [#434](https://github.com/strukturag/nextcloud-spreed-signaling/pull/434) - Run "go mod tidy -compat=1.18". [#440](https://github.com/strukturag/nextcloud-spreed-signaling/pull/440) - CI: Run golangci-lint with Go 1.20 - Update protoc-gen-go-grpc to v1.3.0 [#442](https://github.com/strukturag/nextcloud-spreed-signaling/pull/442) - CI: Stop using deprecated "set-output". [#441](https://github.com/strukturag/nextcloud-spreed-signaling/pull/441) - docker: Don't rely on default values when updating TURN settings. [#439](https://github.com/strukturag/nextcloud-spreed-signaling/pull/439) - build(deps): Bump google.golang.org/protobuf from 1.28.1 to 1.29.0 [#443](https://github.com/strukturag/nextcloud-spreed-signaling/pull/443) ### Fixed - Fix example in docker README. [#429](https://github.com/strukturag/nextcloud-spreed-signaling/pull/429) - TURN_API_KEY and TURN_SECRET fix. [#428](https://github.com/strukturag/nextcloud-spreed-signaling/pull/428) ## 1.1.1 - 2023-02-22 ### Fixed - Fix Docker images. [#425](https://github.com/strukturag/nextcloud-spreed-signaling/pull/425) ## 1.1.0 - 2023-02-22 ### Added - Official docker images. 
[#314](https://github.com/strukturag/nextcloud-spreed-signaling/pull/314) - Use proxy from environment for backend client requests. [#326](https://github.com/strukturag/nextcloud-spreed-signaling/pull/326) - Add aarch64/arm64 docker build [#384](https://github.com/strukturag/nextcloud-spreed-signaling/pull/384) - CI: Setup permissions for workflows. [#393](https://github.com/strukturag/nextcloud-spreed-signaling/pull/393) - Implement "switchto" support [#409](https://github.com/strukturag/nextcloud-spreed-signaling/pull/409) - Allow internal clients to set / change the "inCall" flags. [#421](https://github.com/strukturag/nextcloud-spreed-signaling/pull/421) - Add support for Golang 1.20 [#413](https://github.com/strukturag/nextcloud-spreed-signaling/pull/413) ### Changed - Switch to apt-get on CLI. [#312](https://github.com/strukturag/nextcloud-spreed-signaling/pull/312) - vendor: Automatically vendor protobuf modules. [#313](https://github.com/strukturag/nextcloud-spreed-signaling/pull/313) - Bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 [#316](https://github.com/strukturag/nextcloud-spreed-signaling/pull/316) - Bump github.com/oschwald/maxminddb-golang from 1.9.0 to 1.10.0 [#317](https://github.com/strukturag/nextcloud-spreed-signaling/pull/317) - Bump github.com/pion/sdp/v3 from 3.0.5 to 3.0.6 [#320](https://github.com/strukturag/nextcloud-spreed-signaling/pull/320) - Bump google.golang.org/grpc from 1.48.0 to 1.49.0 [#324](https://github.com/strukturag/nextcloud-spreed-signaling/pull/324) - Bump github.com/nats-io/nats-server/v2 from 2.8.4 to 2.9.0 [#330](https://github.com/strukturag/nextcloud-spreed-signaling/pull/330) - Bump sphinx from 5.1.1 to 5.2.2 in /docs [#339](https://github.com/strukturag/nextcloud-spreed-signaling/pull/339) - Bump mkdocs from 1.3.1 to 1.4.0 in /docs [#340](https://github.com/strukturag/nextcloud-spreed-signaling/pull/340) - Bump sphinx from 5.2.2 to 5.2.3 in /docs 
[#345](https://github.com/strukturag/nextcloud-spreed-signaling/pull/345) - Bump github.com/nats-io/nats-server/v2 from 2.9.0 to 2.9.2 [#344](https://github.com/strukturag/nextcloud-spreed-signaling/pull/344) - Bump go.etcd.io/etcd/api/v3 from 3.5.4 to 3.5.5 [#333](https://github.com/strukturag/nextcloud-spreed-signaling/pull/333) - Bump go.etcd.io/etcd/server/v3 from 3.5.4 to 3.5.5 [#334](https://github.com/strukturag/nextcloud-spreed-signaling/pull/334) - Bump google.golang.org/grpc from 1.49.0 to 1.50.0 [#346](https://github.com/strukturag/nextcloud-spreed-signaling/pull/346) - Bump github.com/nats-io/nats-server/v2 from 2.9.2 to 2.9.3 [#348](https://github.com/strukturag/nextcloud-spreed-signaling/pull/348) - Bump github.com/nats-io/nats.go from 1.17.0 to 1.18.0 [#349](https://github.com/strukturag/nextcloud-spreed-signaling/pull/349) - Bump sphinx from 5.2.3 to 5.3.0 in /docs [#351](https://github.com/strukturag/nextcloud-spreed-signaling/pull/351) - Bump mkdocs from 1.4.0 to 1.4.1 in /docs [#352](https://github.com/strukturag/nextcloud-spreed-signaling/pull/352) - Bump google.golang.org/grpc from 1.50.0 to 1.50.1 [#350](https://github.com/strukturag/nextcloud-spreed-signaling/pull/350) - Bump golangci/golangci-lint-action from 3.2.0 to 3.3.0 [#353](https://github.com/strukturag/nextcloud-spreed-signaling/pull/353) - Bump mkdocs from 1.4.1 to 1.4.2 in /docs [#358](https://github.com/strukturag/nextcloud-spreed-signaling/pull/358) - Bump sphinx-rtd-theme from 1.0.0 to 1.1.0 in /docs [#357](https://github.com/strukturag/nextcloud-spreed-signaling/pull/357) - Bump github.com/nats-io/nats.go from 1.18.0 to 1.19.0 [#354](https://github.com/strukturag/nextcloud-spreed-signaling/pull/354) - Bump github.com/prometheus/client_golang from 1.13.0 to 1.13.1 [#360](https://github.com/strukturag/nextcloud-spreed-signaling/pull/360) - Bump github.com/nats-io/nats-server/v2 from 2.9.3 to 2.9.5 [#359](https://github.com/strukturag/nextcloud-spreed-signaling/pull/359) - 
build(deps): Bump golangci/golangci-lint-action from 3.3.0 to 3.3.1 [#365](https://github.com/strukturag/nextcloud-spreed-signaling/pull/365) - build(deps): Bump sphinx-rtd-theme from 1.1.0 to 1.1.1 in /docs [#363](https://github.com/strukturag/nextcloud-spreed-signaling/pull/363) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.5 to 2.9.6 [#361](https://github.com/strukturag/nextcloud-spreed-signaling/pull/361) - build(deps): Bump github.com/nats-io/nats.go from 1.19.0 to 1.20.0 [#366](https://github.com/strukturag/nextcloud-spreed-signaling/pull/366) - build(deps): Bump google.golang.org/grpc from 1.50.1 to 1.51.0 [#368](https://github.com/strukturag/nextcloud-spreed-signaling/pull/368) - build(deps): Bump github.com/prometheus/client_golang from 1.13.1 to 1.14.0 [#364](https://github.com/strukturag/nextcloud-spreed-signaling/pull/364) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.6 to 2.9.7 [#367](https://github.com/strukturag/nextcloud-spreed-signaling/pull/367) - build(deps): Bump go.etcd.io/etcd/server/v3 from 3.5.5 to 3.5.6 [#372](https://github.com/strukturag/nextcloud-spreed-signaling/pull/372) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.7 to 2.9.8 [#371](https://github.com/strukturag/nextcloud-spreed-signaling/pull/371) - build(deps): Bump github.com/nats-io/nats.go from 1.20.0 to 1.21.0 [#375](https://github.com/strukturag/nextcloud-spreed-signaling/pull/375) - build(deps): Bump github.com/golang-jwt/jwt/v4 from 4.4.2 to 4.4.3 [#374](https://github.com/strukturag/nextcloud-spreed-signaling/pull/374) - build(deps): Bump cirrus-actions/rebase from 1.7 to 1.8 [#379](https://github.com/strukturag/nextcloud-spreed-signaling/pull/379) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.8 to 2.9.9 [#377](https://github.com/strukturag/nextcloud-spreed-signaling/pull/377) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.9 to 2.9.10 
[#382](https://github.com/strukturag/nextcloud-spreed-signaling/pull/382) - build(deps): Bump github.com/nats-io/nats.go from 1.21.0 to 1.22.1 [#383](https://github.com/strukturag/nextcloud-spreed-signaling/pull/383) - build(deps): Bump google.golang.org/grpc from 1.51.0 to 1.52.0 [#391](https://github.com/strukturag/nextcloud-spreed-signaling/pull/391) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.10 to 2.9.11 [#387](https://github.com/strukturag/nextcloud-spreed-signaling/pull/387) - Stop using WaitGroup to detect finished message processing. [#394](https://github.com/strukturag/nextcloud-spreed-signaling/pull/394) - Improve handling of throttled responses from Nextcloud. [#395](https://github.com/strukturag/nextcloud-spreed-signaling/pull/395) - Test: add timeout while waiting for etcd event. [#397](https://github.com/strukturag/nextcloud-spreed-signaling/pull/397) - build(deps): Bump github.com/nats-io/nats.go from 1.22.1 to 1.23.0 [#399](https://github.com/strukturag/nextcloud-spreed-signaling/pull/399) - build(deps): Bump go.etcd.io/etcd/api/v3 from 3.5.6 to 3.5.7 [#402](https://github.com/strukturag/nextcloud-spreed-signaling/pull/402) - build(deps): Bump go.etcd.io/etcd/client/v3 from 3.5.6 to 3.5.7 [#403](https://github.com/strukturag/nextcloud-spreed-signaling/pull/403) - build(deps): Bump go.etcd.io/etcd/server/v3 from 3.5.6 to 3.5.7 [#404](https://github.com/strukturag/nextcloud-spreed-signaling/pull/404) - build(deps): Bump golangci/golangci-lint-action from 3.3.1 to 3.4.0 [#405](https://github.com/strukturag/nextcloud-spreed-signaling/pull/405) - build(deps): Bump readthedocs-sphinx-search from 0.1.2 to 0.2.0 in /docs [#407](https://github.com/strukturag/nextcloud-spreed-signaling/pull/407) - build(deps): Bump google.golang.org/grpc from 1.52.0 to 1.52.1 [#406](https://github.com/strukturag/nextcloud-spreed-signaling/pull/406) - build(deps): Bump docker/build-push-action from 3 to 4 
[#412](https://github.com/strukturag/nextcloud-spreed-signaling/pull/412) - build(deps): Bump google.golang.org/grpc from 1.52.1 to 1.52.3 [#410](https://github.com/strukturag/nextcloud-spreed-signaling/pull/410) - Explicitly use type "sysConn". [#416](https://github.com/strukturag/nextcloud-spreed-signaling/pull/416) - build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.11 to 2.9.14 [#415](https://github.com/strukturag/nextcloud-spreed-signaling/pull/415) - build(deps): Bump sphinx-rtd-theme from 1.1.1 to 1.2.0 in /docs [#418](https://github.com/strukturag/nextcloud-spreed-signaling/pull/418) - build(deps): Bump google.golang.org/grpc from 1.52.3 to 1.53.0 [#417](https://github.com/strukturag/nextcloud-spreed-signaling/pull/417) - build(deps): Bump golang.org/x/net from 0.5.0 to 0.7.0 [#422](https://github.com/strukturag/nextcloud-spreed-signaling/pull/422) - build(deps): Bump github.com/golang-jwt/jwt/v4 from 4.4.3 to 4.5.0 [#423](https://github.com/strukturag/nextcloud-spreed-signaling/pull/423) - build(deps): Bump sphinx from 5.3.0 to 6.1.3 in /docs [#390](https://github.com/strukturag/nextcloud-spreed-signaling/pull/390) - Various refactorings to simplify code [#400](https://github.com/strukturag/nextcloud-spreed-signaling/pull/400) ### Fixed - Remove @resources from SystemCallFilter [#322](https://github.com/strukturag/nextcloud-spreed-signaling/pull/322) - Fix deadlock for proxy connection issues [#327](https://github.com/strukturag/nextcloud-spreed-signaling/pull/327) - Fix goroutines leak check. [#396](https://github.com/strukturag/nextcloud-spreed-signaling/pull/396) ## 1.0.0 - 2022-08-04 ### Added - Clustering support. [#281](https://github.com/strukturag/nextcloud-spreed-signaling/pull/281) - Send initial "welcome" message when clients connect. [#288](https://github.com/strukturag/nextcloud-spreed-signaling/pull/288) - Support hello auth version "2.0" with JWT. 
[#251](https://github.com/strukturag/nextcloud-spreed-signaling/pull/251) - dist: add systemd sysusers file. [#275](https://github.com/strukturag/nextcloud-spreed-signaling/pull/275) - Add more tests. [#292](https://github.com/strukturag/nextcloud-spreed-signaling/pull/292) - Add tests for virtual sessions. [#295](https://github.com/strukturag/nextcloud-spreed-signaling/pull/295) - Implement per-backend session limit for clusters. [#296](https://github.com/strukturag/nextcloud-spreed-signaling/pull/296) ### Changed - Don't run "go mod tidy" when building. [#269](https://github.com/strukturag/nextcloud-spreed-signaling/pull/269) - Bump sphinx from 5.0.0 to 5.0.1 in /docs [#270](https://github.com/strukturag/nextcloud-spreed-signaling/pull/270) - Bump sphinx from 5.0.1 to 5.0.2 in /docs [#277](https://github.com/strukturag/nextcloud-spreed-signaling/pull/277) - Move common etcd code to own class. [#282](https://github.com/strukturag/nextcloud-spreed-signaling/pull/282) - Support arbitrary capabilities values. [#287](https://github.com/strukturag/nextcloud-spreed-signaling/pull/287) - dist: harden systemd service unit. [#276](https://github.com/strukturag/nextcloud-spreed-signaling/pull/276) - Update to Go module version of github.com/golang-jwt/jwt [#289](https://github.com/strukturag/nextcloud-spreed-signaling/pull/289) - Disconnect sessions with the same room session id synchronously. [#294](https://github.com/strukturag/nextcloud-spreed-signaling/pull/294) - Bump google.golang.org/grpc from 1.47.0 to 1.48.0 [#297](https://github.com/strukturag/nextcloud-spreed-signaling/pull/297) - Update to github.com/pion/sdp v3.0.5 [#301](https://github.com/strukturag/nextcloud-spreed-signaling/pull/301) - Bump sphinx from 5.0.2 to 5.1.1 in /docs [#303](https://github.com/strukturag/nextcloud-spreed-signaling/pull/303) - make: Include vendored dependencies in tarball. [#300](https://github.com/strukturag/nextcloud-spreed-signaling/pull/300) - docs: update and pin dependencies. 
[#305](https://github.com/strukturag/nextcloud-spreed-signaling/pull/305) - Bump actions/upload-artifact from 2 to 3 [#307](https://github.com/strukturag/nextcloud-spreed-signaling/pull/307) - Bump actions/download-artifact from 2 to 3 [#308](https://github.com/strukturag/nextcloud-spreed-signaling/pull/308) - Bump google.golang.org/protobuf from 1.28.0 to 1.28.1 [#306](https://github.com/strukturag/nextcloud-spreed-signaling/pull/306) - CI: Also test with Golang 1.19 [#310](https://github.com/strukturag/nextcloud-spreed-signaling/pull/310) ### Fixed - Fix check for async room messages received while not joined to a room. [#274](https://github.com/strukturag/nextcloud-spreed-signaling/pull/274) - Fix testing etcd server not starting up if etcd is running on host. [#283](https://github.com/strukturag/nextcloud-spreed-signaling/pull/283) - Fix CI issues on slow CPUs. [#290](https://github.com/strukturag/nextcloud-spreed-signaling/pull/290) - Fix handling of "unshareScreen" messages and add test. [#293](https://github.com/strukturag/nextcloud-spreed-signaling/pull/293) - Fix Read The Docs builds. [#302](https://github.com/strukturag/nextcloud-spreed-signaling/pull/302) ## 0.5.0 - 2022-06-02 ### Added - Add API documentation (previously in https://github.com/nextcloud/spreed) [#194](https://github.com/strukturag/nextcloud-spreed-signaling/pull/194) - CI: Enable gofmt linter. [#196](https://github.com/strukturag/nextcloud-spreed-signaling/pull/196) - CI: Enable revive linter. [#197](https://github.com/strukturag/nextcloud-spreed-signaling/pull/197) - Add API for transient room data. [#193](https://github.com/strukturag/nextcloud-spreed-signaling/pull/193) - Send updated offers to subscribers after publisher renegotiations. [#195](https://github.com/strukturag/nextcloud-spreed-signaling/pull/195) - Add documentation on the available metrics. 
[#210](https://github.com/strukturag/nextcloud-spreed-signaling/pull/210) - Add special events to update "incall" flags of all sessions. [#208](https://github.com/strukturag/nextcloud-spreed-signaling/pull/208) - CI: Also test with Golang 1.18. [#209](https://github.com/strukturag/nextcloud-spreed-signaling/pull/209) - Support DNS discovery for proxy server URLs. [#214](https://github.com/strukturag/nextcloud-spreed-signaling/pull/214) - CI: Build docker image. [#238](https://github.com/strukturag/nextcloud-spreed-signaling/pull/238) - Add specific id for connections and replace "update" parameter with it. [#229](https://github.com/strukturag/nextcloud-spreed-signaling/pull/229) - Add "permission" for sessions that may not receive display names. [#227](https://github.com/strukturag/nextcloud-spreed-signaling/pull/227) - Add support for request offers to update subscriber connections. [#191](https://github.com/strukturag/nextcloud-spreed-signaling/pull/191) - Support toggling audio/video in subscribed streams. [#239](https://github.com/strukturag/nextcloud-spreed-signaling/pull/239) - CI: Test building coturn/janus Docker images. [#258](https://github.com/strukturag/nextcloud-spreed-signaling/pull/258) - Add command bot for "/rebase". [#260](https://github.com/strukturag/nextcloud-spreed-signaling/pull/260) - Add Go Report card. [#262](https://github.com/strukturag/nextcloud-spreed-signaling/pull/262) - Combine ping requests of different rooms on the same backend. 
[#250](https://github.com/strukturag/nextcloud-spreed-signaling/pull/250) ### Changed - Bump github.com/gorilla/websocket from 1.4.2 to 1.5.0 [#198](https://github.com/strukturag/nextcloud-spreed-signaling/pull/198) - Bump golangci/golangci-lint-action from 2.5.2 to 3.1.0 [#202](https://github.com/strukturag/nextcloud-spreed-signaling/pull/202) - Bump actions/checkout from 2.4.0 to 3 [#205](https://github.com/strukturag/nextcloud-spreed-signaling/pull/205) - Bump actions/cache from 2.1.7 to 3 [#211](https://github.com/strukturag/nextcloud-spreed-signaling/pull/211) - Return dedicated error if proxy receives token that is not valid yet. [#212](https://github.com/strukturag/nextcloud-spreed-signaling/pull/212) - CI: Only run workflows if relevant files have changed. [#218](https://github.com/strukturag/nextcloud-spreed-signaling/pull/218) - Bump sphinx from 4.2.0 to 4.5.0 in /docs [#216](https://github.com/strukturag/nextcloud-spreed-signaling/pull/216) - Bump github.com/oschwald/maxminddb-golang from 1.8.0 to 1.9.0 [#213](https://github.com/strukturag/nextcloud-spreed-signaling/pull/213) - Only support last two versions of Golang (1.17 / 1.18). [#219](https://github.com/strukturag/nextcloud-spreed-signaling/pull/219) - Bump github.com/golang-jwt/jwt from 3.2.1+incompatible to 3.2.2+incompatible [#161](https://github.com/strukturag/nextcloud-spreed-signaling/pull/161) - Bump github.com/nats-io/nats-server/v2 from 2.2.6 to 2.7.4 [#207](https://github.com/strukturag/nextcloud-spreed-signaling/pull/207) - Update etcd to v3.5.1 [#179](https://github.com/strukturag/nextcloud-spreed-signaling/pull/179) - Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1 [#190](https://github.com/strukturag/nextcloud-spreed-signaling/pull/190) - Bump go.etcd.io/etcd/client/v3 from 3.5.1 to 3.5.2 [#222](https://github.com/strukturag/nextcloud-spreed-signaling/pull/222) - Use features from newer Golang versions. 
[#220](https://github.com/strukturag/nextcloud-spreed-signaling/pull/220) - Bump actions/setup-go from 2 to 3 [#226](https://github.com/strukturag/nextcloud-spreed-signaling/pull/226) - Send directly to local session with disconnected client. [#228](https://github.com/strukturag/nextcloud-spreed-signaling/pull/228) - Bump github.com/nats-io/nats-server/v2 from 2.7.4 to 2.8.1 [#234](https://github.com/strukturag/nextcloud-spreed-signaling/pull/234) - Bump go.etcd.io/etcd/client/pkg/v3 from 3.5.2 to 3.5.4 [#235](https://github.com/strukturag/nextcloud-spreed-signaling/pull/235) - Bump github/codeql-action from 1 to 2 [#237](https://github.com/strukturag/nextcloud-spreed-signaling/pull/237) - Bump go.etcd.io/etcd/client/v3 from 3.5.2 to 3.5.4 [#236](https://github.com/strukturag/nextcloud-spreed-signaling/pull/236) - Bump github.com/nats-io/nats-server/v2 from 2.8.1 to 2.8.2 [#242](https://github.com/strukturag/nextcloud-spreed-signaling/pull/242) - Bump docker/setup-buildx-action from 1 to 2 [#245](https://github.com/strukturag/nextcloud-spreed-signaling/pull/245) - Bump docker/build-push-action from 2 to 3 [#244](https://github.com/strukturag/nextcloud-spreed-signaling/pull/244) - Bump github.com/nats-io/nats.go from 1.14.0 to 1.15.0 [#243](https://github.com/strukturag/nextcloud-spreed-signaling/pull/243) - Bump readthedocs-sphinx-search from 0.1.1 to 0.1.2 in /docs [#248](https://github.com/strukturag/nextcloud-spreed-signaling/pull/248) - CI: Run when workflow yaml file has changed. [#249](https://github.com/strukturag/nextcloud-spreed-signaling/pull/249) - Bump golangci/golangci-lint-action from 3.1.0 to 3.2.0 [#247](https://github.com/strukturag/nextcloud-spreed-signaling/pull/247) - Move capabilities handling to own file and refactor http client pool. [#252](https://github.com/strukturag/nextcloud-spreed-signaling/pull/252) - Increase allowed body size for backend requests. 
[#255](https://github.com/strukturag/nextcloud-spreed-signaling/pull/255) - Improve test coverage. [#253](https://github.com/strukturag/nextcloud-spreed-signaling/pull/253) - Switch to official Coturn docker image. [#259](https://github.com/strukturag/nextcloud-spreed-signaling/pull/259) - Bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 [#256](https://github.com/strukturag/nextcloud-spreed-signaling/pull/256) - Update Dockerfile versions. [#257](https://github.com/strukturag/nextcloud-spreed-signaling/pull/257) - Update Alpine to 3.15 version, fix CVE-2022-28391 [#261](https://github.com/strukturag/nextcloud-spreed-signaling/pull/261) - Bump cirrus-actions/rebase from 1.6 to 1.7 [#263](https://github.com/strukturag/nextcloud-spreed-signaling/pull/263) - Bump github.com/nats-io/nats.go from 1.15.0 to 1.16.0 [#267](https://github.com/strukturag/nextcloud-spreed-signaling/pull/267) - Bump jandelgado/gcov2lcov-action from 1.0.8 to 1.0.9 [#264](https://github.com/strukturag/nextcloud-spreed-signaling/pull/264) - Bump github.com/nats-io/nats-server/v2 from 2.8.2 to 2.8.4 [#266](https://github.com/strukturag/nextcloud-spreed-signaling/pull/266) - Bump sphinx from 4.5.0 to 5.0.0 in /docs [#268](https://github.com/strukturag/nextcloud-spreed-signaling/pull/268) ### Fixed - CI: Fix linter errors. [#206](https://github.com/strukturag/nextcloud-spreed-signaling/pull/206) - CI: Pin dependencies to fix readthedocs build. [#215](https://github.com/strukturag/nextcloud-spreed-signaling/pull/215) - Fix mediaType not updated after publisher renegotiations. [#221](https://github.com/strukturag/nextcloud-spreed-signaling/pull/221) - Fix "signaling_server_messages_total" stat not being incremented. [#190](https://github.com/strukturag/nextcloud-spreed-signaling/pull/190) ## 0.4.1 - 2022-01-25 ### Added - The room session id is included in "joined" events. 
[#178](https://github.com/strukturag/nextcloud-spreed-signaling/pull/178) - Clients can provide the maximum publishing bandwidth in offer requests. [#183](https://github.com/strukturag/nextcloud-spreed-signaling/pull/183) ### Changed - Change source of country -> continent map. [#177](https://github.com/strukturag/nextcloud-spreed-signaling/pull/177) - Bump actions/cache from 2.1.6 to 2.1.7 [#171](https://github.com/strukturag/nextcloud-spreed-signaling/pull/171) ## 0.4.0 - 2021-11-10 ### Added - Support continent mapping overrides. [#143](https://github.com/strukturag/nextcloud-spreed-signaling/pull/143) - Add prometheus metrics [#99](https://github.com/strukturag/nextcloud-spreed-signaling/pull/99) - Support separate permissions for publishing audio / video. [#157](https://github.com/strukturag/nextcloud-spreed-signaling/pull/157) - Check individual audio/video permissions on change. [#169](https://github.com/strukturag/nextcloud-spreed-signaling/pull/169) - CI: Also test with Go 1.17 [#153](https://github.com/strukturag/nextcloud-spreed-signaling/pull/153) ### Changed - Force HTTPS for backend connections in old-style configurations. [#132](https://github.com/strukturag/nextcloud-spreed-signaling/pull/132) - Only include body in 307/308 redirects if going to same host [#134](https://github.com/strukturag/nextcloud-spreed-signaling/pull/134) - Stop publishers if session is no longer allowed to publish. [#140](https://github.com/strukturag/nextcloud-spreed-signaling/pull/140) - Only allow subscribing if both users are in the same room and call. [#133](https://github.com/strukturag/nextcloud-spreed-signaling/pull/133) - Internal clients always may subscribe all streams. [#159](https://github.com/strukturag/nextcloud-spreed-signaling/pull/159) - Reduce RTT logging [#167](https://github.com/strukturag/nextcloud-spreed-signaling/pull/167) - deps: Migrate to "github.com/golang-jwt/jwt". 
[#160](https://github.com/strukturag/nextcloud-spreed-signaling/pull/160) - Bump coverallsapp/github-action from 1.1.2 to 1.1.3 [#131](https://github.com/strukturag/nextcloud-spreed-signaling/pull/131) - Bump github.com/google/uuid from 1.2.0 to 1.3.0 [#138](https://github.com/strukturag/nextcloud-spreed-signaling/pull/138) - Bump github.com/prometheus/client_golang from 1.10.0 to 1.11.0 [#144](https://github.com/strukturag/nextcloud-spreed-signaling/pull/144) - Bump github.com/nats-io/nats.go from 1.11.0 to 1.12.1 [#150](https://github.com/strukturag/nextcloud-spreed-signaling/pull/150) - Bump github.com/nats-io/nats.go from 1.12.1 to 1.12.3 [#154](https://github.com/strukturag/nextcloud-spreed-signaling/pull/154) - Bump github.com/nats-io/nats.go from 1.12.3 to 1.13.0 [#158](https://github.com/strukturag/nextcloud-spreed-signaling/pull/158) - Bump actions/checkout from 2.3.4 to 2.3.5 [#163](https://github.com/strukturag/nextcloud-spreed-signaling/pull/163) - Bump actions/checkout from 2.3.5 to 2.4.0 [#166](https://github.com/strukturag/nextcloud-spreed-signaling/pull/166) ### Fixed - Adjusted easyjson for multiarch builds [#129](https://github.com/strukturag/nextcloud-spreed-signaling/pull/129) ## 0.3.0 - 2021-07-01 ### Added - Certificate validation can be disabled for proxy connections - Number of sessions per backend can be limited [#67](https://github.com/strukturag/nextcloud-spreed-signaling/pull/67) - Use Go modules for dependency tracking, drop support for Golang < 1.13 [#88](https://github.com/strukturag/nextcloud-spreed-signaling/pull/88) - Support defining maximum bandwidths at diferent levels [#76](https://github.com/strukturag/nextcloud-spreed-signaling/pull/76) - Show coverage report in PRs [#34](https://github.com/strukturag/nextcloud-spreed-signaling/pull/34) - CI: Also test with Golang 1.16 - CI: Run golint [#32](https://github.com/strukturag/nextcloud-spreed-signaling/pull/32) - CI: Add CodeQL analysis 
[#112](https://github.com/strukturag/nextcloud-spreed-signaling/pull/112) - Add tests for regular NATS client [#105](https://github.com/strukturag/nextcloud-spreed-signaling/pull/105) - Fetch capabilities to check if "v3" signaling API of Talk should be used. [#119](https://github.com/strukturag/nextcloud-spreed-signaling/pull/119) - Add API to select a simulcast substream / temporal layer [#104](https://github.com/strukturag/nextcloud-spreed-signaling/pull/104) ### Changed - Improved detection of broken connections between server and proxy [#65](https://github.com/strukturag/nextcloud-spreed-signaling/pull/65) - Stop using legacy ptype `listener` [#83](https://github.com/strukturag/nextcloud-spreed-signaling/pull/83) - Update gorilla/mux to 1.8.0 [#89](https://github.com/strukturag/nextcloud-spreed-signaling/pull/89) - Remove unnecessary dependency golang.org/x/net [#90](https://github.com/strukturag/nextcloud-spreed-signaling/pull/90) - Update nats.go to 1.10.0 [#92](https://github.com/strukturag/nextcloud-spreed-signaling/pull/92) - Update maxminddb-golang to 1.8.0 [#91](https://github.com/strukturag/nextcloud-spreed-signaling/pull/91) - Add dependabot integration [#93](https://github.com/strukturag/nextcloud-spreed-signaling/pull/93) - Bump github.com/google/uuid from 1.1.2 to 1.2.0 [#94](https://github.com/strukturag/nextcloud-spreed-signaling/pull/94) - Bump github.com/gorilla/websocket from 1.2.0 to 1.4.2 [#95](https://github.com/strukturag/nextcloud-spreed-signaling/pull/95) - Remove deprecated github.com/gorilla/context - Update to go.etcd.io/etcd 3.4.15 - make: Cache easyjson results. 
[#96](https://github.com/strukturag/nextcloud-spreed-signaling/pull/96) - Various updates to Docker components [#78](https://github.com/strukturag/nextcloud-spreed-signaling/pull/78) - Bump coverallsapp/github-action from v1.1.1 to v1.1.2 [#102](https://github.com/strukturag/nextcloud-spreed-signaling/pull/102) - Bump jandelgado/gcov2lcov-action from v1.0.2 to v1.0.8 [#103](https://github.com/strukturag/nextcloud-spreed-signaling/pull/103) - Bump actions/cache from 2 to 2.1.5 [#106](https://github.com/strukturag/nextcloud-spreed-signaling/pull/106) - Bump golangci/golangci-lint-action from 2 to 2.5.2 [#107](https://github.com/strukturag/nextcloud-spreed-signaling/pull/107) - Bump actions/checkout from 2 to 2.3.4 [#108](https://github.com/strukturag/nextcloud-spreed-signaling/pull/108) - Bump actions/cache from 2.1.5 to 2.1.6 [#110](https://github.com/strukturag/nextcloud-spreed-signaling/pull/110) - Don't log TURN credentials [#113](https://github.com/strukturag/nextcloud-spreed-signaling/pull/113) - Remove NATS notifications for Janus publishers [#114](https://github.com/strukturag/nextcloud-spreed-signaling/pull/114) - Make client processing asynchronous [#111](https://github.com/strukturag/nextcloud-spreed-signaling/pull/111) - Bump github.com/nats-io/nats-server/v2 from 2.2.1 to 2.2.6 [#116](https://github.com/strukturag/nextcloud-spreed-signaling/pull/116) - Notify new clients about flags of virtual sessions [#121](https://github.com/strukturag/nextcloud-spreed-signaling/pull/121) ### Fixed - Adjusted godeps for multiarch builds [#69](https://github.com/strukturag/nextcloud-spreed-signaling/pull/69) - Add missing lock when accessing internal sessions map - Fixed parallel building [#73](https://github.com/strukturag/nextcloud-spreed-signaling/pull/73) - Make the response from the client auth backend OCS compliant [#74](https://github.com/strukturag/nextcloud-spreed-signaling/pull/74) - Fixed alignment of 64bit members that are accessed atomically 
[#72](https://github.com/strukturag/nextcloud-spreed-signaling/pull/72) - Only build "godep" binary once [#75](https://github.com/strukturag/nextcloud-spreed-signaling/pull/75) - Update config example for Apache proxy config [#82](https://github.com/strukturag/nextcloud-spreed-signaling/pull/82) - Remove remaining virtual sessions if client session is closed - Fix Caddy v2 example config [#97](https://github.com/strukturag/nextcloud-spreed-signaling/pull/97) - Fix various issues found by golangci-lint [#100](https://github.com/strukturag/nextcloud-spreed-signaling/pull/100) - Support multiple waiters for the same key [#120](https://github.com/strukturag/nextcloud-spreed-signaling/pull/120) - Various test improvements / fixes [#115](https://github.com/strukturag/nextcloud-spreed-signaling/pull/115) ## 0.2.0 - 2020-12-08 ### Added - Reload backends from configuration on SIGHUP [#52](https://github.com/strukturag/nextcloud-spreed-signaling/pull/52) [#53](https://github.com/strukturag/nextcloud-spreed-signaling/pull/53) - Add support for virtual sessions [#61](https://github.com/strukturag/nextcloud-spreed-signaling/pull/61) ### Changed - Default to proxy url type "static" if none is configured - Don't perform request to proxy if context is already done - Mark session as used when proxy connection is interrupted to prevent from timing out too early - Use dedicated (shorter) timeout for proxy requests to avoid using the whole available timeout for the first proxy request - Update logging when creating / deleting publishers / subscribers - Include load in stats response - Send MCU messages through the session [#55](https://github.com/strukturag/nextcloud-spreed-signaling/pull/55) - Add '--full-trickle' to janus command [#57](https://github.com/strukturag/nextcloud-spreed-signaling/pull/57) - README: Add missing information for creating group [#60](https://github.com/strukturag/nextcloud-spreed-signaling/pull/60) - Canonicalize all URLs before comparisons / lookups 
[#62](https://github.com/strukturag/nextcloud-spreed-signaling/pull/62) ### Fixed - Handle case where etcd cluster is not available during startup - Remove duplicate argument in Dockerfile [#50](https://github.com/strukturag/nextcloud-spreed-signaling/pull/50) - Handle old-style MCU configuration with type but no url - Fix proxy client cleanup code [#56](https://github.com/strukturag/nextcloud-spreed-signaling/pull/56) ## 0.1.0 - 2020-09-07 ### Added - Add Docker support [#7](https://github.com/strukturag/nextcloud-spreed-signaling/pull/7) - Added basic stats API [#16](https://github.com/strukturag/nextcloud-spreed-signaling/pull/16) - Add "reason" field to disinvite messages [#26](https://github.com/strukturag/nextcloud-spreed-signaling/pull/26) - Added support for multiple Nextcloud backends [#28](https://github.com/strukturag/nextcloud-spreed-signaling/pull/28) - Support connecting to multiple Janus servers [#36](https://github.com/strukturag/nextcloud-spreed-signaling/pull/36) - Added support for loading proxy tokens from etcd cluser [#44](https://github.com/strukturag/nextcloud-spreed-signaling/pull/44) - Proxy URLs are reloaded on SIGHUP [#46](https://github.com/strukturag/nextcloud-spreed-signaling/pull/46) - Added support for loading proxy URls from etcd cluster [#47](https://github.com/strukturag/nextcloud-spreed-signaling/pull/47) - Add option to override GeoIP lookups (e.g. 
for local addresses) [#48](https://github.com/strukturag/nextcloud-spreed-signaling/pull/48) ### Changed - The continent map is no longer downloaded on each build [#29](https://github.com/strukturag/nextcloud-spreed-signaling/pull/29) - NATS messages are processed directly [#35](https://github.com/strukturag/nextcloud-spreed-signaling/pull/35) - Support changed "slowlink" message from Janus > 0.7.3 [#39](https://github.com/strukturag/nextcloud-spreed-signaling/pull/39) - The GeoIP database can be loaded from a local file [#40](https://github.com/strukturag/nextcloud-spreed-signaling/pull/40) - Drop support for Golang < 1.10. ### Fixed - Fixes for building on FreeBSD [#2](https://github.com/strukturag/nextcloud-spreed-signaling/pull/2) - Fixes for typos in comments and error messages [#10](https://github.com/strukturag/nextcloud-spreed-signaling/pull/10) - Remove credentials from log [#13](https://github.com/strukturag/nextcloud-spreed-signaling/pull/13) ### Documentation - Add systemd to docs [#3](https://github.com/strukturag/nextcloud-spreed-signaling/pull/3) - Add caddy server to reverse proxy examples [#5](https://github.com/strukturag/nextcloud-spreed-signaling/pull/5) - Update link to API documentation [#6](https://github.com/strukturag/nextcloud-spreed-signaling/pull/6) - Update build requirements [#12](https://github.com/strukturag/nextcloud-spreed-signaling/pull/12) ## 0.0.13 - 2020-05-12 - Initial OpenSource version. nextcloud-spreed-signaling-1.2.4/LICENSE000066400000000000000000001033301460321600400177550ustar00rootroot00000000000000 GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. 
It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see . 
nextcloud-spreed-signaling-1.2.4/Makefile000066400000000000000000000110761460321600400204150ustar00rootroot00000000000000all: build GO := $(shell which go) GOPATH := $(shell "$(GO)" env GOPATH) GODIR := $(shell dirname "$(GO)") GOFMT := "$(GODIR)/gofmt" GOOS ?= linux GOARCH ?= amd64 GOVERSION := $(shell "$(GO)" env GOVERSION | sed "s|go||" ) BINDIR := $(CURDIR)/bin VENDORDIR := "$(CURDIR)/vendor" VERSION := $(shell "$(CURDIR)/scripts/get-version.sh") TARVERSION := $(shell "$(CURDIR)/scripts/get-version.sh" --tar) PACKAGENAME := github.com/strukturag/nextcloud-spreed-signaling ALL_PACKAGES := $(PACKAGENAME) $(PACKAGENAME)/client $(PACKAGENAME)/proxy $(PACKAGENAME)/server PROTO_FILES := $(basename $(wildcard *.proto)) PROTO_GO_FILES := $(addsuffix .pb.go,$(PROTO_FILES)) $(addsuffix _grpc.pb.go,$(PROTO_FILES)) EASYJSON_GO_FILES := \ api_async_easyjson.go \ api_backend_easyjson.go \ api_grpc_easyjson.go \ api_proxy_easyjson.go \ api_signaling_easyjson.go ifneq ($(VERSION),) INTERNALLDFLAGS := -X main.version=$(VERSION) else INTERNALLDFLAGS := endif ifneq ($(RACE),) BUILDARGS := -race else BUILDARGS := endif ifneq ($(CI),) TESTARGS := -race else TESTARGS := endif ifeq ($(TIMEOUT),) TIMEOUT := 60s endif ifneq ($(TEST),) TESTARGS := $(TESTARGS) -run "$(TEST)" endif ifneq ($(COUNT),) TESTARGS := $(TESTARGS) -count $(COUNT) endif ifeq ($(GOARCH), amd64) GOPATHBIN := $(GOPATH)/bin else GOPATHBIN := $(GOPATH)/bin/$(GOOS)_$(GOARCH) endif hook: [ ! -d "$(CURDIR)/.git/hooks" ] || ln -sf "$(CURDIR)/scripts/pre-commit.hook" "$(CURDIR)/.git/hooks/pre-commit" $(GOPATHBIN)/easyjson: go.mod go.sum [ "$(GOPROXY)" = "off" ] || $(GO) get -d github.com/mailru/easyjson/... $(GO) install github.com/mailru/easyjson/... 
$(GOPATHBIN)/protoc-gen-go: go.mod go.sum $(GO) install google.golang.org/protobuf/cmd/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc: go.mod go.sum [ "$(GOPROXY)" = "off" ] || $(GO) get -d google.golang.org/grpc/cmd/protoc-gen-go-grpc $(GO) install google.golang.org/grpc/cmd/protoc-gen-go-grpc continentmap.go: $(CURDIR)/scripts/get_continent_map.py $@ check-continentmap: set -e ;\ TMP=$$(mktemp -d) ;\ echo Make sure to remove $$TMP on error ;\ $(CURDIR)/scripts/get_continent_map.py $$TMP/continentmap.go ;\ diff -u continentmap.go $$TMP/continentmap.go ;\ rm -rf $$TMP get: $(GO) get $(PACKAGE) fmt: hook | $(PROTO_GO_FILES) $(GOFMT) -s -w *.go client proxy server vet: common $(GO) vet $(ALL_PACKAGES) test: vet common $(GO) test -v -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES) cover: vet common rm -f cover.out && \ $(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \ sed -i "/_easyjson/d" cover.out && \ sed -i "/\.pb\.go/d" cover.out && \ $(GO) tool cover -func=cover.out coverhtml: vet common rm -f cover.out && \ $(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \ sed -i "/_easyjson/d" cover.out && \ sed -i "/\.pb\.go/d" cover.out && \ $(GO) tool cover -html=cover.out -o coverage.html %_easyjson.go: %.go $(GOPATHBIN)/easyjson | $(PROTO_GO_FILES) rm -f easyjson-bootstrap*.go PATH="$(GODIR)":$(PATH) "$(GOPATHBIN)/easyjson" -all $*.go %.pb.go: %.proto $(GOPATHBIN)/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc \ --go_out=. --go_opt=paths=source_relative \ $*.proto %_grpc.pb.go: %.proto $(GOPATHBIN)/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc \ --go-grpc_out=. --go-grpc_opt=paths=source_relative \ $*.proto common: $(EASYJSON_GO_FILES) $(PROTO_GO_FILES) $(BINDIR): mkdir -p "$(BINDIR)" client: common $(BINDIR) $(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/client" ./client/... 
server: common $(BINDIR) $(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/signaling" ./server/... proxy: common $(BINDIR) $(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/proxy" ./proxy/... clean: rm -f $(EASYJSON_GO_FILES) rm -f easyjson-bootstrap*.go rm -f $(PROTO_GO_FILES) build: server proxy vendor: go.mod go.sum common set -e ;\ rm -rf $(VENDORDIR) $(GO) mod tidy; \ $(GO) mod vendor tarball: vendor git archive \ --prefix=nextcloud-spreed-signaling-$(TARVERSION)/ \ -o nextcloud-spreed-signaling-$(TARVERSION).tar \ HEAD tar rf nextcloud-spreed-signaling-$(TARVERSION).tar \ -C $(CURDIR) \ --mtime="$(shell git log -1 --date=iso8601-strict --format=%cd HEAD)" \ --transform "s//nextcloud-spreed-signaling-$(TARVERSION)\//" \ vendor gzip --force nextcloud-spreed-signaling-$(TARVERSION).tar dist: tarball .NOTPARALLEL: $(EASYJSON_GO_FILES) .PHONY: continentmap.go common vendor .SECONDARY: $(EASYJSON_GO_FILES) $(PROTO_GO_FILES) .DELETE_ON_ERROR: nextcloud-spreed-signaling-1.2.4/README.md000066400000000000000000000320041460321600400202260ustar00rootroot00000000000000# Spreed standalone signaling server ![Build Status](https://github.com/strukturag/nextcloud-spreed-signaling/workflows/test/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/strukturag/nextcloud-spreed-signaling/badge.svg?branch=master)](https://coveralls.io/github/strukturag/nextcloud-spreed-signaling?branch=master) [![Documentation Status](https://readthedocs.org/projects/nextcloud-spreed-signaling/badge/?version=latest)](https://nextcloud-spreed-signaling.readthedocs.io/en/latest/?badge=latest) [![Go Report](https://goreportcard.com/badge/github.com/strukturag/nextcloud-spreed-signaling)](https://goreportcard.com/report/github.com/strukturag/nextcloud-spreed-signaling) This repository contains the standalone signaling server which can be used for Nextcloud Talk (https://apps.nextcloud.com/apps/spreed). 
See https://nextcloud-spreed-signaling.readthedocs.io/en/latest/ for further information on the API of the signaling server. ## Building The following tools are required for building the signaling server. - git - go >= 1.20 - make - protobuf-compiler >= 3 Usually the last two versions of Go are supported. This follows the release policy of Go: https://go.dev/doc/devel/release#policy All other dependencies are fetched automatically while building. $ make build or on FreeBSD $ gmake build Afterwards the binary is created as `bin/signaling`. ## Configuration A default configuration file is included as `server.conf.in`. Copy this to `server.conf` and adjust as necessary for the local setup. See the file for comments about the different parameters that can be changed. ## Running The signaling server connects to a NATS server (https://nats.io/) to distribute messages between different instances. See the NATS documentation on how to set up a server and run it. Once the NATS server is running (and the URL to it is configured for the signaling server), you can start the signaling server. $ ./bin/signaling By default, the configuration is loaded from `server.conf` in the current directory, but a different path can be passed through the `--config` option. $ ./bin/signaling --config /etc/signaling/server.conf ### Running as daemon #### systemd Create a dedicated group and user: ```bash sudo groupadd --system signaling sudo useradd --system \ --gid signaling \ --shell /usr/sbin/nologin \ --comment "Standalone signaling server for Nextcloud Talk." \ signaling ``` Copy `server.conf.in` to `/etc/signaling/server.conf` and fix permissions: ```bash sudo chmod 600 /etc/signaling/server.conf sudo chown signaling: /etc/signaling/server.conf ``` Copy `dist/init/systemd/signaling.service` to `/etc/systemd/system/signaling.service` (adjust abs. path in `ExecStart` to match your binary location!) 
Enable and start service: ```bash systemctl enable signaling.service systemctl start signaling.service ``` ### Running with Docker Official docker containers for the signaling server and -proxy are available on Docker Hub at https://hub.docker.com/r/strukturag/nextcloud-spreed-signaling See the `README.md` in the `docker` subfolder for details. #### Docker Compose You will likely have to adjust the Janus command line options depending on the exact network configuration on your server. Refer to [Setup of Janus](#setup-of-janus) and the Janus documentation for how to configure your Janus server. Copy `server.conf.in` to `server.conf` and adjust it to your liking. If you're using the [docker-compose.yml](docker/docker-compose.yml) configuration as is, the MCU Url must be set to `ws://localhost:8188`, the NATS Url must be set to `nats://localhost:4222`, and TURN Servers must be set to `turn:localhost:3478?transport=udp,turn:localhost:3478?transport=tcp`. ```bash docker-compose build docker-compose up -d ``` Please note that docker-compose v2 is required for building while most distributions will ship older versions. You can download a recent version from https://docs.docker.com/compose/install/ ## Setup of NATS server There is a detailed description on how to install and run the NATS server available at https://docs.nats.io/running-a-nats-service/introduction You can use the `gnatsd.conf` file as base for the configuration of the NATS server. ## Setup of Janus A Janus server (from https://github.com/meetecho/janus-gateway) can be used to act as a WebRTC gateway. See the documentation of Janus on how to configure and run the server. At least the `VideoRoom` plugin and the websocket transport of Janus must be enabled. The signaling server uses the `VideoRoom` plugin of Janus to manage sessions. All gateway details are hidden from the clients, all messages are sent through the signaling server. Only WebRTC media is exchanged directly between the gateway and the clients. 
Edit the `server.conf` and enter the URL to the websocket endpoint of Janus in the section `[mcu]` and key `url`. During startup, the signaling server will connect to Janus and log information of the gateway. The maximum bandwidth per publishing stream can also be configured in the section `[mcu]`, see properties `maxstreambitrate` and `maxscreenbitrate`. ### Use multiple Janus servers To scale the setup and add high availability, a signaling server can connect to one or multiple proxy servers that each provide access to a single Janus server. For that, set the `type` key in section `[mcu]` to `proxy` and set `url` to a space-separated list of URLs where a proxy server is running. Each signaling server that connects to a proxy needs a unique token id and a public / private RSA keypair. The token id must be configured as `token_id` in section `[mcu]`, the path to the private key file as `token_key`. ### Setup of proxy server The proxy server is built with the standard make command `make build` as `bin/proxy` binary. Copy the `proxy.conf.in` as `proxy.conf` and edit section `[tokens]` to the list of allowed token ids and filenames of the public keys for each token id. See the comments in `proxy.conf.in` for other configuration options. When the proxy process receives a `SIGHUP` signal, the list of allowed token ids / public keys is reloaded. A `SIGUSR1` signal can be used to shutdown a proxy process gracefully after all clients have been disconnected. No new publishers will be accepted in this case. ### Clustering The signaling server supports a clustering mode where multiple running servers can be interconnected to form a single "virtual" server. This can be used to increase the capacity of the signaling server or provide a failover setup. For that a central NATS server / cluster must be used by all instances. Each instance must run a GRPC server (enable `listening` in section `grpc` and optionally setup certificate, private key and CA). 
The list of other GRPC targets must be configured as `targets` in section `grpc` or can be retrieved from an etcd cluster. See `server.conf.in` in section `grpc` for configuration details. ## Setup of frontend webserver Usually the standalone signaling server is running behind a webserver that does the SSL protocol or acts as a load balancer for multiple signaling servers. The configuration examples below assume a pre-configured webserver (nginx or Apache) with a working HTTPS setup, that is listening on the external interface of the server hosting the standalone signaling server. After everything has been set up, the configuration can be tested using `curl`: $ curl -i https://myserver.domain.invalid/standalone-signaling/api/v1/welcome HTTP/1.1 200 OK Date: Thu, 05 Jul 2018 09:28:08 GMT Server: nextcloud-spreed-signaling/1.0.0 Content-Type: application/json; charset=utf-8 Content-Length: 59 {"nextcloud-spreed-signaling":"Welcome","version":"1.0.0"} ### nginx Nginx can be used as frontend for the standalone signaling server without any additional requirements. The backend should be configured separately so it can be changed in a single location and also to allow using multiple backends from a single frontend server. 
Assuming the standalone signaling server is running on the local interface on port `8080` below, add the following block to the nginx server definition in `/etc/nginx/sites-enabled` (just before the `server` definition): upstream signaling { server 127.0.0.1:8080; } To proxy all requests for the standalone signaling to the correct backend, the following `location` block must be added inside the `server` definition of the same file: location /standalone-signaling/ { proxy_pass http://signaling/; proxy_http_version 1.1; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } location /standalone-signaling/spreed { proxy_pass http://signaling/spreed; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } Example (e.g. `/etc/nginx/sites-enabled/default`): upstream signaling { server 127.0.0.1:8080; } server { listen 443 ssl http2; server_name myserver.domain.invalid; # ... other existing configuration ... 
location /standalone-signaling/ { proxy_pass http://signaling/; proxy_http_version 1.1; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } location /standalone-signaling/spreed { proxy_pass http://signaling/spreed; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } } ### Apache To configure the Apache webservice as frontend for the standalone signaling server, the modules `mod_proxy_http` and `mod_proxy_wstunnel` must be enabled so WebSocket and API backend requests can be proxied: $ sudo a2enmod proxy $ sudo a2enmod proxy_http $ sudo a2enmod proxy_wstunnel Now the Apache `VirtualHost` configuration can be extended to forward requests to the standalone signaling server (assuming the server is running on the local interface on port `8080` below): # ... existing configuration ... # Enable proxying Websocket requests to the standalone signaling server. ProxyPass "/standalone-signaling/" "ws://127.0.0.1:8080/" RewriteEngine On # Websocket connections from the clients. RewriteRule ^/standalone-signaling/spreed/$ - [L] # Backend connections from Nextcloud. RewriteRule ^/standalone-signaling/api/(.*) http://127.0.0.1:8080/api/$1 [L,P] # ... existing configuration ... ### Caddy #### v1 [Caddy (v1)](https://caddyserver.com/v1/) configuration: ``` myserver.domain.invalid { proxy /standalone-signaling/ http://127.0.0.1:8080 { without /standalone-signaling transparent websocket } } ``` #### v2 [Caddy (v2)](https://caddyserver.com/) configuration: ``` myserver.domain.invalid { route /standalone-signaling/* { uri strip_prefix /standalone-signaling reverse_proxy http://127.0.0.1:8080 } } ``` ## Setup of Nextcloud Talk Login to your Nextcloud as admin and open the additional settings page. 
Scroll down to the "Talk" section and enter the base URL of your standalone signaling server in the field "External signaling server". Please note that you have to use `https` if your Nextcloud is also running on `https`. Usually you should enter `https://myhostname/standalone-signaling` as URL. The value "Shared secret for external signaling server" must be the same as the property `secret` in section `backend` of your `server.conf`. If you are using a self-signed certificate for development, you need to uncheck the box `Validate SSL certificate` so backend requests from Nextcloud to the signaling server can be performed. ## Benchmarking the server A simple client exists to benchmark the server. Please note that the features that are benchmarked might not cover the whole functionality, check the implementation in `src/client` for details on the client. To authenticate new client connections to the signaling server, the client starts a dummy authentication handler on a local interface and passes the URL in the `hello` request. Therefore the signaling server should be configured to allow all backend hosts (option `allowall` in section `backend`). The client is not compiled by default, but can be using the `client` target: $ make client Usage: $ ./bin/client Usage of ./bin/client: -addr string http service address (default "localhost:28080") -config string config file to use (default "server.conf") -maxClients int number of client connections (default 100) nextcloud-spreed-signaling-1.2.4/allowed_ips.go000066400000000000000000000043041460321600400216020ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "fmt" "net" "strings" ) type AllowedIps struct { allowed []*net.IPNet } func (a *AllowedIps) Empty() bool { return len(a.allowed) == 0 } func (a *AllowedIps) Allowed(ip net.IP) bool { for _, i := range a.allowed { if i.Contains(ip) { return true } } return false } func parseIPNet(s string) (*net.IPNet, error) { var ipnet *net.IPNet if strings.ContainsRune(s, '/') { var err error if _, ipnet, err = net.ParseCIDR(s); err != nil { return nil, fmt.Errorf("invalid IP address/subnet %s: %w", s, err) } } else { ip := net.ParseIP(s) if ip == nil { return nil, fmt.Errorf("invalid IP address %s", s) } ipnet = &net.IPNet{ IP: ip, Mask: net.CIDRMask(len(ip)*8, len(ip)*8), } } return ipnet, nil } func ParseAllowedIps(allowed string) (*AllowedIps, error) { var allowedIps []*net.IPNet for _, ip := range strings.Split(allowed, ",") { ip = strings.TrimSpace(ip) if ip != "" { i, err := parseIPNet(ip) if err != nil { return nil, err } allowedIps = append(allowedIps, i) } } result := &AllowedIps{ allowed: allowedIps, } return result, nil } func DefaultAllowedIps() *AllowedIps { allowedIps := []*net.IPNet{ { IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(32, 32), }, } result := &AllowedIps{ allowed: allowedIps, } return result } 
nextcloud-spreed-signaling-1.2.4/allowed_ips_test.go000066400000000000000000000033201460321600400226360ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "net" "testing" ) func TestAllowedIps(t *testing.T) { a, err := ParseAllowedIps("127.0.0.1, 192.168.0.1, 192.168.1.1/24") if err != nil { t.Fatal(err) } if a.Empty() { t.Fatal("should not be empty") } allowed := []string{ "127.0.0.1", "192.168.0.1", "192.168.1.1", "192.168.1.100", } notAllowed := []string{ "192.168.0.2", "10.1.2.3", } for _, addr := range allowed { t.Run(addr, func(t *testing.T) { ip := net.ParseIP(addr) if ip == nil { t.Errorf("error parsing %s", addr) } else if !a.Allowed(ip) { t.Errorf("should allow %s", addr) } }) } for _, addr := range notAllowed { t.Run(addr, func(t *testing.T) { ip := net.ParseIP(addr) if ip == nil { t.Errorf("error parsing %s", addr) } else if a.Allowed(ip) { t.Errorf("should not allow %s", addr) } }) } } nextcloud-spreed-signaling-1.2.4/api_async.go000066400000000000000000000035341460321600400212520ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "encoding/json" "fmt" "time" ) type AsyncMessage struct { SendTime time.Time `json:"sendtime"` Type string `json:"type"` Message *ServerMessage `json:"message,omitempty"` Room *BackendServerRoomRequest `json:"room,omitempty"` Permissions []Permission `json:"permissions,omitempty"` AsyncRoom *AsyncRoomMessage `json:"asyncroom,omitempty"` SendOffer *SendOfferMessage `json:"sendoffer,omitempty"` Id string `json:"id"` } func (m *AsyncMessage) String() string { data, err := json.Marshal(m) if err != nil { return fmt.Sprintf("Could not serialize %#v: %s", m, err) } return string(data) } type AsyncRoomMessage struct { Type string `json:"type"` SessionId string `json:"sessionid,omitempty"` ClientType string `json:"clienttype,omitempty"` } type SendOfferMessage struct { MessageId string `json:"messageid,omitempty"` SessionId string `json:"sessionid"` Data *MessageClientMessageData `json:"data"` } nextcloud-spreed-signaling-1.2.4/api_backend.go000066400000000000000000000305371460321600400215270ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "crypto/hmac" "crypto/rand" "crypto/sha256" "crypto/subtle" "encoding/hex" "encoding/json" "fmt" "net/http" "net/url" "regexp" "strings" "time" ) const ( BackendVersion = "1.0" HeaderBackendSignalingRandom = "Spreed-Signaling-Random" HeaderBackendSignalingChecksum = "Spreed-Signaling-Checksum" HeaderBackendServer = "Spreed-Signaling-Backend" ConfigGroupSignaling = "signaling" ConfigKeyHelloV2TokenKey = "hello-v2-token-key" ConfigKeySessionPingLimit = "session-ping-limit" ) func newRandomString(length int) string { b := make([]byte, length/2) if _, err := rand.Read(b); err != nil { panic(err) } return hex.EncodeToString(b) } func CalculateBackendChecksum(random string, body []byte, secret []byte) string { mac := hmac.New(sha256.New, secret) mac.Write([]byte(random)) // nolint mac.Write(body) // nolint return hex.EncodeToString(mac.Sum(nil)) } func AddBackendChecksum(r *http.Request, body []byte, secret []byte) { // Add checksum so the backend can validate the request. 
rnd := newRandomString(64) checksum := CalculateBackendChecksum(rnd, body, secret) r.Header.Set(HeaderBackendSignalingRandom, rnd) r.Header.Set(HeaderBackendSignalingChecksum, checksum) } func ValidateBackendChecksum(r *http.Request, body []byte, secret []byte) bool { rnd := r.Header.Get(HeaderBackendSignalingRandom) checksum := r.Header.Get(HeaderBackendSignalingChecksum) return ValidateBackendChecksumValue(checksum, rnd, body, secret) } func ValidateBackendChecksumValue(checksum string, random string, body []byte, secret []byte) bool { verify := CalculateBackendChecksum(random, body, secret) return subtle.ConstantTimeCompare([]byte(verify), []byte(checksum)) == 1 } // Requests from Nextcloud to the signaling server. type BackendServerRoomRequest struct { room *Room Type string `json:"type"` Invite *BackendRoomInviteRequest `json:"invite,omitempty"` Disinvite *BackendRoomDisinviteRequest `json:"disinvite,omitempty"` Update *BackendRoomUpdateRequest `json:"update,omitempty"` Delete *BackendRoomDeleteRequest `json:"delete,omitempty"` InCall *BackendRoomInCallRequest `json:"incall,omitempty"` Participants *BackendRoomParticipantsRequest `json:"participants,omitempty"` Message *BackendRoomMessageRequest `json:"message,omitempty"` SwitchTo *BackendRoomSwitchToMessageRequest `json:"switchto,omitempty"` Dialout *BackendRoomDialoutRequest `json:"dialout,omitempty"` Transient *BackendRoomTransientRequest `json:"transient,omitempty"` // Internal properties ReceivedTime int64 `json:"received,omitempty"` } type BackendRoomInviteRequest struct { UserIds []string `json:"userids,omitempty"` // TODO(jojo): We should get rid of "AllUserIds" and find a better way to // notify existing users the room has changed and they need to update it. 
AllUserIds []string `json:"alluserids,omitempty"` Properties *json.RawMessage `json:"properties,omitempty"` } type BackendRoomDisinviteRequest struct { UserIds []string `json:"userids,omitempty"` SessionIds []string `json:"sessionids,omitempty"` // TODO(jojo): We should get rid of "AllUserIds" and find a better way to // notify existing users the room has changed and they need to update it. AllUserIds []string `json:"alluserids,omitempty"` Properties *json.RawMessage `json:"properties,omitempty"` } type BackendRoomUpdateRequest struct { UserIds []string `json:"userids,omitempty"` Properties *json.RawMessage `json:"properties,omitempty"` } type BackendRoomDeleteRequest struct { UserIds []string `json:"userids,omitempty"` } type BackendRoomInCallRequest struct { // TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk. InCall json.RawMessage `json:"incall,omitempty"` All bool `json:"all,omitempty"` Changed []map[string]interface{} `json:"changed,omitempty"` Users []map[string]interface{} `json:"users,omitempty"` } type BackendRoomParticipantsRequest struct { Changed []map[string]interface{} `json:"changed,omitempty"` Users []map[string]interface{} `json:"users,omitempty"` } type BackendRoomMessageRequest struct { Data *json.RawMessage `json:"data,omitempty"` } type BackendRoomSwitchToSessionsList []string type BackendRoomSwitchToSessionsMap map[string]json.RawMessage type BackendRoomSwitchToMessageRequest struct { // Target room id RoomId string `json:"roomid"` // Sessions is either a BackendRoomSwitchToSessionsList or a // BackendRoomSwitchToSessionsMap. // In the map, the key is the session id, the value additional details // (or null) for the session. The details will be included in the request // to the connected client. 
Sessions *json.RawMessage `json:"sessions,omitempty"` // Internal properties SessionsList BackendRoomSwitchToSessionsList `json:"sessionslist,omitempty"` SessionsMap BackendRoomSwitchToSessionsMap `json:"sessionsmap,omitempty"` } type BackendRoomDialoutRequest struct { // E.164 number to dial (e.g. "+1234567890") Number string `json:"number"` Options json.RawMessage `json:"options,omitempty"` } var ( checkE164Number = regexp.MustCompile(`^\+\d{2,}$`) ) func isValidNumber(s string) bool { return checkE164Number.MatchString(s) } func (r *BackendRoomDialoutRequest) ValidateNumber() *Error { if r.Number == "" { return NewError("number_missing", "No number provided") } if !isValidNumber(r.Number) { return NewError("invalid_number", "Expected E.164 number.") } return nil } type TransientAction string const ( TransientActionSet TransientAction = "set" TransientActionDelete TransientAction = "delete" ) type BackendRoomTransientRequest struct { Action TransientAction `json:"action"` Key string `json:"key"` Value interface{} `json:"value,omitempty"` TTL time.Duration `json:"ttl,omitempty"` } type BackendServerRoomResponse struct { Type string `json:"type"` Dialout *BackendRoomDialoutResponse `json:"dialout,omitempty"` } type BackendRoomDialoutError struct { Code string `json:"code"` Message string `json:"message,omitempty"` } type BackendRoomDialoutResponse struct { CallId string `json:"callid,omitempty"` Error *Error `json:"error,omitempty"` } // Requests from the signaling server to the Nextcloud backend. 
type BackendClientAuthRequest struct { Version string `json:"version"` Params *json.RawMessage `json:"params"` } type BackendClientRequest struct { json.Marshaler json.Unmarshaler Type string `json:"type"` Auth *BackendClientAuthRequest `json:"auth,omitempty"` Room *BackendClientRoomRequest `json:"room,omitempty"` Ping *BackendClientPingRequest `json:"ping,omitempty"` Session *BackendClientSessionRequest `json:"session,omitempty"` } func NewBackendClientAuthRequest(params *json.RawMessage) *BackendClientRequest { return &BackendClientRequest{ Type: "auth", Auth: &BackendClientAuthRequest{ Version: BackendVersion, Params: params, }, } } type BackendClientResponse struct { json.Marshaler json.Unmarshaler Type string `json:"type"` Error *Error `json:"error,omitempty"` Auth *BackendClientAuthResponse `json:"auth,omitempty"` Room *BackendClientRoomResponse `json:"room,omitempty"` Ping *BackendClientRingResponse `json:"ping,omitempty"` Session *BackendClientSessionResponse `json:"session,omitempty"` } type BackendClientAuthResponse struct { Version string `json:"version"` UserId string `json:"userid"` User *json.RawMessage `json:"user"` } type BackendClientRoomRequest struct { Version string `json:"version"` RoomId string `json:"roomid"` Action string `json:"action,omitempty"` UserId string `json:"userid"` SessionId string `json:"sessionid"` // For Nextcloud Talk with SIP support. 
ActorId string `json:"actorid,omitempty"` ActorType string `json:"actortype,omitempty"` InCall int `json:"incall,omitempty"` } func NewBackendClientRoomRequest(roomid string, userid string, sessionid string) *BackendClientRequest { return &BackendClientRequest{ Type: "room", Room: &BackendClientRoomRequest{ Version: BackendVersion, RoomId: roomid, UserId: userid, SessionId: sessionid, }, } } type BackendClientRoomResponse struct { Version string `json:"version"` RoomId string `json:"roomid"` Properties *json.RawMessage `json:"properties"` // Optional information about the Nextcloud Talk session. Can be used for // example to define a "userid" for otherwise anonymous users. // See "RoomSessionData" for a possible content. Session *json.RawMessage `json:"session,omitempty"` Permissions *[]Permission `json:"permissions,omitempty"` } type RoomSessionData struct { UserId string `json:"userid,omitempty"` } type BackendPingEntry struct { UserId string `json:"userid,omitempty"` SessionId string `json:"sessionid"` } type BackendClientPingRequest struct { Version string `json:"version"` RoomId string `json:"roomid"` Entries []BackendPingEntry `json:"entries"` } func NewBackendClientPingRequest(roomid string, entries []BackendPingEntry) *BackendClientRequest { return &BackendClientRequest{ Type: "ping", Ping: &BackendClientPingRequest{ Version: BackendVersion, RoomId: roomid, Entries: entries, }, } } type BackendClientRingResponse struct { Version string `json:"version"` RoomId string `json:"roomid"` } type BackendClientSessionRequest struct { Version string `json:"version"` RoomId string `json:"roomid"` Action string `json:"action"` SessionId string `json:"sessionid"` UserId string `json:"userid,omitempty"` User *json.RawMessage `json:"user,omitempty"` } type BackendClientSessionResponse struct { Version string `json:"version"` RoomId string `json:"roomid"` } func NewBackendClientSessionRequest(roomid string, action string, sessionid string, msg 
*AddSessionInternalClientMessage) *BackendClientRequest { request := &BackendClientRequest{ Type: "session", Session: &BackendClientSessionRequest{ Version: BackendVersion, RoomId: roomid, Action: action, SessionId: sessionid, }, } if msg != nil { request.Session.UserId = msg.UserId request.Session.User = msg.User } return request } type OcsMeta struct { Status string `json:"status"` StatusCode int `json:"statuscode"` Message string `json:"message"` } type OcsBody struct { Meta OcsMeta `json:"meta"` Data *json.RawMessage `json:"data"` } type OcsResponse struct { json.Marshaler json.Unmarshaler Ocs *OcsBody `json:"ocs"` } // See https://tools.ietf.org/html/draft-uberti-behave-turn-rest-00 type TurnCredentials struct { Username string `json:"username"` Password string `json:"password"` TTL int64 `json:"ttl"` URIs []string `json:"uris"` } // Information on a backend in the etcd cluster. type BackendInformationEtcd struct { parsedUrl *url.URL Url string `json:"url"` Secret string `json:"secret"` MaxStreamBitrate int `json:"maxstreambitrate,omitempty"` MaxScreenBitrate int `json:"maxscreenbitrate,omitempty"` SessionLimit uint64 `json:"sessionlimit,omitempty"` } func (p *BackendInformationEtcd) CheckValid() error { if p.Url == "" { return fmt.Errorf("url missing") } if p.Secret == "" { return fmt.Errorf("secret missing") } parsedUrl, err := url.Parse(p.Url) if err != nil { return fmt.Errorf("invalid url: %w", err) } if strings.Contains(parsedUrl.Host, ":") && hasStandardPort(parsedUrl) { parsedUrl.Host = parsedUrl.Hostname() p.Url = parsedUrl.String() } p.parsedUrl = parsedUrl return nil } nextcloud-spreed-signaling-1.2.4/api_backend_test.go000066400000000000000000000045161460321600400225640ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "net/http" "testing" ) func TestBackendChecksum(t *testing.T) { rnd := newRandomString(32) body := []byte{1, 2, 3, 4, 5} secret := []byte("shared-secret") check1 := CalculateBackendChecksum(rnd, body, secret) check2 := CalculateBackendChecksum(rnd, body, secret) if check1 != check2 { t.Errorf("Expected equal checksums, got %s and %s", check1, check2) } if !ValidateBackendChecksumValue(check1, rnd, body, secret) { t.Errorf("Checksum %s could not be validated", check1) } if ValidateBackendChecksumValue(check1[1:], rnd, body, secret) { t.Errorf("Checksum %s should not be valid", check1[1:]) } if ValidateBackendChecksumValue(check1[:len(check1)-1], rnd, body, secret) { t.Errorf("Checksum %s should not be valid", check1[:len(check1)-1]) } request := &http.Request{ Header: make(http.Header), } request.Header.Set("Spreed-Signaling-Random", rnd) request.Header.Set("Spreed-Signaling-Checksum", check1) if !ValidateBackendChecksum(request, body, secret) { t.Errorf("Checksum %s could not be validated from request", check1) } } func TestValidNumbers(t *testing.T) { valid := []string{ "+12", "+12345", } invalid := []string{ "+1", "12345", " +12345", " +12345 ", "+123-45", } for _, number := range valid { if 
!isValidNumber(number) { t.Errorf("number %s should be valid", number) } } for _, number := range invalid { if isValidNumber(number) { t.Errorf("number %s should not be valid", number) } } } nextcloud-spreed-signaling-1.2.4/api_grpc.go000066400000000000000000000023501460321600400210630ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "fmt" ) // Information on a GRPC target in the etcd cluster. type GrpcTargetInformationEtcd struct { Address string `json:"address"` } func (p *GrpcTargetInformationEtcd) CheckValid() error { if l := len(p.Address); l == 0 { return fmt.Errorf("address missing") } else if p.Address[l-1] == '/' { p.Address = p.Address[:l-1] } return nil } nextcloud-spreed-signaling-1.2.4/api_proxy.go000066400000000000000000000147551460321600400213250ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "encoding/json" "fmt" "github.com/golang-jwt/jwt/v4" ) type ProxyClientMessage struct { json.Marshaler json.Unmarshaler // The unique request id (optional). Id string `json:"id,omitempty"` // The type of the request. Type string `json:"type"` // Filled for type "hello" Hello *HelloProxyClientMessage `json:"hello,omitempty"` Bye *ByeProxyClientMessage `json:"bye,omitempty"` Command *CommandProxyClientMessage `json:"command,omitempty"` Payload *PayloadProxyClientMessage `json:"payload,omitempty"` } func (m *ProxyClientMessage) CheckValid() error { switch m.Type { case "": return fmt.Errorf("type missing") case "hello": if m.Hello == nil { return fmt.Errorf("hello missing") } else if err := m.Hello.CheckValid(); err != nil { return err } case "bye": if m.Bye != nil { // Bye contents are optional if err := m.Bye.CheckValid(); err != nil { return err } } case "command": if m.Command == nil { return fmt.Errorf("command missing") } else if err := m.Command.CheckValid(); err != nil { return err } case "payload": if m.Payload == nil { return fmt.Errorf("payload missing") } else if err := m.Payload.CheckValid(); err != nil { return err } } return nil } func (m *ProxyClientMessage) NewErrorServerMessage(e *Error) 
*ProxyServerMessage { return &ProxyServerMessage{ Id: m.Id, Type: "error", Error: e, } } func (m *ProxyClientMessage) NewWrappedErrorServerMessage(e error) *ProxyServerMessage { return m.NewErrorServerMessage(NewError("internal_error", e.Error())) } // ProxyServerMessage is a message that is sent from the server to a client. type ProxyServerMessage struct { json.Marshaler json.Unmarshaler Id string `json:"id,omitempty"` Type string `json:"type"` Error *Error `json:"error,omitempty"` Hello *HelloProxyServerMessage `json:"hello,omitempty"` Bye *ByeProxyServerMessage `json:"bye,omitempty"` Command *CommandProxyServerMessage `json:"command,omitempty"` Payload *PayloadProxyServerMessage `json:"payload,omitempty"` Event *EventProxyServerMessage `json:"event,omitempty"` } func (r *ProxyServerMessage) CloseAfterSend(session Session) bool { switch r.Type { case "bye": return true default: return false } } // Type "hello" type TokenClaims struct { jwt.RegisteredClaims } type HelloProxyClientMessage struct { Version string `json:"version"` ResumeId string `json:"resumeid"` Features []string `json:"features,omitempty"` // The authentication credentials. Token string `json:"token"` } func (m *HelloProxyClientMessage) CheckValid() error { if m.Version != HelloVersionV1 { return fmt.Errorf("unsupported hello version: %s", m.Version) } if m.ResumeId == "" { if m.Token == "" { return fmt.Errorf("token missing") } } return nil } type HelloProxyServerMessage struct { Version string `json:"version"` SessionId string `json:"sessionid"` Server *WelcomeServerMessage `json:"server,omitempty"` } // Type "bye" type ByeProxyClientMessage struct { } func (m *ByeProxyClientMessage) CheckValid() error { // No additional validation required. 
return nil } type ByeProxyServerMessage struct { Reason string `json:"reason"` } // Type "command" type CommandProxyClientMessage struct { Type string `json:"type"` Sid string `json:"sid,omitempty"` StreamType StreamType `json:"streamType,omitempty"` PublisherId string `json:"publisherId,omitempty"` ClientId string `json:"clientId,omitempty"` Bitrate int `json:"bitrate,omitempty"` MediaTypes MediaType `json:"mediatypes,omitempty"` } func (m *CommandProxyClientMessage) CheckValid() error { switch m.Type { case "": return fmt.Errorf("type missing") case "create-publisher": if m.StreamType == "" { return fmt.Errorf("stream type missing") } case "create-subscriber": if m.PublisherId == "" { return fmt.Errorf("publisher id missing") } if m.StreamType == "" { return fmt.Errorf("stream type missing") } case "delete-publisher": fallthrough case "delete-subscriber": if m.ClientId == "" { return fmt.Errorf("client id missing") } } return nil } type CommandProxyServerMessage struct { Id string `json:"id,omitempty"` Sid string `json:"sid,omitempty"` Bitrate int `json:"bitrate,omitempty"` } // Type "payload" type PayloadProxyClientMessage struct { Type string `json:"type"` ClientId string `json:"clientId"` Sid string `json:"sid,omitempty"` Payload map[string]interface{} `json:"payload,omitempty"` } func (m *PayloadProxyClientMessage) CheckValid() error { switch m.Type { case "": return fmt.Errorf("type missing") case "offer": fallthrough case "answer": fallthrough case "candidate": if len(m.Payload) == 0 { return fmt.Errorf("payload missing") } case "endOfCandidates": fallthrough case "requestoffer": // No payload required. 
} if m.ClientId == "" { return fmt.Errorf("client id missing") } return nil } type PayloadProxyServerMessage struct { Type string `json:"type"` ClientId string `json:"clientId"` Payload map[string]interface{} `json:"payload"` } // Type "event" type EventProxyServerMessage struct { Type string `json:"type"` ClientId string `json:"clientId,omitempty"` Load int64 `json:"load,omitempty"` Sid string `json:"sid,omitempty"` } // Information on a proxy in the etcd cluster. type ProxyInformationEtcd struct { Address string `json:"address"` } func (p *ProxyInformationEtcd) CheckValid() error { if p.Address == "" { return fmt.Errorf("address missing") } if p.Address[len(p.Address)-1] != '/' { p.Address += "/" } return nil } nextcloud-spreed-signaling-1.2.4/api_signaling.go000066400000000000000000000571741460321600400221210ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "encoding/json" "errors" "fmt" "log" "net/url" "sort" "strings" "time" "github.com/golang-jwt/jwt/v4" ) const ( // Version 1.0 validates auth params against the Nextcloud instance. HelloVersionV1 = "1.0" // Version 2.0 validates auth params encoded as JWT. 
HelloVersionV2 = "2.0" ) // ClientMessage is a message that is sent from a client to the server. type ClientMessage struct { json.Marshaler json.Unmarshaler // The unique request id (optional). Id string `json:"id,omitempty"` // The type of the request. Type string `json:"type"` // Filled for type "hello" Hello *HelloClientMessage `json:"hello,omitempty"` Bye *ByeClientMessage `json:"bye,omitempty"` Room *RoomClientMessage `json:"room,omitempty"` Message *MessageClientMessage `json:"message,omitempty"` Control *ControlClientMessage `json:"control,omitempty"` Internal *InternalClientMessage `json:"internal,omitempty"` TransientData *TransientDataClientMessage `json:"transient,omitempty"` } func (m *ClientMessage) CheckValid() error { switch m.Type { case "": return fmt.Errorf("type missing") case "hello": if m.Hello == nil { return fmt.Errorf("hello missing") } else if err := m.Hello.CheckValid(); err != nil { return err } case "bye": // No additional check required. case "room": if m.Room == nil { return fmt.Errorf("room missing") } else if err := m.Room.CheckValid(); err != nil { return err } case "message": if m.Message == nil { return fmt.Errorf("message missing") } else if err := m.Message.CheckValid(); err != nil { return err } case "control": if m.Control == nil { return fmt.Errorf("control missing") } else if err := m.Control.CheckValid(); err != nil { return err } case "internal": if m.Internal == nil { return fmt.Errorf("internal missing") } else if err := m.Internal.CheckValid(); err != nil { return err } case "transient": if m.TransientData == nil { return fmt.Errorf("transient missing") } else if err := m.TransientData.CheckValid(); err != nil { return err } } return nil } func (m *ClientMessage) String() string { data, err := json.Marshal(m) if err != nil { return fmt.Sprintf("Could not serialize %#v: %s", m, err) } return string(data) } func (m *ClientMessage) NewErrorServerMessage(e *Error) *ServerMessage { return &ServerMessage{ Id: m.Id, Type: 
"error", Error: e, } } func (m *ClientMessage) NewWrappedErrorServerMessage(e error) *ServerMessage { if e, ok := e.(*Error); ok { return m.NewErrorServerMessage(e) } return m.NewErrorServerMessage(NewError("internal_error", e.Error())) } // ServerMessage is a message that is sent from the server to a client. type ServerMessage struct { json.Marshaler json.Unmarshaler Id string `json:"id,omitempty"` Type string `json:"type"` Error *Error `json:"error,omitempty"` Welcome *WelcomeServerMessage `json:"welcome,omitempty"` Hello *HelloServerMessage `json:"hello,omitempty"` Bye *ByeServerMessage `json:"bye,omitempty"` Room *RoomServerMessage `json:"room,omitempty"` Message *MessageServerMessage `json:"message,omitempty"` Control *ControlServerMessage `json:"control,omitempty"` Event *EventServerMessage `json:"event,omitempty"` TransientData *TransientDataServerMessage `json:"transient,omitempty"` Internal *InternalServerMessage `json:"internal,omitempty"` Dialout *DialoutInternalClientMessage `json:"dialout,omitempty"` } func (r *ServerMessage) CloseAfterSend(session Session) bool { if r.Type == "bye" { return true } if r.Type == "event" { if evt := r.Event; evt != nil && evt.Target == "roomlist" && evt.Type == "disinvite" { // Only close session / connection if the disinvite was for the room // the session is currently in. 
if session != nil && evt.Disinvite != nil { if room := session.GetRoom(); room != nil && evt.Disinvite.RoomId == room.Id() { return true } } } } return false } func (r *ServerMessage) IsChatRefresh() bool { if r.Type != "message" || r.Message == nil || r.Message.Data == nil || len(*r.Message.Data) == 0 { return false } var data MessageServerMessageData if err := json.Unmarshal(*r.Message.Data, &data); err != nil { return false } if data.Type != "chat" || data.Chat == nil { return false } return data.Chat.Refresh } func (r *ServerMessage) IsParticipantsUpdate() bool { if r.Type != "event" || r.Event == nil { return false } if event := r.Event; event.Target != "participants" || event.Type != "update" { return false } return true } func (r *ServerMessage) String() string { data, err := json.Marshal(r) if err != nil { return fmt.Sprintf("Could not serialize %#v: %s", r, err) } return string(data) } type Error struct { Code string `json:"code"` Message string `json:"message"` Details json.RawMessage `json:"details,omitempty"` } func NewError(code string, message string) *Error { return NewErrorDetail(code, message, nil) } func NewErrorDetail(code string, message string, details interface{}) *Error { var rawDetails json.RawMessage if details != nil { var err error if rawDetails, err = json.Marshal(details); err != nil { log.Printf("Could not marshal details %+v for error %s with %s: %s", details, code, message, err) return NewError("internal_error", "Could not marshal error details") } } return &Error{ Code: code, Message: message, Details: rawDetails, } } func (e *Error) Error() string { return e.Message } type WelcomeServerMessage struct { Version string `json:"version"` Features []string `json:"features,omitempty"` Country string `json:"country,omitempty"` } func NewWelcomeServerMessage(version string, feature ...string) *WelcomeServerMessage { message := &WelcomeServerMessage{ Version: version, Features: feature, } if len(feature) > 0 { sort.Strings(message.Features) 
} return message } func (m *WelcomeServerMessage) AddFeature(feature ...string) { newFeatures := make([]string, len(m.Features)) copy(newFeatures, m.Features) for _, feat := range feature { found := false for _, f := range newFeatures { if f == feat { found = true break } } if !found { newFeatures = append(newFeatures, feat) } } sort.Strings(newFeatures) m.Features = newFeatures } func (m *WelcomeServerMessage) RemoveFeature(feature ...string) { newFeatures := make([]string, len(m.Features)) copy(newFeatures, m.Features) for _, feat := range feature { idx := sort.SearchStrings(newFeatures, feat) if idx < len(newFeatures) && newFeatures[idx] == feat { newFeatures = append(newFeatures[:idx], newFeatures[idx+1:]...) } } m.Features = newFeatures } const ( HelloClientTypeClient = "client" HelloClientTypeInternal = "internal" HelloClientTypeVirtual = "virtual" ) func hasStandardPort(u *url.URL) bool { switch u.Scheme { case "http": return u.Port() == "80" case "https": return u.Port() == "443" default: return false } } type ClientTypeInternalAuthParams struct { Random string `json:"random"` Token string `json:"token"` Backend string `json:"backend"` parsedBackend *url.URL } func (p *ClientTypeInternalAuthParams) CheckValid() error { if p.Backend == "" { return fmt.Errorf("backend missing") } else if u, err := url.Parse(p.Backend); err != nil { return err } else { if strings.Contains(u.Host, ":") && hasStandardPort(u) { u.Host = u.Hostname() } p.parsedBackend = u } return nil } type HelloV2AuthParams struct { Token string `json:"token"` } func (p *HelloV2AuthParams) CheckValid() error { if p.Token == "" { return fmt.Errorf("token missing") } return nil } type HelloV2TokenClaims struct { jwt.RegisteredClaims UserData *json.RawMessage `json:"userdata,omitempty"` } type HelloClientMessageAuth struct { // The client type that is connecting. 
Leave empty to use the default // "HelloClientTypeClient" Type string `json:"type,omitempty"` Params *json.RawMessage `json:"params"` Url string `json:"url"` parsedUrl *url.URL internalParams ClientTypeInternalAuthParams helloV2Params HelloV2AuthParams } // Type "hello" type HelloClientMessage struct { Version string `json:"version"` ResumeId string `json:"resumeid"` Features []string `json:"features,omitempty"` // The authentication credentials. Auth HelloClientMessageAuth `json:"auth"` } func (m *HelloClientMessage) CheckValid() error { if m.Version != HelloVersionV1 && m.Version != HelloVersionV2 { return InvalidHelloVersion } if m.ResumeId == "" { if m.Auth.Params == nil || len(*m.Auth.Params) == 0 { return fmt.Errorf("params missing") } if m.Auth.Type == "" { m.Auth.Type = HelloClientTypeClient } switch m.Auth.Type { case HelloClientTypeClient: if m.Auth.Url == "" { return fmt.Errorf("url missing") } else if u, err := url.ParseRequestURI(m.Auth.Url); err != nil { return err } else { if strings.Contains(u.Host, ":") && hasStandardPort(u) { u.Host = u.Hostname() } m.Auth.parsedUrl = u } switch m.Version { case HelloVersionV1: // No additional validation necessary. case HelloVersionV2: if err := json.Unmarshal(*m.Auth.Params, &m.Auth.helloV2Params); err != nil { return err } else if err := m.Auth.helloV2Params.CheckValid(); err != nil { return err } } case HelloClientTypeInternal: if err := json.Unmarshal(*m.Auth.Params, &m.Auth.internalParams); err != nil { return err } else if err := m.Auth.internalParams.CheckValid(); err != nil { return err } default: return fmt.Errorf("unsupported auth type") } } return nil } const ( // Features to send to all clients. 
ServerFeatureMcu = "mcu" ServerFeatureSimulcast = "simulcast" ServerFeatureUpdateSdp = "update-sdp" ServerFeatureAudioVideoPermissions = "audio-video-permissions" ServerFeatureTransientData = "transient-data" ServerFeatureInCallAll = "incall-all" ServerFeatureWelcome = "welcome" ServerFeatureHelloV2 = "hello-v2" ServerFeatureSwitchTo = "switchto" ServerFeatureDialout = "dialout" // Features to send to internal clients only. ServerFeatureInternalVirtualSessions = "virtual-sessions" // Possible client features from the "hello" request. ClientFeatureInternalInCall = "internal-incall" ClientFeatureStartDialout = "start-dialout" ) var ( DefaultFeatures = []string{ ServerFeatureAudioVideoPermissions, ServerFeatureTransientData, ServerFeatureInCallAll, ServerFeatureWelcome, ServerFeatureHelloV2, ServerFeatureSwitchTo, ServerFeatureDialout, } DefaultFeaturesInternal = []string{ ServerFeatureInternalVirtualSessions, ServerFeatureTransientData, ServerFeatureInCallAll, ServerFeatureWelcome, ServerFeatureHelloV2, ServerFeatureSwitchTo, ServerFeatureDialout, } DefaultWelcomeFeatures = []string{ ServerFeatureAudioVideoPermissions, ServerFeatureInternalVirtualSessions, ServerFeatureTransientData, ServerFeatureInCallAll, ServerFeatureWelcome, ServerFeatureHelloV2, ServerFeatureSwitchTo, ServerFeatureDialout, } ) type HelloServerMessage struct { Version string `json:"version"` SessionId string `json:"sessionid"` ResumeId string `json:"resumeid"` UserId string `json:"userid"` // TODO: Remove once all clients have switched to the "welcome" message. Server *WelcomeServerMessage `json:"server,omitempty"` } // Type "bye" type ByeClientMessage struct { } func (m *ByeClientMessage) CheckValid() error { // No additional validation required. 
return nil } type ByeServerMessage struct { Reason string `json:"reason"` } // Type "room" type RoomClientMessage struct { RoomId string `json:"roomid"` SessionId string `json:"sessionid,omitempty"` } func (m *RoomClientMessage) CheckValid() error { // No additional validation required. return nil } type RoomServerMessage struct { RoomId string `json:"roomid"` Properties *json.RawMessage `json:"properties,omitempty"` } type RoomErrorDetails struct { Room *RoomServerMessage `json:"room"` } // Type "message" const ( RecipientTypeSession = "session" RecipientTypeUser = "user" RecipientTypeRoom = "room" ) type MessageClientMessageRecipient struct { Type string `json:"type"` SessionId string `json:"sessionid,omitempty"` UserId string `json:"userid,omitempty"` } type MessageClientMessage struct { Recipient MessageClientMessageRecipient `json:"recipient"` Data *json.RawMessage `json:"data"` } type MessageClientMessageData struct { Type string `json:"type"` Sid string `json:"sid"` RoomType string `json:"roomType"` Bitrate int `json:"bitrate,omitempty"` Payload map[string]interface{} `json:"payload"` } func (m *MessageClientMessageData) CheckValid() error { if !IsValidStreamType(m.RoomType) { return fmt.Errorf("invalid room type: %s", m.RoomType) } return nil } func (m *MessageClientMessage) CheckValid() error { if m.Data == nil || len(*m.Data) == 0 { return fmt.Errorf("message empty") } switch m.Recipient.Type { case RecipientTypeRoom: // No additional checks required. 
case RecipientTypeSession: if m.Recipient.SessionId == "" { return fmt.Errorf("session id missing") } case RecipientTypeUser: if m.Recipient.UserId == "" { return fmt.Errorf("user id missing") } default: return fmt.Errorf("unsupported recipient type %v", m.Recipient.Type) } return nil } type MessageServerMessageSender struct { Type string `json:"type"` SessionId string `json:"sessionid,omitempty"` UserId string `json:"userid,omitempty"` } type MessageServerMessageDataChat struct { Refresh bool `json:"refresh"` } type MessageServerMessageData struct { Type string `json:"type"` Chat *MessageServerMessageDataChat `json:"chat,omitempty"` } type MessageServerMessage struct { Sender *MessageServerMessageSender `json:"sender"` Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"` Data *json.RawMessage `json:"data"` } // Type "control" type ControlClientMessage struct { MessageClientMessage } func (m *ControlClientMessage) CheckValid() error { return m.MessageClientMessage.CheckValid() } type ControlServerMessage struct { Sender *MessageServerMessageSender `json:"sender"` Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"` Data *json.RawMessage `json:"data"` } // Type "internal" type CommonSessionInternalClientMessage struct { SessionId string `json:"sessionid"` RoomId string `json:"roomid"` } func (m *CommonSessionInternalClientMessage) CheckValid() error { if m.SessionId == "" { return fmt.Errorf("sessionid missing") } if m.RoomId == "" { return fmt.Errorf("roomid missing") } return nil } type AddSessionOptions struct { ActorId string `json:"actorId,omitempty"` ActorType string `json:"actorType,omitempty"` } type AddSessionInternalClientMessage struct { CommonSessionInternalClientMessage UserId string `json:"userid,omitempty"` User *json.RawMessage `json:"user,omitempty"` Flags uint32 `json:"flags,omitempty"` InCall *int `json:"incall,omitempty"` Options *AddSessionOptions `json:"options,omitempty"` } func (m 
*AddSessionInternalClientMessage) CheckValid() error { return m.CommonSessionInternalClientMessage.CheckValid() } type UpdateSessionInternalClientMessage struct { CommonSessionInternalClientMessage Flags *uint32 `json:"flags,omitempty"` InCall *int `json:"incall,omitempty"` } func (m *UpdateSessionInternalClientMessage) CheckValid() error { return m.CommonSessionInternalClientMessage.CheckValid() } type RemoveSessionInternalClientMessage struct { CommonSessionInternalClientMessage UserId string `json:"userid,omitempty"` } func (m *RemoveSessionInternalClientMessage) CheckValid() error { return m.CommonSessionInternalClientMessage.CheckValid() } type InCallInternalClientMessage struct { InCall int `json:"incall"` } func (m *InCallInternalClientMessage) CheckValid() error { return nil } type DialoutStatus string var ( DialoutStatusAccepted DialoutStatus = "accepted" DialoutStatusRinging DialoutStatus = "ringing" DialoutStatusConnected DialoutStatus = "connected" DialoutStatusRejected DialoutStatus = "rejected" DialoutStatusCleared DialoutStatus = "cleared" ) type DialoutStatusInternalClientMessage struct { CallId string `json:"callid"` Status DialoutStatus `json:"status"` // Cause is set if Status is "cleared" or "rejected". 
Cause string `json:"cause,omitempty"` Code int `json:"code,omitempty"` Message string `json:"message,omitempty"` } type DialoutInternalClientMessage struct { Type string `json:"type"` RoomId string `json:"roomid,omitempty"` Error *Error `json:"error,omitempty"` Status *DialoutStatusInternalClientMessage `json:"status,omitempty"` } func (m *DialoutInternalClientMessage) CheckValid() error { switch m.Type { case "": return errors.New("type missing") case "error": if m.Error == nil { return errors.New("error missing") } case "status": if m.Status == nil { return errors.New("status missing") } } return nil } type InternalClientMessage struct { Type string `json:"type"` AddSession *AddSessionInternalClientMessage `json:"addsession,omitempty"` UpdateSession *UpdateSessionInternalClientMessage `json:"updatesession,omitempty"` RemoveSession *RemoveSessionInternalClientMessage `json:"removesession,omitempty"` InCall *InCallInternalClientMessage `json:"incall,omitempty"` Dialout *DialoutInternalClientMessage `json:"dialout,omitempty"` } func (m *InternalClientMessage) CheckValid() error { switch m.Type { case "": return errors.New("type missing") case "addsession": if m.AddSession == nil { return fmt.Errorf("addsession missing") } else if err := m.AddSession.CheckValid(); err != nil { return err } case "updatesession": if m.UpdateSession == nil { return fmt.Errorf("updatesession missing") } else if err := m.UpdateSession.CheckValid(); err != nil { return err } case "removesession": if m.RemoveSession == nil { return fmt.Errorf("removesession missing") } else if err := m.RemoveSession.CheckValid(); err != nil { return err } case "incall": if m.InCall == nil { return fmt.Errorf("incall missing") } else if err := m.InCall.CheckValid(); err != nil { return err } case "dialout": if m.Dialout == nil { return fmt.Errorf("dialout missing") } else if err := m.Dialout.CheckValid(); err != nil { return err } } return nil } type InternalServerDialoutRequest struct { RoomId string 
`json:"roomid"` Backend string `json:"backend"` Request *BackendRoomDialoutRequest `json:"request"` } type InternalServerMessage struct { Type string `json:"type"` Dialout *InternalServerDialoutRequest `json:"dialout,omitempty"` } // Type "event" type RoomEventServerMessage struct { RoomId string `json:"roomid"` Properties *json.RawMessage `json:"properties,omitempty"` // TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk. InCall *json.RawMessage `json:"incall,omitempty"` Changed []map[string]interface{} `json:"changed,omitempty"` Users []map[string]interface{} `json:"users,omitempty"` All bool `json:"all,omitempty"` } func (m *RoomEventServerMessage) String() string { data, err := json.Marshal(m) if err != nil { return fmt.Sprintf("Could not serialize %#v: %s", m, err) } return string(data) } const ( DisinviteReasonDisinvited = "disinvited" DisinviteReasonDeleted = "deleted" ) type RoomDisinviteEventServerMessage struct { RoomEventServerMessage Reason string `json:"reason"` } type RoomEventMessage struct { RoomId string `json:"roomid"` Data *json.RawMessage `json:"data,omitempty"` } type RoomFlagsServerMessage struct { RoomId string `json:"roomid"` SessionId string `json:"sessionid"` Flags uint32 `json:"flags"` } type ChatComment map[string]interface{} type RoomEventMessageDataChat struct { Comment *ChatComment `json:"comment,omitempty"` } type RoomEventMessageData struct { Type string `json:"type"` Chat *RoomEventMessageDataChat `json:"chat,omitempty"` } type EventServerMessage struct { Target string `json:"target"` Type string `json:"type"` // Used for target "room" Join []*EventServerMessageSessionEntry `json:"join,omitempty"` Leave []string `json:"leave,omitempty"` Change []*EventServerMessageSessionEntry `json:"change,omitempty"` SwitchTo *EventServerMessageSwitchTo `json:"switchto,omitempty"` // Used for target "roomlist" / "participants" Invite *RoomEventServerMessage `json:"invite,omitempty"` Disinvite *RoomDisinviteEventServerMessage 
`json:"disinvite,omitempty"` Update *RoomEventServerMessage `json:"update,omitempty"` Flags *RoomFlagsServerMessage `json:"flags,omitempty"` // Used for target "message" Message *RoomEventMessage `json:"message,omitempty"` } func (m *EventServerMessage) String() string { data, err := json.Marshal(m) if err != nil { return fmt.Sprintf("Could not serialize %#v: %s", m, err) } return string(data) } type EventServerMessageSessionEntry struct { SessionId string `json:"sessionid"` UserId string `json:"userid"` User *json.RawMessage `json:"user,omitempty"` RoomSessionId string `json:"roomsessionid,omitempty"` } func (e *EventServerMessageSessionEntry) Clone() *EventServerMessageSessionEntry { return &EventServerMessageSessionEntry{ SessionId: e.SessionId, UserId: e.UserId, User: e.User, RoomSessionId: e.RoomSessionId, } } type EventServerMessageSwitchTo struct { RoomId string `json:"roomid"` Details json.RawMessage `json:"details,omitempty"` } // MCU-related types type AnswerOfferMessage struct { To string `json:"to"` From string `json:"from"` Type string `json:"type"` RoomType string `json:"roomType"` Payload map[string]interface{} `json:"payload"` Sid string `json:"sid,omitempty"` } // Type "transient" type TransientDataClientMessage struct { Type string `json:"type"` Key string `json:"key,omitempty"` Value *json.RawMessage `json:"value,omitempty"` TTL time.Duration `json:"ttl,omitempty"` } func (m *TransientDataClientMessage) CheckValid() error { switch m.Type { case "set": if m.Key == "" { return fmt.Errorf("key missing") } // A "nil" value is allowed and will remove the key. 
case "remove": if m.Key == "" { return fmt.Errorf("key missing") } } return nil } type TransientDataServerMessage struct { Type string `json:"type"` Key string `json:"key,omitempty"` OldValue interface{} `json:"oldvalue,omitempty"` Value interface{} `json:"value,omitempty"` Data map[string]interface{} `json:"data,omitempty"` } nextcloud-spreed-signaling-1.2.4/api_signaling_test.go000066400000000000000000000264521460321600400231530ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "encoding/json" "fmt" "reflect" "sort" "testing" ) type testCheckValid interface { CheckValid() error } func wrapMessage(messageType string, msg testCheckValid) *ClientMessage { wrapped := &ClientMessage{ Type: messageType, } switch messageType { case "hello": wrapped.Hello = msg.(*HelloClientMessage) case "message": wrapped.Message = msg.(*MessageClientMessage) case "bye": wrapped.Bye = msg.(*ByeClientMessage) case "room": wrapped.Room = msg.(*RoomClientMessage) default: return nil } return wrapped } func testMessages(t *testing.T, messageType string, valid_messages []testCheckValid, invalid_messages []testCheckValid) { for _, msg := range valid_messages { if err := msg.CheckValid(); err != nil { t.Errorf("Message %+v should be valid, got %s", msg, err) } // If the inner message is valid, it should also be valid in a wrapped // ClientMessage. if wrapped := wrapMessage(messageType, msg); wrapped == nil { t.Errorf("Unknown message type: %s", messageType) } else if err := wrapped.CheckValid(); err != nil { t.Errorf("Message %+v should be valid, got %s", wrapped, err) } } for _, msg := range invalid_messages { if err := msg.CheckValid(); err == nil { t.Errorf("Message %+v should not be valid", msg) } // If the inner message is invalid, it should also be invalid in a // wrapped ClientMessage. if wrapped := wrapMessage(messageType, msg); wrapped == nil { t.Errorf("Unknown message type: %s", messageType) } else if err := wrapped.CheckValid(); err == nil { t.Errorf("Message %+v should not be valid", wrapped) } } } func TestClientMessage(t *testing.T) { // The message needs a type. 
msg := ClientMessage{} if err := msg.CheckValid(); err == nil { t.Errorf("Message %+v should not be valid", msg) } } func TestHelloClientMessage(t *testing.T) { internalAuthParams := []byte("{\"backend\":\"https://domain.invalid\"}") tokenAuthParams := []byte("{\"token\":\"invalid-token\"}") valid_messages := []testCheckValid{ // Hello version 1 &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Params: &json.RawMessage{'{', '}'}, Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Type: "client", Params: &json.RawMessage{'{', '}'}, Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Type: "internal", Params: (*json.RawMessage)(&internalAuthParams), }, }, &HelloClientMessage{ Version: HelloVersionV1, ResumeId: "the-resume-id", }, // Hello version 2 &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Params: (*json.RawMessage)(&tokenAuthParams), Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Type: "client", Params: (*json.RawMessage)(&tokenAuthParams), Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV2, ResumeId: "the-resume-id", }, } invalid_messages := []testCheckValid{ // Hello version 1 &HelloClientMessage{}, &HelloClientMessage{Version: "0.0"}, &HelloClientMessage{Version: HelloVersionV1}, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Params: &json.RawMessage{'{', '}'}, Type: "invalid-type", }, }, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Params: &json.RawMessage{'{', '}'}, }, }, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Params: &json.RawMessage{'{', '}'}, Url: "invalid-url", }, }, 
&HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Type: "internal", Params: &json.RawMessage{'{', '}'}, }, }, &HelloClientMessage{ Version: HelloVersionV1, Auth: HelloClientMessageAuth{ Type: "internal", Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON. }, }, // Hello version 2 &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Params: (*json.RawMessage)(&tokenAuthParams), }, }, &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Params: (*json.RawMessage)(&tokenAuthParams), Url: "invalid-url", }, }, &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Params: (*json.RawMessage)(&internalAuthParams), Url: "https://domain.invalid", }, }, &HelloClientMessage{ Version: HelloVersionV2, Auth: HelloClientMessageAuth{ Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON. Url: "https://domain.invalid", }, }, } testMessages(t, "hello", valid_messages, invalid_messages) // A "hello" message must be present msg := ClientMessage{ Type: "hello", } if err := msg.CheckValid(); err == nil { t.Errorf("Message %+v should not be valid", msg) } } func TestMessageClientMessage(t *testing.T) { valid_messages := []testCheckValid{ &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "session", SessionId: "the-session-id", }, Data: &json.RawMessage{'{', '}'}, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "user", UserId: "the-user-id", }, Data: &json.RawMessage{'{', '}'}, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "room", }, Data: &json.RawMessage{'{', '}'}, }, } invalid_messages := []testCheckValid{ &MessageClientMessage{}, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "session", SessionId: "the-session-id", }, }, &MessageClientMessage{ Recipient: 
MessageClientMessageRecipient{ Type: "session", }, Data: &json.RawMessage{'{', '}'}, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "session", UserId: "the-user-id", }, Data: &json.RawMessage{'{', '}'}, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "user", }, Data: &json.RawMessage{'{', '}'}, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "user", UserId: "the-user-id", }, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "user", SessionId: "the-user-id", }, Data: &json.RawMessage{'{', '}'}, }, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ Type: "unknown-type", }, Data: &json.RawMessage{'{', '}'}, }, } testMessages(t, "message", valid_messages, invalid_messages) // A "message" message must be present msg := ClientMessage{ Type: "message", } if err := msg.CheckValid(); err == nil { t.Errorf("Message %+v should not be valid", msg) } } func TestByeClientMessage(t *testing.T) { // Any "bye" message is valid. valid_messages := []testCheckValid{ &ByeClientMessage{}, } invalid_messages := []testCheckValid{} testMessages(t, "bye", valid_messages, invalid_messages) // The "bye" message is optional. msg := ClientMessage{ Type: "bye", } if err := msg.CheckValid(); err != nil { t.Errorf("Message %+v should be valid, got %s", msg, err) } } func TestRoomClientMessage(t *testing.T) { // Any "room" message is valid. 
valid_messages := []testCheckValid{ &RoomClientMessage{}, } invalid_messages := []testCheckValid{} testMessages(t, "room", valid_messages, invalid_messages) // But a "room" message must be present msg := ClientMessage{ Type: "room", } if err := msg.CheckValid(); err == nil { t.Errorf("Message %+v should not be valid", msg) } } func TestErrorMessages(t *testing.T) { id := "request-id" msg := ClientMessage{ Id: id, } err1 := msg.NewErrorServerMessage(&Error{}) if err1.Id != id { t.Errorf("Expected id %s, got %+v", id, err1) } if err1.Type != "error" || err1.Error == nil { t.Errorf("Expected type \"error\", got %+v", err1) } err2 := msg.NewWrappedErrorServerMessage(fmt.Errorf("test-error")) if err2.Id != id { t.Errorf("Expected id %s, got %+v", id, err2) } if err2.Type != "error" || err2.Error == nil { t.Errorf("Expected type \"error\", got %+v", err2) } if err2.Error.Code != "internal_error" { t.Errorf("Expected code \"internal_error\", got %+v", err2) } if err2.Error.Message != "test-error" { t.Errorf("Expected message \"test-error\", got %+v", err2) } // Test "error" interface if err2.Error.Error() != "test-error" { t.Errorf("Expected error string \"test-error\", got %+v", err2) } } func TestIsChatRefresh(t *testing.T) { var msg ServerMessage data_true := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":true}}") msg = ServerMessage{ Type: "message", Message: &MessageServerMessage{ Data: (*json.RawMessage)(&data_true), }, } if !msg.IsChatRefresh() { t.Error("message should be detected as chat refresh") } data_false := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":false}}") msg = ServerMessage{ Type: "message", Message: &MessageServerMessage{ Data: (*json.RawMessage)(&data_false), }, } if msg.IsChatRefresh() { t.Error("message should not be detected as chat refresh") } } func assertEqualStrings(t *testing.T, expected, result []string) { t.Helper() if expected == nil { expected = make([]string, 0) } else { sort.Strings(expected) } if result == nil { result = 
make([]string, 0) } else { sort.Strings(result) } if !reflect.DeepEqual(expected, result) { t.Errorf("Expected %+v, got %+v", expected, result) } } func Test_Welcome_AddRemoveFeature(t *testing.T) { var msg WelcomeServerMessage assertEqualStrings(t, []string{}, msg.Features) msg.AddFeature("one", "two", "one") assertEqualStrings(t, []string{"one", "two"}, msg.Features) if !sort.StringsAreSorted(msg.Features) { t.Errorf("features should be sorted, got %+v", msg.Features) } msg.AddFeature("three") assertEqualStrings(t, []string{"one", "two", "three"}, msg.Features) if !sort.StringsAreSorted(msg.Features) { t.Errorf("features should be sorted, got %+v", msg.Features) } msg.RemoveFeature("three", "one") assertEqualStrings(t, []string{"two"}, msg.Features) } nextcloud-spreed-signaling-1.2.4/async_events.go000066400000000000000000000130361460321600400220030ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import "sync" type AsyncBackendRoomEventListener interface { ProcessBackendRoomRequest(message *AsyncMessage) } type AsyncRoomEventListener interface { ProcessAsyncRoomMessage(message *AsyncMessage) } type AsyncUserEventListener interface { ProcessAsyncUserMessage(message *AsyncMessage) } type AsyncSessionEventListener interface { ProcessAsyncSessionMessage(message *AsyncMessage) } type AsyncEvents interface { Close() RegisterBackendRoomListener(roomId string, backend *Backend, listener AsyncBackendRoomEventListener) error UnregisterBackendRoomListener(roomId string, backend *Backend, listener AsyncBackendRoomEventListener) RegisterRoomListener(roomId string, backend *Backend, listener AsyncRoomEventListener) error UnregisterRoomListener(roomId string, backend *Backend, listener AsyncRoomEventListener) RegisterUserListener(userId string, backend *Backend, listener AsyncUserEventListener) error UnregisterUserListener(userId string, backend *Backend, listener AsyncUserEventListener) RegisterSessionListener(sessionId string, backend *Backend, listener AsyncSessionEventListener) error UnregisterSessionListener(sessionId string, backend *Backend, listener AsyncSessionEventListener) PublishBackendRoomMessage(roomId string, backend *Backend, message *AsyncMessage) error PublishRoomMessage(roomId string, backend *Backend, message *AsyncMessage) error PublishUserMessage(userId string, backend *Backend, message *AsyncMessage) error PublishSessionMessage(sessionId string, backend *Backend, message *AsyncMessage) error } func NewAsyncEvents(url string) (AsyncEvents, error) { client, err := NewNatsClient(url) if err != nil { return nil, err } return NewAsyncEventsNats(client) } type asyncBackendRoomSubscriber struct { mu sync.Mutex listeners map[AsyncBackendRoomEventListener]bool } func (s *asyncBackendRoomSubscriber) processBackendRoomRequest(message *AsyncMessage) { s.mu.Lock() defer s.mu.Unlock() for listener := range s.listeners { s.mu.Unlock() 
listener.ProcessBackendRoomRequest(message) s.mu.Lock() } } func (s *asyncBackendRoomSubscriber) addListener(listener AsyncBackendRoomEventListener) { s.mu.Lock() defer s.mu.Unlock() if s.listeners == nil { s.listeners = make(map[AsyncBackendRoomEventListener]bool) } s.listeners[listener] = true } func (s *asyncBackendRoomSubscriber) removeListener(listener AsyncBackendRoomEventListener) bool { s.mu.Lock() defer s.mu.Unlock() delete(s.listeners, listener) return len(s.listeners) > 0 } type asyncRoomSubscriber struct { mu sync.Mutex listeners map[AsyncRoomEventListener]bool } func (s *asyncRoomSubscriber) processAsyncRoomMessage(message *AsyncMessage) { s.mu.Lock() defer s.mu.Unlock() for listener := range s.listeners { s.mu.Unlock() listener.ProcessAsyncRoomMessage(message) s.mu.Lock() } } func (s *asyncRoomSubscriber) addListener(listener AsyncRoomEventListener) { s.mu.Lock() defer s.mu.Unlock() if s.listeners == nil { s.listeners = make(map[AsyncRoomEventListener]bool) } s.listeners[listener] = true } func (s *asyncRoomSubscriber) removeListener(listener AsyncRoomEventListener) bool { s.mu.Lock() defer s.mu.Unlock() delete(s.listeners, listener) return len(s.listeners) > 0 } type asyncUserSubscriber struct { mu sync.Mutex listeners map[AsyncUserEventListener]bool } func (s *asyncUserSubscriber) processAsyncUserMessage(message *AsyncMessage) { s.mu.Lock() defer s.mu.Unlock() for listener := range s.listeners { s.mu.Unlock() listener.ProcessAsyncUserMessage(message) s.mu.Lock() } } func (s *asyncUserSubscriber) addListener(listener AsyncUserEventListener) { s.mu.Lock() defer s.mu.Unlock() if s.listeners == nil { s.listeners = make(map[AsyncUserEventListener]bool) } s.listeners[listener] = true } func (s *asyncUserSubscriber) removeListener(listener AsyncUserEventListener) bool { s.mu.Lock() defer s.mu.Unlock() delete(s.listeners, listener) return len(s.listeners) > 0 } type asyncSessionSubscriber struct { mu sync.Mutex listeners map[AsyncSessionEventListener]bool } 
func (s *asyncSessionSubscriber) processAsyncSessionMessage(message *AsyncMessage) { s.mu.Lock() defer s.mu.Unlock() for listener := range s.listeners { s.mu.Unlock() listener.ProcessAsyncSessionMessage(message) s.mu.Lock() } } func (s *asyncSessionSubscriber) addListener(listener AsyncSessionEventListener) { s.mu.Lock() defer s.mu.Unlock() if s.listeners == nil { s.listeners = make(map[AsyncSessionEventListener]bool) } s.listeners[listener] = true } func (s *asyncSessionSubscriber) removeListener(listener AsyncSessionEventListener) bool { s.mu.Lock() defer s.mu.Unlock() delete(s.listeners, listener) return len(s.listeners) > 0 } nextcloud-spreed-signaling-1.2.4/async_events_nats.go000066400000000000000000000267631460321600400230430ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "log" "sync" "time" "github.com/nats-io/nats.go" ) func GetSubjectForBackendRoomId(roomId string, backend *Backend) string { if backend == nil || backend.IsCompat() { return GetEncodedSubject("backend.room", roomId) } return GetEncodedSubject("backend.room", roomId+"|"+backend.Id()) } func GetSubjectForRoomId(roomId string, backend *Backend) string { if backend == nil || backend.IsCompat() { return GetEncodedSubject("room", roomId) } return GetEncodedSubject("room", roomId+"|"+backend.Id()) } func GetSubjectForUserId(userId string, backend *Backend) string { if backend == nil || backend.IsCompat() { return GetEncodedSubject("user", userId) } return GetEncodedSubject("user", userId+"|"+backend.Id()) } func GetSubjectForSessionId(sessionId string, backend *Backend) string { return "session." + sessionId } type asyncSubscriberNats struct { key string client NatsClient receiver chan *nats.Msg closeChan chan struct{} subscription NatsSubscription processMessage func(*nats.Msg) } func newAsyncSubscriberNats(key string, client NatsClient) (*asyncSubscriberNats, error) { receiver := make(chan *nats.Msg, 64) sub, err := client.Subscribe(key, receiver) if err != nil { return nil, err } result := &asyncSubscriberNats{ key: key, client: client, receiver: receiver, closeChan: make(chan struct{}), subscription: sub, } return result, nil } func (s *asyncSubscriberNats) run() { defer func() { if err := s.subscription.Unsubscribe(); err != nil { log.Printf("Error unsubscribing %s: %s", s.key, err) } }() for { select { case msg := <-s.receiver: s.processMessage(msg) for count := len(s.receiver); count > 0; count-- { s.processMessage(<-s.receiver) } case <-s.closeChan: return } } } func (s *asyncSubscriberNats) close() { close(s.closeChan) } type asyncBackendRoomSubscriberNats struct { *asyncSubscriberNats asyncBackendRoomSubscriber } func newAsyncBackendRoomSubscriberNats(key string, client NatsClient) (*asyncBackendRoomSubscriberNats, error) { sub, err 
:= newAsyncSubscriberNats(key, client) if err != nil { return nil, err } result := &asyncBackendRoomSubscriberNats{ asyncSubscriberNats: sub, } result.processMessage = result.doProcessMessage go result.run() return result, nil } func (s *asyncBackendRoomSubscriberNats) doProcessMessage(msg *nats.Msg) { var message AsyncMessage if err := s.client.Decode(msg, &message); err != nil { log.Printf("Could not decode NATS message %+v, %s", msg, err) return } s.processBackendRoomRequest(&message) } type asyncRoomSubscriberNats struct { asyncRoomSubscriber *asyncSubscriberNats } func newAsyncRoomSubscriberNats(key string, client NatsClient) (*asyncRoomSubscriberNats, error) { sub, err := newAsyncSubscriberNats(key, client) if err != nil { return nil, err } result := &asyncRoomSubscriberNats{ asyncSubscriberNats: sub, } result.processMessage = result.doProcessMessage go result.run() return result, nil } func (s *asyncRoomSubscriberNats) doProcessMessage(msg *nats.Msg) { var message AsyncMessage if err := s.client.Decode(msg, &message); err != nil { log.Printf("Could not decode nats message %+v, %s", msg, err) return } s.processAsyncRoomMessage(&message) } type asyncUserSubscriberNats struct { *asyncSubscriberNats asyncUserSubscriber } func newAsyncUserSubscriberNats(key string, client NatsClient) (*asyncUserSubscriberNats, error) { sub, err := newAsyncSubscriberNats(key, client) if err != nil { return nil, err } result := &asyncUserSubscriberNats{ asyncSubscriberNats: sub, } result.processMessage = result.doProcessMessage go result.run() return result, nil } func (s *asyncUserSubscriberNats) doProcessMessage(msg *nats.Msg) { var message AsyncMessage if err := s.client.Decode(msg, &message); err != nil { log.Printf("Could not decode nats message %+v, %s", msg, err) return } s.processAsyncUserMessage(&message) } type asyncSessionSubscriberNats struct { *asyncSubscriberNats asyncSessionSubscriber } func newAsyncSessionSubscriberNats(key string, client NatsClient) 
(*asyncSessionSubscriberNats, error) { sub, err := newAsyncSubscriberNats(key, client) if err != nil { return nil, err } result := &asyncSessionSubscriberNats{ asyncSubscriberNats: sub, } result.processMessage = result.doProcessMessage go result.run() return result, nil } func (s *asyncSessionSubscriberNats) doProcessMessage(msg *nats.Msg) { var message AsyncMessage if err := s.client.Decode(msg, &message); err != nil { log.Printf("Could not decode nats message %+v, %s", msg, err) return } s.processAsyncSessionMessage(&message) } type asyncEventsNats struct { mu sync.Mutex client NatsClient backendRoomSubscriptions map[string]*asyncBackendRoomSubscriberNats roomSubscriptions map[string]*asyncRoomSubscriberNats userSubscriptions map[string]*asyncUserSubscriberNats sessionSubscriptions map[string]*asyncSessionSubscriberNats } func NewAsyncEventsNats(client NatsClient) (AsyncEvents, error) { events := &asyncEventsNats{ client: client, backendRoomSubscriptions: make(map[string]*asyncBackendRoomSubscriberNats), roomSubscriptions: make(map[string]*asyncRoomSubscriberNats), userSubscriptions: make(map[string]*asyncUserSubscriberNats), sessionSubscriptions: make(map[string]*asyncSessionSubscriberNats), } return events, nil } func (e *asyncEventsNats) Close() { e.mu.Lock() defer e.mu.Unlock() var wg sync.WaitGroup wg.Add(1) go func(subscriptions map[string]*asyncBackendRoomSubscriberNats) { defer wg.Done() for _, sub := range subscriptions { sub.close() } }(e.backendRoomSubscriptions) wg.Add(1) go func(subscriptions map[string]*asyncRoomSubscriberNats) { defer wg.Done() for _, sub := range subscriptions { sub.close() } }(e.roomSubscriptions) wg.Add(1) go func(subscriptions map[string]*asyncUserSubscriberNats) { defer wg.Done() for _, sub := range subscriptions { sub.close() } }(e.userSubscriptions) wg.Add(1) go func(subscriptions map[string]*asyncSessionSubscriberNats) { defer wg.Done() for _, sub := range subscriptions { sub.close() } }(e.sessionSubscriptions) 
e.backendRoomSubscriptions = make(map[string]*asyncBackendRoomSubscriberNats) e.roomSubscriptions = make(map[string]*asyncRoomSubscriberNats) e.userSubscriptions = make(map[string]*asyncUserSubscriberNats) e.sessionSubscriptions = make(map[string]*asyncSessionSubscriberNats) wg.Wait() e.client.Close() } func (e *asyncEventsNats) RegisterBackendRoomListener(roomId string, backend *Backend, listener AsyncBackendRoomEventListener) error { key := GetSubjectForBackendRoomId(roomId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.backendRoomSubscriptions[key] if !found { var err error if sub, err = newAsyncBackendRoomSubscriberNats(key, e.client); err != nil { return err } e.backendRoomSubscriptions[key] = sub } sub.addListener(listener) return nil } func (e *asyncEventsNats) UnregisterBackendRoomListener(roomId string, backend *Backend, listener AsyncBackendRoomEventListener) { key := GetSubjectForBackendRoomId(roomId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.backendRoomSubscriptions[key] if !found { return } if !sub.removeListener(listener) { delete(e.backendRoomSubscriptions, key) sub.close() } } func (e *asyncEventsNats) RegisterRoomListener(roomId string, backend *Backend, listener AsyncRoomEventListener) error { key := GetSubjectForRoomId(roomId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.roomSubscriptions[key] if !found { var err error if sub, err = newAsyncRoomSubscriberNats(key, e.client); err != nil { return err } e.roomSubscriptions[key] = sub } sub.addListener(listener) return nil } func (e *asyncEventsNats) UnregisterRoomListener(roomId string, backend *Backend, listener AsyncRoomEventListener) { key := GetSubjectForRoomId(roomId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.roomSubscriptions[key] if !found { return } if !sub.removeListener(listener) { delete(e.roomSubscriptions, key) sub.close() } } func (e *asyncEventsNats) RegisterUserListener(roomId string, backend *Backend, listener 
AsyncUserEventListener) error { key := GetSubjectForUserId(roomId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.userSubscriptions[key] if !found { var err error if sub, err = newAsyncUserSubscriberNats(key, e.client); err != nil { return err } e.userSubscriptions[key] = sub } sub.addListener(listener) return nil } func (e *asyncEventsNats) UnregisterUserListener(roomId string, backend *Backend, listener AsyncUserEventListener) { key := GetSubjectForUserId(roomId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.userSubscriptions[key] if !found { return } if !sub.removeListener(listener) { delete(e.userSubscriptions, key) sub.close() } } func (e *asyncEventsNats) RegisterSessionListener(sessionId string, backend *Backend, listener AsyncSessionEventListener) error { key := GetSubjectForSessionId(sessionId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.sessionSubscriptions[key] if !found { var err error if sub, err = newAsyncSessionSubscriberNats(key, e.client); err != nil { return err } e.sessionSubscriptions[key] = sub } sub.addListener(listener) return nil } func (e *asyncEventsNats) UnregisterSessionListener(sessionId string, backend *Backend, listener AsyncSessionEventListener) { key := GetSubjectForSessionId(sessionId, backend) e.mu.Lock() defer e.mu.Unlock() sub, found := e.sessionSubscriptions[key] if !found { return } if !sub.removeListener(listener) { delete(e.sessionSubscriptions, key) sub.close() } } func (e *asyncEventsNats) publish(subject string, message *AsyncMessage) error { message.SendTime = time.Now() return e.client.Publish(subject, message) } func (e *asyncEventsNats) PublishBackendRoomMessage(roomId string, backend *Backend, message *AsyncMessage) error { subject := GetSubjectForBackendRoomId(roomId, backend) return e.publish(subject, message) } func (e *asyncEventsNats) PublishRoomMessage(roomId string, backend *Backend, message *AsyncMessage) error { subject := GetSubjectForRoomId(roomId, backend) return 
e.publish(subject, message) } func (e *asyncEventsNats) PublishUserMessage(userId string, backend *Backend, message *AsyncMessage) error { subject := GetSubjectForUserId(userId, backend) return e.publish(subject, message) } func (e *asyncEventsNats) PublishSessionMessage(sessionId string, backend *Backend, message *AsyncMessage) error { subject := GetSubjectForSessionId(sessionId, backend) return e.publish(subject, message) } nextcloud-spreed-signaling-1.2.4/async_events_test.go000066400000000000000000000035001460321600400230350ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "strings" "testing" ) var ( eventBackendsForTest = []string{ "loopback", "nats", } ) func getAsyncEventsForTest(t *testing.T) AsyncEvents { var events AsyncEvents if strings.HasSuffix(t.Name(), "/nats") { events = getRealAsyncEventsForTest(t) } else { events = getLoopbackAsyncEventsForTest(t) } t.Cleanup(func() { events.Close() }) return events } func getRealAsyncEventsForTest(t *testing.T) AsyncEvents { url := startLocalNatsServer(t) events, err := NewAsyncEvents(url) if err != nil { t.Fatal(err) } return events } func getLoopbackAsyncEventsForTest(t *testing.T) AsyncEvents { events, err := NewAsyncEvents(NatsLoopbackUrl) if err != nil { t.Fatal(err) } t.Cleanup(func() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() nats := (events.(*asyncEventsNats)).client (nats).(*LoopbackNatsClient).waitForSubscriptionsEmpty(ctx, t) }) return events } nextcloud-spreed-signaling-1.2.4/backend_client.go000066400000000000000000000141711460321600400222300ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "net/http" "net/url" "strings" "github.com/dlintw/goconf" ) var ( ErrNotRedirecting = errors.New("not redirecting to different host") ErrUnsupportedContentType = errors.New("unsupported_content_type") ErrIncompleteResponse = errors.New("incomplete OCS response") ErrThrottledResponse = errors.New("throttled OCS response") ) type BackendClient struct { hub *Hub version string backends *BackendConfiguration pool *HttpClientPool capabilities *Capabilities } func NewBackendClient(config *goconf.ConfigFile, maxConcurrentRequestsPerHost int, version string, etcdClient *EtcdClient) (*BackendClient, error) { backends, err := NewBackendConfiguration(config, etcdClient) if err != nil { return nil, err } skipverify, _ := config.GetBool("backend", "skipverify") if skipverify { log.Println("WARNING: Backend verification is disabled!") } pool, err := NewHttpClientPool(maxConcurrentRequestsPerHost, skipverify) if err != nil { return nil, err } capabilities, err := NewCapabilities(version, pool) if err != nil { return nil, err } return &BackendClient{ version: version, backends: backends, pool: pool, capabilities: capabilities, }, nil } func (b *BackendClient) Close() { b.backends.Close() } func (b *BackendClient) Reload(config *goconf.ConfigFile) { b.backends.Reload(config) } func (b *BackendClient) GetCompatBackend() *Backend { return b.backends.GetCompatBackend() } func (b *BackendClient) GetBackend(u *url.URL) *Backend { return b.backends.GetBackend(u) } func (b *BackendClient) GetBackends() []*Backend { return b.backends.GetBackends() } func (b *BackendClient) IsUrlAllowed(u *url.URL) bool { return b.backends.IsUrlAllowed(u) } func isOcsRequest(u *url.URL) bool { return strings.Contains(u.Path, "/ocs/v2.php") || strings.Contains(u.Path, "/ocs/v1.php") } // PerformJSONRequest sends a JSON POST request to the given url and decodes // the result into "response". 
func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, request interface{}, response interface{}) error { if u == nil { return fmt.Errorf("no url passed to perform JSON request %+v", request) } secret := b.backends.GetSecret(u) if secret == nil { return fmt.Errorf("no backend secret configured for for %s", u) } var requestUrl *url.URL if b.capabilities.HasCapabilityFeature(ctx, u, FeatureSignalingV3Api) { newUrl := *u newUrl.Path = strings.Replace(newUrl.Path, "/spreed/api/v1/signaling/", "/spreed/api/v3/signaling/", -1) newUrl.Path = strings.Replace(newUrl.Path, "/spreed/api/v2/signaling/", "/spreed/api/v3/signaling/", -1) requestUrl = &newUrl } else { requestUrl = u } c, pool, err := b.pool.Get(ctx, u) if err != nil { log.Printf("Could not get client for host %s: %s", u.Host, err) return err } defer pool.Put(c) data, err := json.Marshal(request) if err != nil { log.Printf("Could not marshal request %+v: %s", request, err) return err } req, err := http.NewRequestWithContext(ctx, "POST", requestUrl.String(), bytes.NewReader(data)) if err != nil { log.Printf("Could not create request to %s: %s", requestUrl, err) return err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", "application/json") req.Header.Set("OCS-APIRequest", "true") req.Header.Set("User-Agent", "nextcloud-spreed-signaling/"+b.version) if b.hub != nil { req.Header.Set("X-Spreed-Signaling-Features", strings.Join(b.hub.info.Features, ", ")) } // Add checksum so the backend can validate the request. 
AddBackendChecksum(req, data, secret) resp, err := c.Do(req) if err != nil { log.Printf("Could not send request %s to %s: %s", string(data), req.URL, err) return err } defer resp.Body.Close() ct := resp.Header.Get("Content-Type") if !strings.HasPrefix(ct, "application/json") { log.Printf("Received unsupported content-type from %s: %s (%s)", req.URL, ct, resp.Status) return ErrUnsupportedContentType } body, err := io.ReadAll(resp.Body) if err != nil { log.Printf("Could not read response body from %s: %s", req.URL, err) return err } if isOcsRequest(u) || req.Header.Get("OCS-APIRequest") != "" { // OCS response are wrapped in an OCS container that needs to be parsed // to get the actual contents: // { // "ocs": { // "meta": { ... }, // "data": { ... } // } // } var ocs OcsResponse if err := json.Unmarshal(body, &ocs); err != nil { log.Printf("Could not decode OCS response %s from %s: %s", string(body), req.URL, err) return err } else if ocs.Ocs == nil || ocs.Ocs.Data == nil { log.Printf("Incomplete OCS response %s from %s", string(body), req.URL) return ErrIncompleteResponse } switch ocs.Ocs.Meta.StatusCode { case http.StatusTooManyRequests: log.Printf("Throttled OCS response %s from %s", string(body), req.URL) return ErrThrottledResponse } if err := json.Unmarshal(*ocs.Ocs.Data, response); err != nil { log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), req.URL, err) return err } } else if err := json.Unmarshal(body, response); err != nil { log.Printf("Could not decode response body %s from %s: %s", string(body), req.URL, err) return err } return nil } nextcloud-spreed-signaling-1.2.4/backend_client_test.go000066400000000000000000000143321460321600400232660ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "errors" "io" "net/http" "net/http/httptest" "net/url" "reflect" "strings" "testing" "github.com/dlintw/goconf" "github.com/gorilla/mux" ) func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) { response := OcsResponse{ Ocs: &OcsBody{ Meta: OcsMeta{ Status: "OK", StatusCode: http.StatusOK, Message: "OK", }, Data: (*json.RawMessage)(&body), }, } if strings.Contains(t.Name(), "Throttled") { response.Ocs.Meta = OcsMeta{ Status: "failure", StatusCode: 429, Message: "Reached maximum delay", } } data, err := json.Marshal(response) if err != nil { t.Fatal(err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) if _, err := w.Write(data); err != nil { t.Error(err) } } func TestPostOnRedirect(t *testing.T) { r := mux.NewRouter() r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/ocs/v2.php/two", http.StatusTemporaryRedirect) }) r.HandleFunc("/ocs/v2.php/two", func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { t.Fatal(err) return } var request map[string]string if err := json.Unmarshal(body, &request); err != nil { t.Fatal(err) return } returnOCS(t, w, 
body) }) server := httptest.NewServer(r) defer server.Close() u, err := url.Parse(server.URL + "/ocs/v2.php/one") if err != nil { t.Fatal(err) } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", u.Host) config.AddOption("backend", "secret", string(testBackendSecret)) if u.Scheme == "http" { config.AddOption("backend", "allowhttp", "true") } client, err := NewBackendClient(config, 1, "0.0", nil) if err != nil { t.Fatal(err) } ctx := context.Background() request := map[string]string{ "foo": "bar", } var response map[string]string err = client.PerformJSONRequest(ctx, u, request, &response) if err != nil { t.Fatal(err) } if response == nil || !reflect.DeepEqual(request, response) { t.Errorf("Expected %+v, got %+v", request, response) } } func TestPostOnRedirectDifferentHost(t *testing.T) { r := mux.NewRouter() r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "http://domain.invalid/ocs/v2.php/two", http.StatusTemporaryRedirect) }) server := httptest.NewServer(r) defer server.Close() u, err := url.Parse(server.URL + "/ocs/v2.php/one") if err != nil { t.Fatal(err) } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", u.Host) config.AddOption("backend", "secret", string(testBackendSecret)) if u.Scheme == "http" { config.AddOption("backend", "allowhttp", "true") } client, err := NewBackendClient(config, 1, "0.0", nil) if err != nil { t.Fatal(err) } ctx := context.Background() request := map[string]string{ "foo": "bar", } var response map[string]string err = client.PerformJSONRequest(ctx, u, request, &response) if err != nil { // The redirect to a different host should have failed. 
if !errors.Is(err, ErrNotRedirecting) { t.Fatal(err) } } else { t.Fatal("The redirect should have failed") } } func TestPostOnRedirectStatusFound(t *testing.T) { r := mux.NewRouter() r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/ocs/v2.php/two", http.StatusFound) }) r.HandleFunc("/ocs/v2.php/two", func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { t.Fatal(err) return } if len(body) > 0 { t.Errorf("Should not have received any body, got %s", string(body)) } returnOCS(t, w, []byte("{}")) }) server := httptest.NewServer(r) defer server.Close() u, err := url.Parse(server.URL + "/ocs/v2.php/one") if err != nil { t.Fatal(err) } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", u.Host) config.AddOption("backend", "secret", string(testBackendSecret)) if u.Scheme == "http" { config.AddOption("backend", "allowhttp", "true") } client, err := NewBackendClient(config, 1, "0.0", nil) if err != nil { t.Fatal(err) } ctx := context.Background() request := map[string]string{ "foo": "bar", } var response map[string]string err = client.PerformJSONRequest(ctx, u, request, &response) if err != nil { t.Error(err) } if len(response) > 0 { t.Errorf("Expected empty response, got %+v", response) } } func TestHandleThrottled(t *testing.T) { r := mux.NewRouter() r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) { returnOCS(t, w, []byte("[]")) }) server := httptest.NewServer(r) defer server.Close() u, err := url.Parse(server.URL + "/ocs/v2.php/one") if err != nil { t.Fatal(err) } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", u.Host) config.AddOption("backend", "secret", string(testBackendSecret)) if u.Scheme == "http" { config.AddOption("backend", "allowhttp", "true") } client, err := NewBackendClient(config, 1, "0.0", nil) if err != nil { t.Fatal(err) } ctx := context.Background() request := map[string]string{ "foo": 
"bar", } var response map[string]string err = client.PerformJSONRequest(ctx, u, request, &response) if err == nil { t.Error("should have triggered an error") } else if !errors.Is(err, ErrThrottledResponse) { t.Error(err) } } nextcloud-spreed-signaling-1.2.4/backend_configuration.go000066400000000000000000000125471460321600400236260ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "fmt" "net/url" "strings" "sync" "github.com/dlintw/goconf" ) const ( BackendTypeStatic = "static" BackendTypeEtcd = "etcd" DefaultBackendType = BackendTypeStatic ) var ( SessionLimitExceeded = NewError("session_limit_exceeded", "Too many sessions connected for this backend.") ) type Backend struct { id string url string parsedUrl *url.URL secret []byte compat bool allowHttp bool maxStreamBitrate int maxScreenBitrate int sessionLimit uint64 sessionsLock sync.Mutex sessions map[string]bool } func (b *Backend) Id() string { return b.id } func (b *Backend) Secret() []byte { return b.secret } func (b *Backend) IsCompat() bool { return b.compat } func (b *Backend) IsUrlAllowed(u *url.URL) bool { switch u.Scheme { case "https": return true case "http": return b.allowHttp default: return false } } func (b *Backend) Url() string { return b.url } func (b *Backend) ParsedUrl() *url.URL { return b.parsedUrl } func (b *Backend) Limit() int { return int(b.sessionLimit) } func (b *Backend) Len() int { b.sessionsLock.Lock() defer b.sessionsLock.Unlock() return len(b.sessions) } func (b *Backend) AddSession(session Session) error { if session.ClientType() == HelloClientTypeInternal || session.ClientType() == HelloClientTypeVirtual { // Internal and virtual sessions are not counting to the limit. 
return nil } if b.sessionLimit == 0 { // Not limited return nil } b.sessionsLock.Lock() defer b.sessionsLock.Unlock() if b.sessions == nil { b.sessions = make(map[string]bool) } else if uint64(len(b.sessions)) >= b.sessionLimit { statsBackendLimitExceededTotal.WithLabelValues(b.id).Inc() return SessionLimitExceeded } b.sessions[session.PublicId()] = true return nil } func (b *Backend) RemoveSession(session Session) { b.sessionsLock.Lock() defer b.sessionsLock.Unlock() delete(b.sessions, session.PublicId()) } type BackendStorage interface { Close() Reload(config *goconf.ConfigFile) GetCompatBackend() *Backend GetBackend(u *url.URL) *Backend GetBackends() []*Backend } type backendStorageCommon struct { mu sync.RWMutex backends map[string][]*Backend } func (s *backendStorageCommon) GetBackends() []*Backend { s.mu.RLock() defer s.mu.RUnlock() var result []*Backend for _, entries := range s.backends { result = append(result, entries...) } return result } func (s *backendStorageCommon) getBackendLocked(u *url.URL) *Backend { s.mu.RLock() defer s.mu.RUnlock() entries, found := s.backends[u.Host] if !found { return nil } url := u.String() if url[len(url)-1] != '/' { url += "/" } for _, entry := range entries { if !entry.IsUrlAllowed(u) { continue } if entry.url == "" { // Old-style configuration, only hosts are configured. 
return entry } else if strings.HasPrefix(url, entry.url) { return entry } } return nil } type BackendConfiguration struct { storage BackendStorage } func NewBackendConfiguration(config *goconf.ConfigFile, etcdClient *EtcdClient) (*BackendConfiguration, error) { backendType, _ := config.GetString("backend", "backendtype") if backendType == "" { backendType = DefaultBackendType } RegisterBackendConfigurationStats() var storage BackendStorage var err error switch backendType { case BackendTypeStatic: storage, err = NewBackendStorageStatic(config) case BackendTypeEtcd: storage, err = NewBackendStorageEtcd(config, etcdClient) default: err = fmt.Errorf("unknown backend type: %s", backendType) } if err != nil { return nil, err } return &BackendConfiguration{ storage: storage, }, nil } func (b *BackendConfiguration) Close() { b.storage.Close() } func (b *BackendConfiguration) Reload(config *goconf.ConfigFile) { b.storage.Reload(config) } func (b *BackendConfiguration) GetCompatBackend() *Backend { return b.storage.GetCompatBackend() } func (b *BackendConfiguration) GetBackend(u *url.URL) *Backend { if strings.Contains(u.Host, ":") && hasStandardPort(u) { u.Host = u.Hostname() } return b.storage.GetBackend(u) } func (b *BackendConfiguration) GetBackends() []*Backend { return b.storage.GetBackends() } func (b *BackendConfiguration) IsUrlAllowed(u *url.URL) bool { if u == nil { // Reject all invalid URLs. return false } backend := b.GetBackend(u) return backend != nil } func (b *BackendConfiguration) GetSecret(u *url.URL) []byte { if u == nil { // Reject all invalid URLs. return nil } entry := b.GetBackend(u) if entry == nil { return nil } return entry.Secret() } nextcloud-spreed-signaling-1.2.4/backend_configuration_stats_prometheus.go000066400000000000000000000031451460321600400273110ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsBackendLimitExceededTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "backend", Name: "session_limit_exceeded_total", Help: "The number of times the session limit exceeded", }, []string{"backend"}) statsBackendsCurrent = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "backend", Name: "current", Help: "The current number of configured backends", }) backendConfigurationStats = []prometheus.Collector{ statsBackendLimitExceededTotal, statsBackendsCurrent, } ) func RegisterBackendConfigurationStats() { registerAll(backendConfigurationStats...) } nextcloud-spreed-signaling-1.2.4/backend_configuration_test.go000066400000000000000000000627221460321600400246650ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "bytes" "context" "net/url" "reflect" "sort" "testing" "github.com/dlintw/goconf" "github.com/prometheus/client_golang/prometheus/testutil" ) func testUrls(t *testing.T, config *BackendConfiguration, valid_urls []string, invalid_urls []string) { for _, u := range valid_urls { u := u t.Run(u, func(t *testing.T) { parsed, err := url.ParseRequestURI(u) if err != nil { t.Errorf("The url %s should be valid, got %s", u, err) return } if !config.IsUrlAllowed(parsed) { t.Errorf("The url %s should be allowed", u) } if secret := config.GetSecret(parsed); !bytes.Equal(secret, testBackendSecret) { t.Errorf("Expected secret %s for url %s, got %s", string(testBackendSecret), u, string(secret)) } }) } for _, u := range invalid_urls { u := u t.Run(u, func(t *testing.T) { parsed, _ := url.ParseRequestURI(u) if config.IsUrlAllowed(parsed) { t.Errorf("The url %s should not be allowed", u) } }) } } func testBackends(t *testing.T, config *BackendConfiguration, valid_urls [][]string, invalid_urls []string) { for _, entry := range valid_urls { entry := entry t.Run(entry[0], func(t *testing.T) { u := entry[0] parsed, err := url.ParseRequestURI(u) if err != nil { t.Errorf("The url %s should be valid, got %s", u, err) return } if 
!config.IsUrlAllowed(parsed) { t.Errorf("The url %s should be allowed", u) } s := entry[1] if secret := config.GetSecret(parsed); !bytes.Equal(secret, []byte(s)) { t.Errorf("Expected secret %s for url %s, got %s", string(s), u, string(secret)) } }) } for _, u := range invalid_urls { u := u t.Run(u, func(t *testing.T) { parsed, _ := url.ParseRequestURI(u) if config.IsUrlAllowed(parsed) { t.Errorf("The url %s should not be allowed", u) } }) } } func TestIsUrlAllowed_Compat(t *testing.T) { // Old-style configuration valid_urls := []string{ "http://domain.invalid", "https://domain.invalid", } invalid_urls := []string{ "http://otherdomain.invalid", "https://otherdomain.invalid", "domain.invalid", } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", "domain.invalid") config.AddOption("backend", "allowhttp", "true") config.AddOption("backend", "secret", string(testBackendSecret)) cfg, err := NewBackendConfiguration(config, nil) if err != nil { t.Fatal(err) } testUrls(t, cfg, valid_urls, invalid_urls) } func TestIsUrlAllowed_CompatForceHttps(t *testing.T) { // Old-style configuration, force HTTPS valid_urls := []string{ "https://domain.invalid", } invalid_urls := []string{ "http://domain.invalid", "http://otherdomain.invalid", "https://otherdomain.invalid", "domain.invalid", } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", "domain.invalid") config.AddOption("backend", "secret", string(testBackendSecret)) cfg, err := NewBackendConfiguration(config, nil) if err != nil { t.Fatal(err) } testUrls(t, cfg, valid_urls, invalid_urls) } func TestIsUrlAllowed(t *testing.T) { valid_urls := [][]string{ {"https://domain.invalid/foo", string(testBackendSecret) + "-foo"}, {"https://domain.invalid/foo/", string(testBackendSecret) + "-foo"}, {"https://domain.invalid:443/foo/", string(testBackendSecret) + "-foo"}, {"https://domain.invalid/foo/folder", string(testBackendSecret) + "-foo"}, {"https://domain.invalid/bar", string(testBackendSecret) + 
"-bar"}, {"https://domain.invalid/bar/", string(testBackendSecret) + "-bar"}, {"https://domain.invalid:443/bar/", string(testBackendSecret) + "-bar"}, {"https://domain.invalid/bar/folder/", string(testBackendSecret) + "-bar"}, {"http://domain.invalid/baz", string(testBackendSecret) + "-baz"}, {"http://domain.invalid/baz/", string(testBackendSecret) + "-baz"}, {"http://domain.invalid:80/baz/", string(testBackendSecret) + "-baz"}, {"http://domain.invalid/baz/folder/", string(testBackendSecret) + "-baz"}, {"https://otherdomain.invalid/", string(testBackendSecret) + "-lala"}, {"https://otherdomain.invalid/folder/", string(testBackendSecret) + "-lala"}, } invalid_urls := []string{ "http://domain.invalid", "http://domain.invalid/", "https://domain.invalid", "https://domain.invalid/", "http://domain.invalid/foo", "http://domain.invalid/foo/", "https://domain.invalid:8443/foo/", "https://www.domain.invalid/foo/", "https://domain.invalid/baz/", } config := goconf.NewConfigFile() config.AddOption("backend", "backends", "foo, bar, baz, lala, missing") config.AddOption("foo", "url", "https://domain.invalid/foo") config.AddOption("foo", "secret", string(testBackendSecret)+"-foo") config.AddOption("bar", "url", "https://domain.invalid:443/bar/") config.AddOption("bar", "secret", string(testBackendSecret)+"-bar") config.AddOption("baz", "url", "http://domain.invalid/baz") config.AddOption("baz", "secret", string(testBackendSecret)+"-baz") config.AddOption("lala", "url", "https://otherdomain.invalid/") config.AddOption("lala", "secret", string(testBackendSecret)+"-lala") cfg, err := NewBackendConfiguration(config, nil) if err != nil { t.Fatal(err) } testBackends(t, cfg, valid_urls, invalid_urls) } func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) { valid_urls := []string{} invalid_urls := []string{ "http://domain.invalid", "https://domain.invalid", "domain.invalid", } config := goconf.NewConfigFile() config.AddOption("backend", "allowed", "") config.AddOption("backend", "secret", 
string(testBackendSecret)) cfg, err := NewBackendConfiguration(config, nil) if err != nil { t.Fatal(err) } testUrls(t, cfg, valid_urls, invalid_urls) } func TestIsUrlAllowed_AllowAll(t *testing.T) { valid_urls := []string{ "http://domain.invalid", "https://domain.invalid", "https://domain.invalid:443", } invalid_urls := []string{ "domain.invalid", } config := goconf.NewConfigFile() config.AddOption("backend", "allowall", "true") config.AddOption("backend", "allowed", "") config.AddOption("backend", "secret", string(testBackendSecret)) cfg, err := NewBackendConfiguration(config, nil) if err != nil { t.Fatal(err) } testUrls(t, cfg, valid_urls, invalid_urls) } type ParseBackendIdsTestcase struct { s string ids []string } func TestParseBackendIds(t *testing.T) { testcases := []ParseBackendIdsTestcase{ {"", nil}, {"backend1", []string{"backend1"}}, {" backend1 ", []string{"backend1"}}, {"backend1,", []string{"backend1"}}, {"backend1,backend1", []string{"backend1"}}, {"backend1, backend2", []string{"backend1", "backend2"}}, {"backend1,backend2, backend1", []string{"backend1", "backend2"}}, } for _, test := range testcases { ids := getConfiguredBackendIDs(test.s) if !reflect.DeepEqual(ids, test.ids) { t.Errorf("List of ids differs, expected %+v, got %+v", test.ids, ids) } } } func TestBackendReloadNoChange(t *testing.T) { current := testutil.ToFloat64(statsBackendsCurrent) original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend", "allowall", "false") original_config.AddOption("backend1", "url", "http://domain1.invalid") original_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") original_config.AddOption("backend2", "url", "http://domain2.invalid") original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") o_cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, 
statsBackendsCurrent, current+2) new_config := goconf.NewConfigFile() new_config.AddOption("backend", "backends", "backend1, backend2") new_config.AddOption("backend", "allowall", "false") new_config.AddOption("backend1", "url", "http://domain1.invalid") new_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") new_config.AddOption("backend2", "url", "http://domain2.invalid") new_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") n_cfg, err := NewBackendConfiguration(new_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+4) o_cfg.Reload(original_config) checkStatsValue(t, statsBackendsCurrent, current+4) if !reflect.DeepEqual(n_cfg, o_cfg) { t.Error("BackendConfiguration should be equal after Reload") } } func TestBackendReloadChangeExistingURL(t *testing.T) { current := testutil.ToFloat64(statsBackendsCurrent) original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend", "allowall", "false") original_config.AddOption("backend1", "url", "http://domain1.invalid") original_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") original_config.AddOption("backend2", "url", "http://domain2.invalid") original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") o_cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+2) new_config := goconf.NewConfigFile() new_config.AddOption("backend", "backends", "backend1, backend2") new_config.AddOption("backend", "allowall", "false") new_config.AddOption("backend1", "url", "http://domain3.invalid") new_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") new_config.AddOption("backend1", "sessionlimit", "10") new_config.AddOption("backend2", "url", "http://domain2.invalid") 
new_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") n_cfg, err := NewBackendConfiguration(new_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+4) original_config.RemoveOption("backend1", "url") original_config.AddOption("backend1", "url", "http://domain3.invalid") original_config.AddOption("backend1", "sessionlimit", "10") o_cfg.Reload(original_config) checkStatsValue(t, statsBackendsCurrent, current+4) if !reflect.DeepEqual(n_cfg, o_cfg) { t.Error("BackendConfiguration should be equal after Reload") } } func TestBackendReloadChangeSecret(t *testing.T) { current := testutil.ToFloat64(statsBackendsCurrent) original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend", "allowall", "false") original_config.AddOption("backend1", "url", "http://domain1.invalid") original_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") original_config.AddOption("backend2", "url", "http://domain2.invalid") original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") o_cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+2) new_config := goconf.NewConfigFile() new_config.AddOption("backend", "backends", "backend1, backend2") new_config.AddOption("backend", "allowall", "false") new_config.AddOption("backend1", "url", "http://domain1.invalid") new_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend3") new_config.AddOption("backend2", "url", "http://domain2.invalid") new_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") n_cfg, err := NewBackendConfiguration(new_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+4) original_config.RemoveOption("backend1", "secret") original_config.AddOption("backend1", 
"secret", string(testBackendSecret)+"-backend3") o_cfg.Reload(original_config) checkStatsValue(t, statsBackendsCurrent, current+4) if !reflect.DeepEqual(n_cfg, o_cfg) { t.Error("BackendConfiguration should be equal after Reload") } } func TestBackendReloadAddBackend(t *testing.T) { current := testutil.ToFloat64(statsBackendsCurrent) original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1") original_config.AddOption("backend", "allowall", "false") original_config.AddOption("backend1", "url", "http://domain1.invalid") original_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") o_cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+1) new_config := goconf.NewConfigFile() new_config.AddOption("backend", "backends", "backend1, backend2") new_config.AddOption("backend", "allowall", "false") new_config.AddOption("backend1", "url", "http://domain1.invalid") new_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") new_config.AddOption("backend2", "url", "http://domain2.invalid") new_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") new_config.AddOption("backend2", "sessionlimit", "10") n_cfg, err := NewBackendConfiguration(new_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+3) original_config.RemoveOption("backend", "backends") original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend2", "url", "http://domain2.invalid") original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") original_config.AddOption("backend2", "sessionlimit", "10") o_cfg.Reload(original_config) checkStatsValue(t, statsBackendsCurrent, current+4) if !reflect.DeepEqual(n_cfg, o_cfg) { t.Error("BackendConfiguration should be equal after Reload") } } func 
TestBackendReloadRemoveHost(t *testing.T) { current := testutil.ToFloat64(statsBackendsCurrent) original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend", "allowall", "false") original_config.AddOption("backend1", "url", "http://domain1.invalid") original_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") original_config.AddOption("backend2", "url", "http://domain2.invalid") original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") o_cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+2) new_config := goconf.NewConfigFile() new_config.AddOption("backend", "backends", "backend1") new_config.AddOption("backend", "allowall", "false") new_config.AddOption("backend1", "url", "http://domain1.invalid") new_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") n_cfg, err := NewBackendConfiguration(new_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+3) original_config.RemoveOption("backend", "backends") original_config.AddOption("backend", "backends", "backend1") original_config.RemoveSection("backend2") o_cfg.Reload(original_config) checkStatsValue(t, statsBackendsCurrent, current+2) if !reflect.DeepEqual(n_cfg, o_cfg) { t.Error("BackendConfiguration should be equal after Reload") } } func TestBackendReloadRemoveBackendFromSharedHost(t *testing.T) { current := testutil.ToFloat64(statsBackendsCurrent) original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend", "allowall", "false") original_config.AddOption("backend1", "url", "http://domain1.invalid/foo/") original_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") original_config.AddOption("backend2", "url", 
"http://domain1.invalid/bar/") original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") o_cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+2) new_config := goconf.NewConfigFile() new_config.AddOption("backend", "backends", "backend1") new_config.AddOption("backend", "allowall", "false") new_config.AddOption("backend1", "url", "http://domain1.invalid/foo/") new_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") n_cfg, err := NewBackendConfiguration(new_config, nil) if err != nil { t.Fatal(err) } checkStatsValue(t, statsBackendsCurrent, current+3) original_config.RemoveOption("backend", "backends") original_config.AddOption("backend", "backends", "backend1") original_config.RemoveSection("backend2") o_cfg.Reload(original_config) checkStatsValue(t, statsBackendsCurrent, current+2) if !reflect.DeepEqual(n_cfg, o_cfg) { t.Error("BackendConfiguration should be equal after Reload") } } func sortBackends(backends []*Backend) []*Backend { result := make([]*Backend, len(backends)) copy(result, backends) sort.Slice(result, func(i, j int) bool { return result[i].Id() < result[j].Id() }) return result } func mustParse(s string) *url.URL { p, err := url.Parse(s) if err != nil { panic(err) } return p } func TestBackendConfiguration_Etcd(t *testing.T) { etcd, client := NewEtcdClientForTest(t) url1 := "https://domain1.invalid/foo" initialSecret1 := string(testBackendSecret) + "-backend1-initial" secret1 := string(testBackendSecret) + "-backend1" SetEtcdValue(etcd, "/backends/1_one", []byte("{\"url\":\""+url1+"\",\"secret\":\""+initialSecret1+"\"}")) config := goconf.NewConfigFile() config.AddOption("backend", "backendtype", "etcd") config.AddOption("backend", "backendprefix", "/backends") cfg, err := NewBackendConfiguration(config, client) if err != nil { t.Fatal(err) } defer cfg.Close() storage := cfg.storage.(*backendStorageEtcd) ch := 
storage.getWakeupChannelForTesting() ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if err := storage.WaitForInitialized(ctx); err != nil { t.Fatal(err) } if backends := sortBackends(cfg.GetBackends()); len(backends) != 1 { t.Errorf("Expected one backend, got %+v", backends) } else if backends[0].url != url1 { t.Errorf("Expected backend url %s, got %s", url1, backends[0].url) } else if string(backends[0].secret) != initialSecret1 { t.Errorf("Expected backend secret %s, got %s", initialSecret1, string(backends[0].secret)) } else if backend := cfg.GetBackend(mustParse(url1)); backend != backends[0] { t.Errorf("Expected backend %+v, got %+v", backends[0], backend) } drainWakeupChannel(ch) SetEtcdValue(etcd, "/backends/1_one", []byte("{\"url\":\""+url1+"\",\"secret\":\""+secret1+"\"}")) <-ch if backends := sortBackends(cfg.GetBackends()); len(backends) != 1 { t.Errorf("Expected one backend, got %+v", backends) } else if backends[0].url != url1 { t.Errorf("Expected backend url %s, got %s", url1, backends[0].url) } else if string(backends[0].secret) != secret1 { t.Errorf("Expected backend secret %s, got %s", secret1, string(backends[0].secret)) } else if backend := cfg.GetBackend(mustParse(url1)); backend != backends[0] { t.Errorf("Expected backend %+v, got %+v", backends[0], backend) } url2 := "https://domain1.invalid/bar" secret2 := string(testBackendSecret) + "-backend2" drainWakeupChannel(ch) SetEtcdValue(etcd, "/backends/2_two", []byte("{\"url\":\""+url2+"\",\"secret\":\""+secret2+"\"}")) <-ch if backends := sortBackends(cfg.GetBackends()); len(backends) != 2 { t.Errorf("Expected two backends, got %+v", backends) } else if backends[0].url != url1 { t.Errorf("Expected backend url %s, got %s", url1, backends[0].url) } else if string(backends[0].secret) != secret1 { t.Errorf("Expected backend secret %s, got %s", secret1, string(backends[0].secret)) } else if backends[1].url != url2 { t.Errorf("Expected backend url %s, got %s", 
url2, backends[1].url) } else if string(backends[1].secret) != secret2 { t.Errorf("Expected backend secret %s, got %s", secret2, string(backends[1].secret)) } else if backend := cfg.GetBackend(mustParse(url1)); backend != backends[0] { t.Errorf("Expected backend %+v, got %+v", backends[0], backend) } else if backend := cfg.GetBackend(mustParse(url2)); backend != backends[1] { t.Errorf("Expected backend %+v, got %+v", backends[1], backend) } url3 := "https://domain2.invalid/foo" secret3 := string(testBackendSecret) + "-backend3" drainWakeupChannel(ch) SetEtcdValue(etcd, "/backends/3_three", []byte("{\"url\":\""+url3+"\",\"secret\":\""+secret3+"\"}")) <-ch if backends := sortBackends(cfg.GetBackends()); len(backends) != 3 { t.Errorf("Expected three backends, got %+v", backends) } else if backends[0].url != url1 { t.Errorf("Expected backend url %s, got %s", url1, backends[0].url) } else if string(backends[0].secret) != secret1 { t.Errorf("Expected backend secret %s, got %s", secret1, string(backends[0].secret)) } else if backends[1].url != url2 { t.Errorf("Expected backend url %s, got %s", url2, backends[1].url) } else if string(backends[1].secret) != secret2 { t.Errorf("Expected backend secret %s, got %s", secret2, string(backends[1].secret)) } else if backends[2].url != url3 { t.Errorf("Expected backend url %s, got %s", url3, backends[2].url) } else if string(backends[2].secret) != secret3 { t.Errorf("Expected backend secret %s, got %s", secret3, string(backends[2].secret)) } else if backend := cfg.GetBackend(mustParse(url1)); backend != backends[0] { t.Errorf("Expected backend %+v, got %+v", backends[0], backend) } else if backend := cfg.GetBackend(mustParse(url2)); backend != backends[1] { t.Errorf("Expected backend %+v, got %+v", backends[1], backend) } else if backend := cfg.GetBackend(mustParse(url3)); backend != backends[2] { t.Errorf("Expected backend %+v, got %+v", backends[2], backend) } drainWakeupChannel(ch) DeleteEtcdValue(etcd, "/backends/1_one") <-ch 
if backends := sortBackends(cfg.GetBackends()); len(backends) != 2 { t.Errorf("Expected two backends, got %+v", backends) } else if backends[0].url != url2 { t.Errorf("Expected backend url %s, got %s", url2, backends[0].url) } else if string(backends[0].secret) != secret2 { t.Errorf("Expected backend secret %s, got %s", secret2, string(backends[0].secret)) } else if backends[1].url != url3 { t.Errorf("Expected backend url %s, got %s", url3, backends[1].url) } else if string(backends[1].secret) != secret3 { t.Errorf("Expected backend secret %s, got %s", secret3, string(backends[1].secret)) } drainWakeupChannel(ch) DeleteEtcdValue(etcd, "/backends/2_two") <-ch if backends := sortBackends(cfg.GetBackends()); len(backends) != 1 { t.Errorf("Expected one backend, got %+v", backends) } else if backends[0].url != url3 { t.Errorf("Expected backend url %s, got %s", url3, backends[0].url) } else if string(backends[0].secret) != secret3 { t.Errorf("Expected backend secret %s, got %s", secret3, string(backends[0].secret)) } if _, found := storage.backends["domain1.invalid"]; found { t.Errorf("Should have removed host information for %s", "domain1.invalid") } } func TestBackendCommonSecret(t *testing.T) { u1, err := url.Parse("http://domain1.invalid") if err != nil { t.Fatal(err) } u2, err := url.Parse("http://domain2.invalid") if err != nil { t.Fatal(err) } original_config := goconf.NewConfigFile() original_config.AddOption("backend", "backends", "backend1, backend2") original_config.AddOption("backend", "secret", string(testBackendSecret)) original_config.AddOption("backend1", "url", u1.String()) original_config.AddOption("backend2", "url", u2.String()) original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2") cfg, err := NewBackendConfiguration(original_config, nil) if err != nil { t.Fatal(err) } if b1 := cfg.GetBackend(u1); b1 == nil { t.Error("didn't get backend") } else if !bytes.Equal(b1.Secret(), testBackendSecret) { t.Errorf("expected secret 
%s, got %s", string(testBackendSecret), string(b1.Secret())) } if b2 := cfg.GetBackend(u2); b2 == nil { t.Error("didn't get backend") } else if !bytes.Equal(b2.Secret(), []byte(string(testBackendSecret)+"-backend2")) { t.Errorf("expected secret %s, got %s", string(testBackendSecret)+"-backend2", string(b2.Secret())) } updated_config := goconf.NewConfigFile() updated_config.AddOption("backend", "backends", "backend1, backend2") updated_config.AddOption("backend", "secret", string(testBackendSecret)) updated_config.AddOption("backend1", "url", u1.String()) updated_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1") updated_config.AddOption("backend2", "url", u2.String()) cfg.Reload(updated_config) if b1 := cfg.GetBackend(u1); b1 == nil { t.Error("didn't get backend") } else if !bytes.Equal(b1.Secret(), []byte(string(testBackendSecret)+"-backend1")) { t.Errorf("expected secret %s, got %s", string(testBackendSecret)+"-backend1", string(b1.Secret())) } if b2 := cfg.GetBackend(u2); b2 == nil { t.Error("didn't get backend") } else if !bytes.Equal(b2.Secret(), testBackendSecret) { t.Errorf("expected secret %s, got %s", string(testBackendSecret), string(b2.Secret())) } } nextcloud-spreed-signaling-1.2.4/backend_server.go000066400000000000000000000650351460321600400222650ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "crypto/hmac" "crypto/rand" "crypto/sha1" "encoding/base64" "encoding/json" "errors" "fmt" "io" "log" "net" "net/http" "net/url" "reflect" "regexp" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" ) const ( maxBodySize = 256 * 1024 randomUsernameLength = 32 sessionIdNotInMeeting = "0" ) type BackendServer struct { hub *Hub events AsyncEvents roomSessions RoomSessions version string welcomeMessage string turnapikey string turnsecret []byte turnvalid time.Duration turnservers []string statsAllowedIps *AllowedIps invalidSecret []byte } func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*BackendServer, error) { turnapikey, _ := config.GetString("turn", "apikey") turnsecret, _ := config.GetString("turn", "secret") turnservers, _ := config.GetString("turn", "servers") // TODO(jojo): Make the validity for TURN credentials configurable. 
turnvalid := 24 * time.Hour var turnserverslist []string for _, s := range strings.Split(turnservers, ",") { s = strings.TrimSpace(s) if s != "" { turnserverslist = append(turnserverslist, s) } } if len(turnserverslist) != 0 { if turnapikey == "" { return nil, fmt.Errorf("need a TURN API key if TURN servers are configured") } if turnsecret == "" { return nil, fmt.Errorf("need a shared TURN secret if TURN servers are configured") } log.Printf("Using configured TURN API key") log.Printf("Using configured shared TURN secret") for _, s := range turnserverslist { log.Printf("Adding \"%s\" as TURN server", s) } } statsAllowed, _ := config.GetString("stats", "allowed_ips") statsAllowedIps, err := ParseAllowedIps(statsAllowed) if err != nil { return nil, err } if !statsAllowedIps.Empty() { log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed) } else { log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1") statsAllowedIps = DefaultAllowedIps() } invalidSecret := make([]byte, 32) if _, err := rand.Read(invalidSecret); err != nil { return nil, err } return &BackendServer{ hub: hub, events: hub.events, roomSessions: hub.roomSessions, version: version, turnapikey: turnapikey, turnsecret: []byte(turnsecret), turnvalid: turnvalid, turnservers: turnserverslist, statsAllowedIps: statsAllowedIps, invalidSecret: invalidSecret, }, nil } func (b *BackendServer) Start(r *mux.Router) error { welcome := map[string]string{ "nextcloud-spreed-signaling": "Welcome", "version": b.version, } welcomeMessage, err := json.Marshal(welcome) if err != nil { // Should never happen. 
return err } b.welcomeMessage = string(welcomeMessage) + "\n" s := r.PathPrefix("/api/v1").Subrouter() s.HandleFunc("/welcome", b.setComonHeaders(b.welcomeFunc)).Methods("GET") s.HandleFunc("/room/{roomid}", b.setComonHeaders(b.parseRequestBody(b.roomHandler))).Methods("POST") s.HandleFunc("/stats", b.setComonHeaders(b.validateStatsRequest(b.statsHandler))).Methods("GET") // Expose prometheus metrics at "/metrics". r.HandleFunc("/metrics", b.setComonHeaders(b.validateStatsRequest(b.metricsHandler))).Methods("GET") // Provide a REST service to get TURN credentials. // See https://tools.ietf.org/html/draft-uberti-behave-turn-rest-00 r.HandleFunc("/turn/credentials", b.setComonHeaders(b.getTurnCredentials)).Methods("GET") return nil } func (b *BackendServer) setComonHeaders(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Server", "nextcloud-spreed-signaling/"+b.version) w.Header().Set("X-Spreed-Signaling-Features", strings.Join(b.hub.info.Features, ", ")) f(w, r) } } func (b *BackendServer) welcomeFunc(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) io.WriteString(w, b.welcomeMessage) // nolint } func calculateTurnSecret(username string, secret []byte, valid time.Duration) (string, string) { expires := time.Now().Add(valid) username = fmt.Sprintf("%d:%s", expires.Unix(), username) m := hmac.New(sha1.New, secret) m.Write([]byte(username)) // nolint password := base64.StdEncoding.EncodeToString(m.Sum(nil)) return username, password } func (b *BackendServer) getTurnCredentials(w http.ResponseWriter, r *http.Request) { q := r.URL.Query() service := q.Get("service") username := q.Get("username") key := q.Get("key") if key == "" { // The RFC actually defines "key" to be the parameter, but Janus sends it as "api". 
key = q.Get("api") } if service != "turn" || key == "" { w.WriteHeader(http.StatusBadRequest) io.WriteString(w, "Invalid service and/or key sent.\n") // nolint return } if key != b.turnapikey { w.WriteHeader(http.StatusForbidden) io.WriteString(w, "Not allowed to access this service.\n") // nolint return } if len(b.turnservers) == 0 { w.WriteHeader(http.StatusNotFound) io.WriteString(w, "No TURN servers available.\n") // nolint return } if username == "" { // Make sure to include an actual username in the credentials. username = newRandomString(randomUsernameLength) } username, password := calculateTurnSecret(username, b.turnsecret, b.turnvalid) result := TurnCredentials{ Username: username, Password: password, TTL: int64(b.turnvalid.Seconds()), URIs: b.turnservers, } data, err := json.Marshal(result) if err != nil { log.Printf("Could not serialize TURN credentials: %s", err) w.WriteHeader(http.StatusInternalServerError) io.WriteString(w, "Could not serialize credentials.") // nolint return } if data[len(data)-1] != '\n' { data = append(data, '\n') } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) w.Write(data) // nolint } func (b *BackendServer) parseRequestBody(f func(http.ResponseWriter, *http.Request, []byte)) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { // Sanity checks if r.ContentLength == -1 { http.Error(w, "Length required", http.StatusLengthRequired) return } else if r.ContentLength > maxBodySize { http.Error(w, "Request entity too large", http.StatusRequestEntityTooLarge) return } ct := r.Header.Get("Content-Type") if !strings.HasPrefix(ct, "application/json") { log.Printf("Received unsupported content-type: %s", ct) http.Error(w, "Unsupported Content-Type", http.StatusBadRequest) return } if r.Header.Get(HeaderBackendSignalingRandom) == "" || r.Header.Get(HeaderBackendSignalingChecksum) == "" { http.Error(w, "Authentication check failed", 
http.StatusForbidden) return } body, err := io.ReadAll(r.Body) if err != nil { log.Println("Error reading body: ", err) http.Error(w, "Could not read body", http.StatusBadRequest) return } f(w, r, body) } } func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties *json.RawMessage) { msg := &AsyncMessage{ Type: "message", Message: &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "roomlist", Type: "invite", Invite: &RoomEventServerMessage{ RoomId: roomid, Properties: properties, }, }, }, } for _, userid := range userids { if err := b.events.PublishUserMessage(userid, backend, msg); err != nil { log.Printf("Could not publish room invite for user %s in backend %s: %s", userid, backend.Id(), err) } } } func (b *BackendServer) sendRoomDisinvite(roomid string, backend *Backend, reason string, userids []string, sessionids []string) { msg := &AsyncMessage{ Type: "message", Message: &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "roomlist", Type: "disinvite", Disinvite: &RoomDisinviteEventServerMessage{ RoomEventServerMessage: RoomEventServerMessage{ RoomId: roomid, }, Reason: reason, }, }, }, } for _, userid := range userids { if err := b.events.PublishUserMessage(userid, backend, msg); err != nil { log.Printf("Could not publish room disinvite for user %s in backend %s: %s", userid, backend.Id(), err) } } timeout := time.Second ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() var wg sync.WaitGroup for _, sessionid := range sessionids { if sessionid == sessionIdNotInMeeting { // Ignore entries that are no longer in the meeting. 
continue } wg.Add(1) go func(sessionid string) { defer wg.Done() if sid, err := b.lookupByRoomSessionId(ctx, sessionid, nil); err != nil { log.Printf("Could not lookup by room session %s: %s", sessionid, err) } else if sid != "" { if err := b.events.PublishSessionMessage(sid, backend, msg); err != nil { log.Printf("Could not publish room disinvite for session %s: %s", sid, err) } } }(sessionid) } wg.Wait() } func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties *json.RawMessage) { msg := &AsyncMessage{ Type: "message", Message: &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "roomlist", Type: "update", Update: &RoomEventServerMessage{ RoomId: roomid, Properties: properties, }, }, }, } notified := make(map[string]bool) for _, userid := range notified_userids { notified[userid] = true } // Only send to users not notified otherwise. for _, userid := range all_userids { if notified[userid] { continue } if err := b.events.PublishUserMessage(userid, backend, msg); err != nil { log.Printf("Could not publish room update for user %s in backend %s: %s", userid, backend.Id(), err) } } } func (b *BackendServer) lookupByRoomSessionId(ctx context.Context, roomSessionId string, cache *ConcurrentStringStringMap) (string, error) { if roomSessionId == sessionIdNotInMeeting { log.Printf("Trying to lookup empty room session id: %s", roomSessionId) return "", nil } if cache != nil { if result, found := cache.Get(roomSessionId); found { return result, nil } } sid, err := b.roomSessions.LookupSessionId(ctx, roomSessionId, "") if err == ErrNoSuchRoomSession { return "", nil } else if err != nil { return "", err } if cache != nil { cache.Set(roomSessionId, sid) } return sid, nil } func (b *BackendServer) fixupUserSessions(ctx context.Context, cache *ConcurrentStringStringMap, users []map[string]interface{}) []map[string]interface{} { if len(users) == 0 { return users } var wg sync.WaitGroup for 
_, user := range users { roomSessionIdOb, found := user["sessionId"] if !found { continue } roomSessionId, ok := roomSessionIdOb.(string) if !ok { log.Printf("User %+v has invalid room session id, ignoring", user) delete(user, "sessionId") continue } if roomSessionId == sessionIdNotInMeeting { log.Printf("User %+v is not in the meeting, ignoring", user) delete(user, "sessionId") continue } wg.Add(1) go func(roomSessionId string, u map[string]interface{}) { defer wg.Done() if sessionId, err := b.lookupByRoomSessionId(ctx, roomSessionId, cache); err != nil { log.Printf("Could not lookup by room session %s: %s", roomSessionId, err) delete(u, "sessionId") } else if sessionId != "" { u["sessionId"] = sessionId } else { // sessionId == "" delete(u, "sessionId") } }(roomSessionId, user) } wg.Wait() result := make([]map[string]interface{}, 0, len(users)) for _, user := range users { if _, found := user["sessionId"]; found { result = append(result, user) } } return result } func (b *BackendServer) sendRoomIncall(roomid string, backend *Backend, request *BackendServerRoomRequest) error { if !request.InCall.All { timeout := time.Second ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() var cache ConcurrentStringStringMap // Convert (Nextcloud) session ids to signaling session ids. request.InCall.Users = b.fixupUserSessions(ctx, &cache, request.InCall.Users) // Entries in "Changed" are most likely already fetched through the "Users" list. request.InCall.Changed = b.fixupUserSessions(ctx, &cache, request.InCall.Changed) if len(request.InCall.Users) == 0 && len(request.InCall.Changed) == 0 { return nil } } message := &AsyncMessage{ Type: "room", Room: request, } return b.events.PublishBackendRoomMessage(roomid, backend, message) } func (b *BackendServer) sendRoomParticipantsUpdate(roomid string, backend *Backend, request *BackendServerRoomRequest) error { timeout := time.Second // Convert (Nextcloud) session ids to signaling session ids. 
ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() var cache ConcurrentStringStringMap request.Participants.Users = b.fixupUserSessions(ctx, &cache, request.Participants.Users) request.Participants.Changed = b.fixupUserSessions(ctx, &cache, request.Participants.Changed) if len(request.Participants.Users) == 0 && len(request.Participants.Changed) == 0 { return nil } var wg sync.WaitGroup loop: for _, user := range request.Participants.Changed { permissionsInterface, found := user["permissions"] if !found { continue } sessionId := user["sessionId"].(string) permissionsList, ok := permissionsInterface.([]interface{}) if !ok { log.Printf("Received invalid permissions %+v (%s) for session %s", permissionsInterface, reflect.TypeOf(permissionsInterface), sessionId) continue } var permissions []Permission for idx, ob := range permissionsList { permission, ok := ob.(string) if !ok { log.Printf("Received invalid permission at position %d %+v (%s) for session %s", idx, ob, reflect.TypeOf(ob), sessionId) continue loop } permissions = append(permissions, Permission(permission)) } wg.Add(1) go func(sessionId string, permissions []Permission) { defer wg.Done() message := &AsyncMessage{ Type: "permissions", Permissions: permissions, } if err := b.events.PublishSessionMessage(sessionId, backend, message); err != nil { log.Printf("Could not send permissions update (%+v) to session %s: %s", permissions, sessionId, err) } }(sessionId, permissions) } wg.Wait() message := &AsyncMessage{ Type: "room", Room: request, } return b.events.PublishBackendRoomMessage(roomid, backend, message) } func (b *BackendServer) sendRoomMessage(roomid string, backend *Backend, request *BackendServerRoomRequest) error { message := &AsyncMessage{ Type: "room", Room: request, } return b.events.PublishBackendRoomMessage(roomid, backend, message) } func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, request *BackendServerRoomRequest) error { timeout := 
time.Second // Convert (Nextcloud) session ids to signaling session ids. ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() var wg sync.WaitGroup var mu sync.Mutex if request.SwitchTo.Sessions != nil { // We support both a list of sessions or a map with additional details per session. if (*request.SwitchTo.Sessions)[0] == '[' { var sessionsList BackendRoomSwitchToSessionsList if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsList); err != nil { return err } if len(sessionsList) == 0 { return nil } var internalSessionsList BackendRoomSwitchToSessionsList for _, roomSessionId := range sessionsList { if roomSessionId == sessionIdNotInMeeting { continue } wg.Add(1) go func(roomSessionId string) { defer wg.Done() if sessionId, err := b.lookupByRoomSessionId(ctx, roomSessionId, nil); err != nil { log.Printf("Could not lookup by room session %s: %s", roomSessionId, err) } else if sessionId != "" { mu.Lock() defer mu.Unlock() internalSessionsList = append(internalSessionsList, sessionId) } }(roomSessionId) } wg.Wait() mu.Lock() defer mu.Unlock() if len(internalSessionsList) == 0 { return nil } request.SwitchTo.SessionsList = internalSessionsList request.SwitchTo.SessionsMap = nil } else { var sessionsMap BackendRoomSwitchToSessionsMap if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsMap); err != nil { return err } if len(sessionsMap) == 0 { return nil } internalSessionsMap := make(BackendRoomSwitchToSessionsMap) for roomSessionId, details := range sessionsMap { if roomSessionId == sessionIdNotInMeeting { continue } wg.Add(1) go func(roomSessionId string, details json.RawMessage) { defer wg.Done() if sessionId, err := b.lookupByRoomSessionId(ctx, roomSessionId, nil); err != nil { log.Printf("Could not lookup by room session %s: %s", roomSessionId, err) } else if sessionId != "" { mu.Lock() defer mu.Unlock() internalSessionsMap[sessionId] = details } }(roomSessionId, details) } wg.Wait() mu.Lock() defer mu.Unlock() if 
len(internalSessionsMap) == 0 { return nil } request.SwitchTo.SessionsList = nil request.SwitchTo.SessionsMap = internalSessionsMap } } request.SwitchTo.Sessions = nil message := &AsyncMessage{ Type: "room", Room: request, } return b.events.PublishBackendRoomMessage(roomid, backend, message) } type BackendResponseWithStatus interface { Status() int } type DialoutErrorResponse struct { BackendServerRoomResponse status int } func (r *DialoutErrorResponse) Status() int { return r.status } func returnDialoutError(status int, err *Error) (any, error) { response := &DialoutErrorResponse{ BackendServerRoomResponse: BackendServerRoomResponse{ Type: "dialout", Dialout: &BackendRoomDialoutResponse{ Error: err, }, }, status: status, } return response, nil } var checkNumeric = regexp.MustCompile(`^[0-9]+$`) func isNumeric(s string) bool { return checkNumeric.MatchString(s) } func (b *BackendServer) startDialout(roomid string, backend *Backend, backendUrl string, request *BackendServerRoomRequest) (any, error) { if err := request.Dialout.ValidateNumber(); err != nil { return returnDialoutError(http.StatusBadRequest, err) } if !isNumeric(roomid) { return returnDialoutError(http.StatusBadRequest, NewError("invalid_roomid", "The room id must be numeric.")) } session := b.hub.GetDialoutSession(roomid, backend) if session == nil { return returnDialoutError(http.StatusNotFound, NewError("no_client_available", "No available client found to trigger dialout.")) } url := backend.Url() if url == "" { // Old-style compat backend, use client-provided URL. 
url = backendUrl if url != "" && url[len(url)-1] != '/' { url += "/" } } id := newRandomString(32) msg := &ServerMessage{ Id: id, Type: "internal", Internal: &InternalServerMessage{ Type: "dialout", Dialout: &InternalServerDialoutRequest{ RoomId: roomid, Backend: url, Request: request.Dialout, }, }, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() var response atomic.Pointer[DialoutInternalClientMessage] session.HandleResponse(id, func(message *ClientMessage) bool { response.Store(message.Internal.Dialout) cancel() // Don't send error to other sessions in the room. return message.Internal.Dialout.Error != nil }) defer session.ClearResponseHandler(id) if !session.SendMessage(msg) { return returnDialoutError(http.StatusBadGateway, NewError("error_notify", "Could not notify about new dialout.")) } <-ctx.Done() if err := ctx.Err(); err != nil && !errors.Is(err, context.Canceled) { return returnDialoutError(http.StatusGatewayTimeout, NewError("timeout", "Timeout while waiting for dialout to start.")) } dialout := response.Load() if dialout == nil { return returnDialoutError(http.StatusBadGateway, NewError("error_notify", "No dialout response received.")) } switch dialout.Type { case "error": return returnDialoutError(http.StatusBadGateway, dialout.Error) case "status": if dialout.Status.Status != DialoutStatusAccepted { log.Printf("Received unsupported dialout status when triggering dialout: %+v", dialout) return returnDialoutError(http.StatusBadGateway, NewError("unsupported_status", "Unsupported dialout status received.")) } return &BackendServerRoomResponse{ Type: "dialout", Dialout: &BackendRoomDialoutResponse{ CallId: dialout.Status.CallId, }, }, nil } log.Printf("Received unsupported dialout type when triggering dialout: %+v", dialout) return returnDialoutError(http.StatusBadGateway, NewError("unsupported_type", "Unsupported dialout type received.")) } func (b *BackendServer) roomHandler(w http.ResponseWriter, r 
*http.Request, body []byte) { v := mux.Vars(r) roomid := v["roomid"] var backend *Backend backendUrl := r.Header.Get(HeaderBackendServer) if backendUrl != "" { if u, err := url.Parse(backendUrl); err == nil { backend = b.hub.backend.GetBackend(u) } if backend == nil { // Unknown backend URL passed, return immediately. http.Error(w, "Authentication check failed", http.StatusForbidden) return } } if backend == nil { if compatBackend := b.hub.backend.GetCompatBackend(); compatBackend != nil { // Old-style configuration using a single secret for all backends. backend = compatBackend } else { // Old-style Talk, find backend that created the checksum. // TODO(fancycode): Remove once all supported Talk versions send the backend header. for _, b := range b.hub.backend.GetBackends() { if ValidateBackendChecksum(r, body, b.Secret()) { backend = b break } } } if backend == nil { http.Error(w, "Authentication check failed", http.StatusForbidden) return } } if !ValidateBackendChecksum(r, body, backend.Secret()) { http.Error(w, "Authentication check failed", http.StatusForbidden) return } var request BackendServerRoomRequest if err := json.Unmarshal(body, &request); err != nil { log.Printf("Error decoding body %s: %s", string(body), err) http.Error(w, "Could not read body", http.StatusBadRequest) return } request.ReceivedTime = time.Now().UnixNano() var response any var err error switch request.Type { case "invite": b.sendRoomInvite(roomid, backend, request.Invite.UserIds, request.Invite.Properties) b.sendRoomUpdate(roomid, backend, request.Invite.UserIds, request.Invite.AllUserIds, request.Invite.Properties) case "disinvite": b.sendRoomDisinvite(roomid, backend, DisinviteReasonDisinvited, request.Disinvite.UserIds, request.Disinvite.SessionIds) b.sendRoomUpdate(roomid, backend, request.Disinvite.UserIds, request.Disinvite.AllUserIds, request.Disinvite.Properties) case "update": message := &AsyncMessage{ Type: "room", Room: &request, } err = 
b.events.PublishBackendRoomMessage(roomid, backend, message) b.sendRoomUpdate(roomid, backend, nil, request.Update.UserIds, request.Update.Properties) case "delete": message := &AsyncMessage{ Type: "room", Room: &request, } err = b.events.PublishBackendRoomMessage(roomid, backend, message) b.sendRoomDisinvite(roomid, backend, DisinviteReasonDeleted, request.Delete.UserIds, nil) case "incall": err = b.sendRoomIncall(roomid, backend, &request) case "participants": err = b.sendRoomParticipantsUpdate(roomid, backend, &request) case "message": err = b.sendRoomMessage(roomid, backend, &request) case "switchto": err = b.sendRoomSwitchTo(roomid, backend, &request) case "dialout": response, err = b.startDialout(roomid, backend, backendUrl, &request) default: http.Error(w, "Unsupported request type: "+request.Type, http.StatusBadRequest) return } if err != nil { log.Printf("Error processing %s for room %s: %s", string(body), roomid, err) http.Error(w, "Error while processing", http.StatusInternalServerError) return } var responseData []byte responseStatus := http.StatusOK if response == nil { // TODO(jojo): Return better response struct. 
responseData = []byte("{}") } else { if s, ok := response.(BackendResponseWithStatus); ok { responseStatus = s.Status() } responseData, err = json.Marshal(response) if err != nil { log.Printf("Could not serialize backend response %+v: %s", response, err) responseStatus = http.StatusInternalServerError responseData = []byte("{\"error\":\"could_not_serialize\"}") } } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(responseStatus) w.Write(responseData) // nolint } func (b *BackendServer) allowStatsAccess(r *http.Request) bool { addr := getRealUserIP(r) if strings.Contains(addr, ":") { if host, _, err := net.SplitHostPort(addr); err == nil { addr = host } } ip := net.ParseIP(addr) if ip == nil { return false } return b.statsAllowedIps.Allowed(ip) } func (b *BackendServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { if !b.allowStatsAccess(r) { http.Error(w, "Authentication check failed", http.StatusForbidden) return } f(w, r) } } func (b *BackendServer) statsHandler(w http.ResponseWriter, r *http.Request) { stats := b.hub.GetStats() statsData, err := json.MarshalIndent(stats, "", " ") if err != nil { log.Printf("Could not serialize stats %+v: %s", stats, err) http.Error(w, "Internal server error", http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(http.StatusOK) w.Write(statsData) // nolint } func (b *BackendServer) metricsHandler(w http.ResponseWriter, r *http.Request) { promhttp.Handler().ServeHTTP(w, r) } nextcloud-spreed-signaling-1.2.4/backend_server_test.go000066400000000000000000001613451460321600400233250ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "bytes" "context" "crypto/hmac" "crypto/sha1" "encoding/base64" "encoding/json" "fmt" "io" "net/http" "net/http/httptest" "net/textproto" "net/url" "reflect" "strings" "sync" "testing" "time" "github.com/dlintw/goconf" "github.com/gorilla/mux" "github.com/gorilla/websocket" ) var ( turnApiKey = "TheApiKey" turnSecret = "TheTurnSecret" turnServersString = "turn:1.2.3.4:9991?transport=udp,turn:1.2.3.4:9991?transport=tcp" turnServers = strings.Split(turnServersString, ",") ) func CreateBackendServerForTest(t *testing.T) (*goconf.ConfigFile, *BackendServer, AsyncEvents, *Hub, *mux.Router, *httptest.Server) { return CreateBackendServerForTestFromConfig(t, nil) } func CreateBackendServerForTestWithTurn(t *testing.T) (*goconf.ConfigFile, *BackendServer, AsyncEvents, *Hub, *mux.Router, *httptest.Server) { config := goconf.NewConfigFile() config.AddOption("turn", "apikey", turnApiKey) config.AddOption("turn", "secret", turnSecret) config.AddOption("turn", "servers", turnServersString) return CreateBackendServerForTestFromConfig(t, config) } func CreateBackendServerForTestFromConfig(t *testing.T, config *goconf.ConfigFile) (*goconf.ConfigFile, *BackendServer, AsyncEvents, *Hub, *mux.Router, *httptest.Server) { r := 
mux.NewRouter() registerBackendHandler(t, r) server := httptest.NewServer(r) t.Cleanup(func() { server.Close() }) if config == nil { config = goconf.NewConfigFile() } u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } if strings.Contains(t.Name(), "Compat") { config.AddOption("backend", "allowed", u.Host) config.AddOption("backend", "secret", string(testBackendSecret)) } else { backendId := "backend1" config.AddOption("backend", "backends", backendId) config.AddOption(backendId, "url", server.URL) config.AddOption(backendId, "secret", string(testBackendSecret)) } if u.Scheme == "http" { config.AddOption("backend", "allowhttp", "true") } config.AddOption("sessions", "hashkey", "12345678901234567890123456789012") config.AddOption("sessions", "blockkey", "09876543210987654321098765432109") config.AddOption("clients", "internalsecret", string(testInternalSecret)) config.AddOption("geoip", "url", "none") events := getAsyncEventsForTest(t) hub, err := NewHub(config, events, nil, nil, nil, r, "no-version") if err != nil { t.Fatal(err) } b, err := NewBackendServer(config, hub, "no-version") if err != nil { t.Fatal(err) } if err := b.Start(r); err != nil { t.Fatal(err) } go hub.Run() t.Cleanup(func() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() WaitForHub(ctx, t, hub) }) return config, b, events, hub, r, server } func CreateBackendServerWithClusteringForTest(t *testing.T) (*BackendServer, *BackendServer, *Hub, *Hub, *httptest.Server, *httptest.Server) { return CreateBackendServerWithClusteringForTestFromConfig(t, nil, nil) } func CreateBackendServerWithClusteringForTestFromConfig(t *testing.T, config1 *goconf.ConfigFile, config2 *goconf.ConfigFile) (*BackendServer, *BackendServer, *Hub, *Hub, *httptest.Server, *httptest.Server) { r1 := mux.NewRouter() registerBackendHandler(t, r1) server1 := httptest.NewServer(r1) t.Cleanup(func() { server1.Close() }) r2 := mux.NewRouter() registerBackendHandler(t, r2) server2 := 
httptest.NewServer(r2) t.Cleanup(func() { server2.Close() }) nats := startLocalNatsServer(t) grpcServer1, addr1 := NewGrpcServerForTest(t) grpcServer2, addr2 := NewGrpcServerForTest(t) if config1 == nil { config1 = goconf.NewConfigFile() } u1, err := url.Parse(server1.URL) if err != nil { t.Fatal(err) } config1.AddOption("backend", "allowed", u1.Host) if u1.Scheme == "http" { config1.AddOption("backend", "allowhttp", "true") } config1.AddOption("backend", "secret", string(testBackendSecret)) config1.AddOption("sessions", "hashkey", "12345678901234567890123456789012") config1.AddOption("sessions", "blockkey", "09876543210987654321098765432109") config1.AddOption("clients", "internalsecret", string(testInternalSecret)) config1.AddOption("geoip", "url", "none") events1, err := NewAsyncEvents(nats) if err != nil { t.Fatal(err) } t.Cleanup(func() { events1.Close() }) client1, _ := NewGrpcClientsForTest(t, addr2) hub1, err := NewHub(config1, events1, grpcServer1, client1, nil, r1, "no-version") if err != nil { t.Fatal(err) } if config2 == nil { config2 = goconf.NewConfigFile() } u2, err := url.Parse(server2.URL) if err != nil { t.Fatal(err) } config2.AddOption("backend", "allowed", u2.Host) if u2.Scheme == "http" { config2.AddOption("backend", "allowhttp", "true") } config2.AddOption("backend", "secret", string(testBackendSecret)) config2.AddOption("sessions", "hashkey", "12345678901234567890123456789012") config2.AddOption("sessions", "blockkey", "09876543210987654321098765432109") config2.AddOption("clients", "internalsecret", string(testInternalSecret)) config2.AddOption("geoip", "url", "none") events2, err := NewAsyncEvents(nats) if err != nil { t.Fatal(err) } t.Cleanup(func() { events2.Close() }) client2, _ := NewGrpcClientsForTest(t, addr1) hub2, err := NewHub(config2, events2, grpcServer2, client2, nil, r2, "no-version") if err != nil { t.Fatal(err) } b1, err := NewBackendServer(config1, hub1, "no-version") if err != nil { t.Fatal(err) } if err := b1.Start(r1); 
err != nil { t.Fatal(err) } b2, err := NewBackendServer(config2, hub2, "no-version") if err != nil { t.Fatal(err) } if err := b2.Start(r2); err != nil { t.Fatal(err) } go hub1.Run() go hub2.Run() t.Cleanup(func() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() WaitForHub(ctx, t, hub1) WaitForHub(ctx, t, hub2) }) return b1, b2, hub1, hub2, server1, server2 } func performBackendRequest(requestUrl string, body []byte) (*http.Response, error) { request, err := http.NewRequest("POST", requestUrl, bytes.NewReader(body)) if err != nil { return nil, err } request.Header.Set("Content-Type", "application/json") rnd := newRandomString(32) check := CalculateBackendChecksum(rnd, body, testBackendSecret) request.Header.Set("Spreed-Signaling-Random", rnd) request.Header.Set("Spreed-Signaling-Checksum", check) u, err := url.Parse(requestUrl) if err != nil { return nil, err } request.Header.Set("Spreed-Signaling-Backend", u.Scheme+"://"+u.Host) client := &http.Client{} return client.Do(request) } func expectRoomlistEvent(ch chan *AsyncMessage, msgType string) (*EventServerMessage, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() select { case message := <-ch: if message.Type != "message" || message.Message == nil { return nil, fmt.Errorf("Expected message type message, got %+v", message) } msg := message.Message if msg.Type != "event" || msg.Event == nil { return nil, fmt.Errorf("Expected message type event, got %+v", msg) } if msg.Event.Target != "roomlist" || msg.Event.Type != msgType { return nil, fmt.Errorf("Expected roomlist %s event, got %+v", msgType, msg.Event) } return msg.Event, nil case <-ctx.Done(): return nil, ctx.Err() } } func TestBackendServer_NoAuth(t *testing.T) { _, _, _, _, _, server := CreateBackendServerForTest(t) roomId := "the-room-id" data := []byte{'{', '}'} request, err := http.NewRequest("POST", server.URL+"/api/v1/room/"+roomId, bytes.NewReader(data)) if err != nil { 
t.Fatal(err) } request.Header.Set("Content-Type", "application/json") client := &http.Client{} res, err := client.Do(request) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusForbidden { t.Errorf("Expected error response, got %s: %s", res.Status, string(body)) } } func TestBackendServer_InvalidAuth(t *testing.T) { _, _, _, _, _, server := CreateBackendServerForTest(t) roomId := "the-room-id" data := []byte{'{', '}'} request, err := http.NewRequest("POST", server.URL+"/api/v1/room/"+roomId, bytes.NewReader(data)) if err != nil { t.Fatal(err) } request.Header.Set("Content-Type", "application/json") request.Header.Set("Spreed-Signaling-Random", "hello") request.Header.Set("Spreed-Signaling-Checksum", "world") client := &http.Client{} res, err := client.Do(request) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusForbidden { t.Errorf("Expected error response, got %s: %s", res.Status, string(body)) } } func TestBackendServer_OldCompatAuth(t *testing.T) { _, _, _, _, _, server := CreateBackendServerForTest(t) roomId := "the-room-id" userid := "the-user-id" roomProperties := json.RawMessage("{\"foo\":\"bar\"}") msg := &BackendServerRoomRequest{ Type: "invite", Invite: &BackendRoomInviteRequest{ UserIds: []string{ userid, }, AllUserIds: []string{ userid, }, Properties: &roomProperties, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } request, err := http.NewRequest("POST", server.URL+"/api/v1/room/"+roomId, bytes.NewReader(data)) if err != nil { t.Fatal(err) } request.Header.Set("Content-Type", "application/json") rnd := newRandomString(32) check := CalculateBackendChecksum(rnd, data, testBackendSecret) request.Header.Set("Spreed-Signaling-Random", rnd) request.Header.Set("Spreed-Signaling-Checksum", check) client := &http.Client{} res, err := 
client.Do(request) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusOK { t.Errorf("Expected success, got %s: %s", res.Status, string(body)) } } func TestBackendServer_InvalidBody(t *testing.T) { _, _, _, _, _, server := CreateBackendServerForTest(t) roomId := "the-room-id" data := []byte{1, 2, 3, 4} // Invalid JSON res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusBadRequest { t.Errorf("Expected error response, got %s: %s", res.Status, string(body)) } } func TestBackendServer_UnsupportedRequest(t *testing.T) { _, _, _, _, _, server := CreateBackendServerForTest(t) msg := &BackendServerRoomRequest{ Type: "lala", } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } roomId := "the-room-id" res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusBadRequest { t.Errorf("Expected error response, got %s: %s", res.Status, string(body)) } } func TestBackendServer_RoomInvite(t *testing.T) { for _, backend := range eventBackendsForTest { t.Run(backend, func(t *testing.T) { RunTestBackendServer_RoomInvite(t) }) } } type channelEventListener struct { ch chan *AsyncMessage } func (l *channelEventListener) ProcessAsyncUserMessage(message *AsyncMessage) { l.ch <- message } func RunTestBackendServer_RoomInvite(t *testing.T) { _, _, events, hub, _, server := CreateBackendServerForTest(t) u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } userid := "test-userid" roomProperties := json.RawMessage("{\"foo\":\"bar\"}") backend := hub.backend.GetBackend(u) eventsChan := make(chan *AsyncMessage, 1) listener := 
&channelEventListener{ ch: eventsChan, } if err := events.RegisterUserListener(userid, backend, listener); err != nil { t.Fatal(err) } defer events.UnregisterUserListener(userid, backend, listener) msg := &BackendServerRoomRequest{ Type: "invite", Invite: &BackendRoomInviteRequest{ UserIds: []string{ userid, }, AllUserIds: []string{ userid, }, Properties: &roomProperties, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } roomId := "the-room-id" res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } event, err := expectRoomlistEvent(eventsChan, "invite") if err != nil { t.Error(err) } else if event.Invite == nil { t.Errorf("Expected invite, got %+v", event) } else if event.Invite.RoomId != roomId { t.Errorf("Expected room %s, got %+v", roomId, event) } else if event.Invite.Properties == nil || !bytes.Equal(*event.Invite.Properties, roomProperties) { t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Invite.Properties)) } } func TestBackendServer_RoomDisinvite(t *testing.T) { for _, backend := range eventBackendsForTest { t.Run(backend, func(t *testing.T) { RunTestBackendServer_RoomDisinvite(t) }) } } func RunTestBackendServer_RoomDisinvite(t *testing.T) { _, _, events, hub, _, server := CreateBackendServerForTest(t) u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } backend := hub.backend.GetBackend(u) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. 
roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Ignore "join" events. if err := client.DrainMessages(ctx); err != nil { t.Error(err) } roomProperties := json.RawMessage("{\"foo\":\"bar\"}") eventsChan := make(chan *AsyncMessage, 1) listener := &channelEventListener{ ch: eventsChan, } if err := events.RegisterUserListener(testDefaultUserId, backend, listener); err != nil { t.Fatal(err) } defer events.UnregisterUserListener(testDefaultUserId, backend, listener) /* Disinvite targets both the user id and the concrete room session id. */ msg := &BackendServerRoomRequest{ Type: "disinvite", Disinvite: &BackendRoomDisinviteRequest{ UserIds: []string{ testDefaultUserId, }, SessionIds: []string{ roomId + "-" + hello.Hello.SessionId, }, AllUserIds: []string{}, Properties: &roomProperties, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } /* The event must carry the room id, reason "disinvited", and no properties. */ event, err := expectRoomlistEvent(eventsChan, "disinvite") if err != nil { t.Error(err) } else if event.Disinvite == nil { t.Errorf("Expected disinvite, got %+v", event) } else if event.Disinvite.RoomId != roomId { t.Errorf("Expected room %s, got %+v", roomId, event) } else if event.Disinvite.Properties != nil { t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties)) } else if event.Disinvite.Reason != "disinvited" { t.Errorf("Reason should be disinvited, got %s", event.Disinvite.Reason) } if message, err := client.RunUntilRoomlistDisinvite(ctx); err != nil { t.Error(err) } else if message.RoomId != roomId { t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId) } if message, err := 
client.RunUntilMessage(ctx); err != nil && !websocket.IsCloseError(err, websocket.CloseNoStatusReceived) { t.Errorf("Received unexpected error %s", err) } else if err == nil { t.Errorf("Server should have closed the connection, received %+v", *message) } } /* TestBackendServer_RoomDisinviteDifferentRooms: disinviting a user's session from one room must not affect their session in another room; a later room update still reaches the second session. */ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId1 := "test-room1" if _, err := client1.JoinRoom(ctx, roomId1); err != nil { t.Fatal(err) } if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil { t.Error(err) } roomId2 := "test-room2" if _, err := client2.JoinRoom(ctx, roomId2); err != nil { t.Fatal(err) } if err := client2.RunUntilJoined(ctx, hello2.Hello); err != nil { t.Error(err) } /* Disinvite only the session that is in room 1. */ msg := &BackendServerRoomRequest{ Type: "disinvite", Disinvite: &BackendRoomDisinviteRequest{ UserIds: []string{ testDefaultUserId, }, SessionIds: []string{ roomId1 + "-" + hello1.Hello.SessionId, }, AllUserIds: []string{}, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId1, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } if message, err := client1.RunUntilRoomlistDisinvite(ctx); err != nil { t.Error(err) } else if message.RoomId != roomId1 { 
t.Errorf("Expected message for room %s, got %s", roomId1, message.RoomId) } if message, err := client1.RunUntilMessage(ctx); err != nil && !websocket.IsCloseError(err, websocket.CloseNoStatusReceived) { t.Errorf("Received unexpected error %s", err) } else if err == nil { t.Errorf("Server should have closed the connection, received %+v", *message) } if message, err := client2.RunUntilRoomlistDisinvite(ctx); err != nil { t.Error(err) } else if message.RoomId != roomId1 { t.Errorf("Expected message for room %s, got %s", roomId1, message.RoomId) } /* The session in room 2 must still receive a subsequent room update. */ msg = &BackendServerRoomRequest{ Type: "update", Update: &BackendRoomUpdateRequest{ UserIds: []string{ testDefaultUserId, }, Properties: (*json.RawMessage)(&testRoomProperties), }, } data, err = json.Marshal(msg) if err != nil { t.Fatal(err) } res, err = performBackendRequest(server.URL+"/api/v1/room/"+roomId2, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err = io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } if message, err := client2.RunUntilRoomlistUpdate(ctx); err != nil { t.Error(err) } else if message.RoomId != roomId2 { t.Errorf("Expected message for room %s, got %s", roomId2, message.RoomId) } } /* TestBackendServer_RoomUpdate runs the room-update test once per configured async events backend. */ func TestBackendServer_RoomUpdate(t *testing.T) { for _, backend := range eventBackendsForTest { t.Run(backend, func(t *testing.T) { RunTestBackendServer_RoomUpdate(t) }) } } /* RunTestBackendServer_RoomUpdate: an "update" backend request must emit a roomlist update event and change the stored room properties. */ func RunTestBackendServer_RoomUpdate(t *testing.T) { _, _, events, hub, _, server := CreateBackendServerForTest(t) u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } roomId := "the-room-id" emptyProperties := json.RawMessage("{}") backend := hub.backend.GetBackend(u) if backend == nil { t.Fatalf("Did not find backend") } room, err := hub.createRoom(roomId, &emptyProperties, backend) if err != nil { t.Fatalf("Could not create room: %s", err) } defer room.Close() userid := "test-userid" roomProperties := 
json.RawMessage("{\"foo\":\"bar\"}") eventsChan := make(chan *AsyncMessage, 1) listener := &channelEventListener{ ch: eventsChan, } if err := events.RegisterUserListener(userid, backend, listener); err != nil { t.Fatal(err) } defer events.UnregisterUserListener(userid, backend, listener) /* Request the property update for the registered user. */ msg := &BackendServerRoomRequest{ Type: "update", Update: &BackendRoomUpdateRequest{ UserIds: []string{ userid, }, Properties: &roomProperties, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } /* The update event must carry the room id and the new properties. */ event, err := expectRoomlistEvent(eventsChan, "update") if err != nil { t.Error(err) } else if event.Update == nil { t.Errorf("Expected update, got %+v", event) } else if event.Update.RoomId != roomId { t.Errorf("Expected room %s, got %+v", roomId, event) } else if event.Update.Properties == nil || !bytes.Equal(*event.Update.Properties, roomProperties) { t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Update.Properties)) } // TODO: Use event to wait for asynchronous messages. 
time.Sleep(10 * time.Millisecond) room = hub.getRoom(roomId) if room == nil { t.Fatalf("Room %s does not exist", roomId) } if string(*room.Properties()) != string(roomProperties) { t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(*room.Properties())) } } /* TestBackendServer_RoomDelete runs the room-delete test once per configured async events backend. */ func TestBackendServer_RoomDelete(t *testing.T) { for _, backend := range eventBackendsForTest { t.Run(backend, func(t *testing.T) { RunTestBackendServer_RoomDelete(t) }) } } /* RunTestBackendServer_RoomDelete: a "delete" backend request must be signalled as a "disinvite" event with reason "deleted" and remove the room from the hub. */ func RunTestBackendServer_RoomDelete(t *testing.T) { _, _, events, hub, _, server := CreateBackendServerForTest(t) u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } roomId := "the-room-id" emptyProperties := json.RawMessage("{}") backend := hub.backend.GetBackend(u) if backend == nil { t.Fatalf("Did not find backend") } if _, err := hub.createRoom(roomId, &emptyProperties, backend); err != nil { t.Fatalf("Could not create room: %s", err) } userid := "test-userid" eventsChan := make(chan *AsyncMessage, 1) listener := &channelEventListener{ ch: eventsChan, } if err := events.RegisterUserListener(userid, backend, listener); err != nil { t.Fatal(err) } defer events.UnregisterUserListener(userid, backend, listener) msg := &BackendServerRoomRequest{ Type: "delete", Delete: &BackendRoomDeleteRequest{ UserIds: []string{ userid, }, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } // A deleted room is signalled as a "disinvite" event. 
event, err := expectRoomlistEvent(eventsChan, "disinvite") if err != nil { t.Error(err) } else if event.Disinvite == nil { t.Errorf("Expected disinvite, got %+v", event) } else if event.Disinvite.RoomId != roomId { t.Errorf("Expected room %s, got %+v", roomId, event) } else if event.Disinvite.Properties != nil { t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties)) } else if event.Disinvite.Reason != "deleted" { t.Errorf("Reason should be deleted, got %s", event.Disinvite.Reason) } // TODO: Use event to wait for asynchronous messages. time.Sleep(10 * time.Millisecond) room := hub.getRoom(roomId) if room != nil { t.Errorf("Room %s should have been deleted", roomId) } } /* TestBackendServer_ParticipantsUpdatePermissions: a "participants" backend request must replace the per-session publishing permissions; runs in local and clustered mode. */ func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) { for _, subtest := range clusteredTests { t.Run(subtest, func(t *testing.T) { var hub1 *Hub var hub2 *Hub var server1 *httptest.Server var server2 *httptest.Server if isLocalTest(t) { _, _, _, hub1, _, server1 = CreateBackendServerForTest(t) hub2 = hub1 server2 = server1 } else { _, _, hub1, hub2, server1, server2 = CreateBackendServerWithClusteringForTest(t) } client1 := NewTestClient(t, server1, hub1) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server2, hub2) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } session1 := hub1.GetSessionByPublicId(hello1.Hello.SessionId) if session1 == nil { t.Fatalf("Session %s does not exist", hello1.Hello.SessionId) } session2 := hub2.GetSessionByPublicId(hello2.Hello.SessionId) if session2 == nil { t.Fatalf("Session %s does not exist", hello2.Hello.SessionId) } // Sessions have all 
permissions initially (fallback for old-style sessions). assertSessionHasPermission(t, session1, PERMISSION_MAY_PUBLISH_MEDIA) assertSessionHasPermission(t, session1, PERMISSION_MAY_PUBLISH_SCREEN) assertSessionHasPermission(t, session2, PERMISSION_MAY_PUBLISH_MEDIA) assertSessionHasPermission(t, session2, PERMISSION_MAY_PUBLISH_SCREEN) // Join room by id. roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Ignore "join" events. if err := client1.DrainMessages(ctx); err != nil { t.Error(err) } if err := client2.DrainMessages(ctx); err != nil { t.Error(err) } /* Restrict each session to a single distinct permission (media for session 1, screen for session 2). */ msg := &BackendServerRoomRequest{ Type: "participants", Participants: &BackendRoomParticipantsRequest{ Changed: []map[string]interface{}{ { "sessionId": roomId + "-" + hello1.Hello.SessionId, "permissions": []Permission{PERMISSION_MAY_PUBLISH_MEDIA}, }, { "sessionId": roomId + "-" + hello2.Hello.SessionId, "permissions": []Permission{PERMISSION_MAY_PUBLISH_SCREEN}, }, }, Users: []map[string]interface{}{ { "sessionId": roomId + "-" + hello1.Hello.SessionId, "permissions": []Permission{PERMISSION_MAY_PUBLISH_MEDIA}, }, { "sessionId": roomId + "-" + hello2.Hello.SessionId, "permissions": []Permission{PERMISSION_MAY_PUBLISH_SCREEN}, }, }, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } // The request could be sent to any of the backend servers. 
res, err := performBackendRequest(server1.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } // TODO: Use event to wait for asynchronous messages. time.Sleep(10 * time.Millisecond) /* Each session must now hold only the permission assigned above. */ assertSessionHasPermission(t, session1, PERMISSION_MAY_PUBLISH_MEDIA) assertSessionHasNotPermission(t, session1, PERMISSION_MAY_PUBLISH_SCREEN) assertSessionHasNotPermission(t, session2, PERMISSION_MAY_PUBLISH_MEDIA) assertSessionHasPermission(t, session2, PERMISSION_MAY_PUBLISH_SCREEN) }) } } /* TestBackendServer_ParticipantsUpdateEmptyPermissions: updating with an empty permissions list must drop all previously granted permissions. */ func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } session := hub.GetSessionByPublicId(hello.Hello.SessionId) if session == nil { t.Fatalf("Session %s does not exist", hello.Hello.SessionId) } // Sessions have all permissions initially (fallback for old-style sessions). assertSessionHasPermission(t, session, PERMISSION_MAY_PUBLISH_MEDIA) assertSessionHasPermission(t, session, PERMISSION_MAY_PUBLISH_SCREEN) // Join room by id. roomId := "test-room" room, err := client.JoinRoom(ctx, roomId) if err != nil { t.Fatal(err) } if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Ignore "join" events. if err := client.DrainMessages(ctx); err != nil { t.Error(err) } // Updating with empty permissions upgrades to non-old-style and removes // all previously available permissions. 
msg := &BackendServerRoomRequest{ Type: "participants", Participants: &BackendRoomParticipantsRequest{ Changed: []map[string]interface{}{ { "sessionId": roomId + "-" + hello.Hello.SessionId, "permissions": []Permission{}, }, }, Users: []map[string]interface{}{ { "sessionId": roomId + "-" + hello.Hello.SessionId, "permissions": []Permission{}, }, }, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } // TODO: Use event to wait for asynchronous messages. time.Sleep(10 * time.Millisecond) assertSessionHasNotPermission(t, session, PERMISSION_MAY_PUBLISH_MEDIA) assertSessionHasNotPermission(t, session, PERMISSION_MAY_PUBLISH_SCREEN) } /* TestBackendServer_ParticipantsUpdateTimeout: two concurrent "incall" updates (one naming an unknown session id) must still result in both clients seeing two users in the call. */ func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Give message processing some time. 
time.Sleep(10 * time.Millisecond) if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2) /* First concurrent "incall" update; it also names an unknown room session id. */ var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() msg := &BackendServerRoomRequest{ Type: "incall", InCall: &BackendRoomInCallRequest{ InCall: json.RawMessage("7"), Changed: []map[string]interface{}{ { "sessionId": roomId + "-" + hello1.Hello.SessionId, "inCall": 7, }, { "sessionId": "unknown-room-session-id", "inCall": 3, }, }, Users: []map[string]interface{}{ { "sessionId": roomId + "-" + hello1.Hello.SessionId, "inCall": 7, }, { "sessionId": "unknown-room-session-id", "inCall": 3, }, }, }, } data, err := json.Marshal(msg) if err != nil { t.Error(err) return } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Error(err) return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } }() // Ensure the first request is being processed. 
time.Sleep(100 * time.Millisecond) /* Second concurrent "incall" update for both known sessions. */ wg.Add(1) go func() { defer wg.Done() msg := &BackendServerRoomRequest{ Type: "incall", InCall: &BackendRoomInCallRequest{ InCall: json.RawMessage("7"), Changed: []map[string]interface{}{ { "sessionId": roomId + "-" + hello1.Hello.SessionId, "inCall": 7, }, { "sessionId": roomId + "-" + hello2.Hello.SessionId, "inCall": 3, }, }, Users: []map[string]interface{}{ { "sessionId": roomId + "-" + hello1.Hello.SessionId, "inCall": 7, }, { "sessionId": roomId + "-" + hello2.Hello.SessionId, "inCall": 3, }, }, }, } data, err := json.Marshal(msg) if err != nil { t.Error(err) return } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Error(err) return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } }() wg.Wait() if t.Failed() { return } /* Each client may receive the two-user list in the first or second message. */ msg1_a, err := client1.RunUntilMessage(ctx) if err != nil { t.Error(err) } if in_call_1, err := checkMessageParticipantsInCall(msg1_a); err != nil { t.Error(err) } else if len(in_call_1.Users) != 2 { msg1_b, err := client1.RunUntilMessage(ctx) if err != nil { t.Error(err) } if in_call_2, err := checkMessageParticipantsInCall(msg1_b); err != nil { t.Error(err) } else if len(in_call_2.Users) != 2 { t.Errorf("Wrong number of users received: %d, expected 2", len(in_call_2.Users)) } } msg2_a, err := client2.RunUntilMessage(ctx) if err != nil { t.Error(err) } if in_call_1, err := checkMessageParticipantsInCall(msg2_a); err != nil { t.Error(err) } else if len(in_call_1.Users) != 2 { msg2_b, err := client2.RunUntilMessage(ctx) if err != nil { t.Error(err) } if in_call_2, err := checkMessageParticipantsInCall(msg2_b); err != nil { t.Error(err) } else if len(in_call_2.Users) != 2 { t.Errorf("Wrong number of users received: %d, expected 2", len(in_call_2.Users)) } } ctx2, cancel2 := context.WithTimeout(context.Background(), 
time.Second+100*time.Millisecond) defer cancel2() if msg1_c, _ := client1.RunUntilMessage(ctx2); msg1_c != nil { if in_call_2, err := checkMessageParticipantsInCall(msg1_c); err != nil { t.Error(err) } else if len(in_call_2.Users) != 2 { t.Errorf("Wrong number of users received: %d, expected 2", len(in_call_2.Users)) } } ctx3, cancel3 := context.WithTimeout(context.Background(), time.Second+100*time.Millisecond) defer cancel3() if msg2_c, _ := client2.RunUntilMessage(ctx3); msg2_c != nil { if in_call_2, err := checkMessageParticipantsInCall(msg2_c); err != nil { t.Error(err) } else if len(in_call_2.Users) != 2 { t.Errorf("Wrong number of users received: %d, expected 2", len(in_call_2.Users)) } } } /* TestBackendServer_InCallAll: an "incall" request with All set must toggle the in-call flag for every session in the room; runs in local and clustered mode. */ func TestBackendServer_InCallAll(t *testing.T) { for _, subtest := range clusteredTests { t.Run(subtest, func(t *testing.T) { var hub1 *Hub var hub2 *Hub var server1 *httptest.Server var server2 *httptest.Server if isLocalTest(t) { _, _, _, hub1, _, server1 = CreateBackendServerForTest(t) hub2 = hub1 server2 = server1 } else { _, _, hub1, hub2, server1, server2 = CreateBackendServerWithClusteringForTest(t) } client1 := NewTestClient(t, server1, hub1) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server2, hub2) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } session1 := hub1.GetSessionByPublicId(hello1.Hello.SessionId) if session1 == nil { t.Fatalf("Could not find session %s", hello1.Hello.SessionId) } session2 := hub2.GetSessionByPublicId(hello2.Hello.SessionId) if session2 == nil { t.Fatalf("Could not find session %s", hello2.Hello.SessionId) } // Join room by id. 
roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Give message processing some time. time.Sleep(10 * time.Millisecond) if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2) room1 := hub1.getRoom(roomId) if room1 == nil { t.Fatalf("Could not find room %s in hub1", roomId) } room2 := hub2.getRoom(roomId) if room2 == nil { t.Fatalf("Could not find room %s in hub2", roomId) } if room1.IsSessionInCall(session1) { t.Errorf("Session %s should not be in room %s", session1.PublicId(), room1.Id()) } if room2.IsSessionInCall(session2) { t.Errorf("Session %s should not be in room %s", session2.PublicId(), room2.Id()) } /* Put all sessions in the call via All=true and inCall=7. */ var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() msg := &BackendServerRoomRequest{ Type: "incall", InCall: &BackendRoomInCallRequest{ InCall: json.RawMessage("7"), All: true, }, } data, err := json.Marshal(msg) if err != nil { t.Error(err) return } res, err := performBackendRequest(server1.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Error(err) return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } }() wg.Wait() if t.Failed() { return } if msg1_a, err := client1.RunUntilMessage(ctx); err != nil { t.Error(err) } else if in_call_1, err := checkMessageParticipantsInCall(msg1_a); err != nil { t.Error(err) } else if !in_call_1.All { t.Errorf("All flag not set in message %+v", in_call_1) } else if !bytes.Equal(*in_call_1.InCall, []byte("7")) { t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall)) } if msg2_a, err := client2.RunUntilMessage(ctx); err != nil { 
t.Error(err) } else if in_call_1, err := checkMessageParticipantsInCall(msg2_a); err != nil { t.Error(err) } else if !in_call_1.All { t.Errorf("All flag not set in message %+v", in_call_1) } else if !bytes.Equal(*in_call_1.InCall, []byte("7")) { t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall)) } if !room1.IsSessionInCall(session1) { t.Errorf("Session %s should be in room %s", session1.PublicId(), room1.Id()) } if !room2.IsSessionInCall(session2) { t.Errorf("Session %s should be in room %s", session2.PublicId(), room2.Id()) } ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel2() if message, err := client1.RunUntilMessage(ctx2); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded { t.Error(err) } else if message != nil { t.Errorf("Expected no message, got %+v", message) } ctx3, cancel3 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel3() if message, err := client2.RunUntilMessage(ctx3); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded { t.Error(err) } else if message != nil { t.Errorf("Expected no message, got %+v", message) } /* Now clear the in-call flag for all sessions via All=true and inCall=0. */ wg.Add(1) go func() { defer wg.Done() msg := &BackendServerRoomRequest{ Type: "incall", InCall: &BackendRoomInCallRequest{ InCall: json.RawMessage("0"), All: true, }, } data, err := json.Marshal(msg) if err != nil { t.Error(err) return } res, err := performBackendRequest(server1.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Error(err) return } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } }() wg.Wait() if t.Failed() { return } if msg1_a, err := client1.RunUntilMessage(ctx); err != nil { t.Error(err) } else if in_call_1, err := checkMessageParticipantsInCall(msg1_a); err != nil { t.Error(err) } else if !in_call_1.All { t.Errorf("All flag not set in 
message %+v", in_call_1) } else if !bytes.Equal(*in_call_1.InCall, []byte("0")) { t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall)) } if msg2_a, err := client2.RunUntilMessage(ctx); err != nil { t.Error(err) } else if in_call_1, err := checkMessageParticipantsInCall(msg2_a); err != nil { t.Error(err) } else if !in_call_1.All { t.Errorf("All flag not set in message %+v", in_call_1) } else if !bytes.Equal(*in_call_1.InCall, []byte("0")) { t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall)) } if room1.IsSessionInCall(session1) { t.Errorf("Session %s should not be in room %s", session1.PublicId(), room1.Id()) } if room2.IsSessionInCall(session2) { t.Errorf("Session %s should not be in room %s", session2.PublicId(), room2.Id()) } ctx4, cancel4 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel4() if message, err := client1.RunUntilMessage(ctx4); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded { t.Error(err) } else if message != nil { t.Errorf("Expected no message, got %+v", message) } ctx5, cancel5 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel5() if message, err := client2.RunUntilMessage(ctx5); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded { t.Error(err) } else if message != nil { t.Errorf("Expected no message, got %+v", message) } }) } } /* TestBackendServer_RoomMessage: a "message" backend request must be forwarded verbatim to clients in the room. */ func TestBackendServer_RoomMessage(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() _, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. 
roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Ignore "join" events. if err := client.DrainMessages(ctx); err != nil { t.Error(err) } messageData := json.RawMessage("{\"foo\":\"bar\"}") msg := &BackendServerRoomRequest{ Type: "message", Message: &BackendRoomMessageRequest{ Data: &messageData, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } message, err := client.RunUntilRoomMessage(ctx) if err != nil { t.Error(err) } else if message.RoomId != roomId { t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId) } else if !bytes.Equal(messageData, *message.Data) { t.Errorf("Expected message data %s, got %s", string(messageData), string(*message.Data)) } } /* TestBackendServer_TurnCredentials: the TURN credentials endpoint must return an HMAC-SHA1-derived password, a 24h TTL and the configured URIs. */ func TestBackendServer_TurnCredentials(t *testing.T) { _, _, _, _, _, server := CreateBackendServerForTestWithTurn(t) q := make(url.Values) q.Set("service", "turn") q.Set("api", turnApiKey) request, err := http.NewRequest("GET", server.URL+"/turn/credentials?"+q.Encode(), nil) if err != nil { t.Fatal(err) } client := &http.Client{} res, err := client.Do(request) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } var cred TurnCredentials if err := json.Unmarshal(body, &cred); err != nil { t.Fatal(err) } /* Recompute the expected password: base64(HMAC-SHA1(secret, username)). */ m := hmac.New(sha1.New, []byte(turnSecret)) m.Write([]byte(cred.Username)) // nolint password := base64.StdEncoding.EncodeToString(m.Sum(nil)) if cred.Password != 
password { t.Errorf("Expected password %s, got %s", password, cred.Password) } if cred.TTL != int64((24 * time.Hour).Seconds()) { t.Errorf("Expected a TTL of %d, got %d", int64((24 * time.Hour).Seconds()), cred.TTL) } if !reflect.DeepEqual(cred.URIs, turnServers) { t.Errorf("Expected the list of servers as %s, got %s", turnServers, cred.URIs) } } /* TestBackendServer_StatsAllowedIps: stats access must honor the "allowed_ips" option for RemoteAddr, X-Real-IP and X-Forwarded-For (including CIDR ranges and forwarded chains). */ func TestBackendServer_StatsAllowedIps(t *testing.T) { config := goconf.NewConfigFile() config.AddOption("stats", "allowed_ips", "127.0.0.1, 192.168.0.1, 192.168.1.1/24") _, backend, _, _, _, _ := CreateBackendServerForTestFromConfig(t, config) allowed := []string{ "127.0.0.1", "127.0.0.1:1234", "192.168.0.1:1234", "192.168.1.1:1234", "192.168.1.100:1234", } notAllowed := []string{ "192.168.0.2:1234", "10.1.2.3:1234", } for _, addr := range allowed { t.Run(addr, func(t *testing.T) { r1 := &http.Request{ RemoteAddr: addr, } if !backend.allowStatsAccess(r1) { t.Errorf("should allow %s", addr) } r2 := &http.Request{ RemoteAddr: "1.2.3.4:12345", Header: http.Header{ textproto.CanonicalMIMEHeaderKey("x-real-ip"): []string{addr}, }, } if !backend.allowStatsAccess(r2) { t.Errorf("should allow %s", addr) } r3 := &http.Request{ RemoteAddr: "1.2.3.4:12345", Header: http.Header{ textproto.CanonicalMIMEHeaderKey("x-forwarded-for"): []string{addr}, }, } if !backend.allowStatsAccess(r3) { t.Errorf("should allow %s", addr) } r4 := &http.Request{ RemoteAddr: "1.2.3.4:12345", Header: http.Header{ textproto.CanonicalMIMEHeaderKey("x-forwarded-for"): []string{addr + ", 1.2.3.4:23456"}, }, } if !backend.allowStatsAccess(r4) { t.Errorf("should allow %s", addr) } }) } for _, addr := range notAllowed { t.Run(addr, func(t *testing.T) { r := &http.Request{ RemoteAddr: addr, } if backend.allowStatsAccess(r) { t.Errorf("should not allow %s", addr) } }) } } /* Test_IsNumeric checks that isNumeric accepts only unsigned decimal digit strings (no spaces, signs, dots or letters). */ func Test_IsNumeric(t *testing.T) { numeric := []string{ "0", "1", "12345", } nonNumeric := []string{ "", " ", " 0", "0 ", " 0 ", "-1", "1.2", "1a", "a1", } for _, s := range numeric { if 
!isNumeric(s) { t.Errorf("%s should be numeric", s) } } for _, s := range nonNumeric { if isNumeric(s) { t.Errorf("%s should not be numeric", s) } } } /* TestBackendServer_DialoutNoSipBridge: a "dialout" request without a connected client offering the "start-dialout" feature must fail with HTTP 404 and error code "no_client_available". */ func TestBackendServer_DialoutNoSipBridge(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHelloInternal(); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() _, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomId := "12345" msg := &BackendServerRoomRequest{ Type: "dialout", Dialout: &BackendRoomDialoutRequest{ Number: "+1234567890", }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusNotFound { t.Fatalf("Expected error %d, got %s: %s", http.StatusNotFound, res.Status, string(body)) } var response BackendServerRoomResponse if err := json.Unmarshal(body, &response); err != nil { t.Fatal(err) } if response.Type != "dialout" || response.Dialout == nil { t.Fatalf("expected type dialout, got %s", string(body)) } if response.Dialout.Error == nil { t.Fatalf("expected dialout error, got %s", string(body)) } if expected := "no_client_available"; response.Dialout.Error.Code != expected { t.Errorf("expected error code %s, got %s", expected, string(body)) } } /* TestBackendServer_DialoutAccepted: a "dialout" request must be relayed to an internal client with the "start-dialout" feature, and the client's "accepted" status (with call id) must be returned to the backend caller. */ func TestBackendServer_DialoutAccepted(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() _, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomId := 
"12345" callId := "call-123" stopped := make(chan struct{}) go func() { defer close(stopped) msg, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) return } if msg.Type != "internal" || msg.Internal.Type != "dialout" { t.Errorf("expected internal dialout message, got %+v", msg) return } if msg.Internal.Dialout.RoomId != roomId { t.Errorf("expected room id %s, got %+v", roomId, msg) } if url := server.URL + "/"; msg.Internal.Dialout.Backend != url { t.Errorf("expected backend %s, got %+v", url, msg) } response := &ClientMessage{ Id: msg.Id, Type: "internal", Internal: &InternalClientMessage{ Type: "dialout", Dialout: &DialoutInternalClientMessage{ Type: "status", RoomId: msg.Internal.Dialout.RoomId, Status: &DialoutStatusInternalClientMessage{ Status: "accepted", CallId: callId, }, }, }, } if err := client.WriteJSON(response); err != nil { t.Error(err) } }() defer func() { <-stopped }() msg := &BackendServerRoomRequest{ Type: "dialout", Dialout: &BackendRoomDialoutRequest{ Number: "+1234567890", }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusOK { t.Fatalf("Expected error %d, got %s: %s", http.StatusOK, res.Status, string(body)) } var response BackendServerRoomResponse if err := json.Unmarshal(body, &response); err != nil { t.Fatal(err) } if response.Type != "dialout" || response.Dialout == nil { t.Fatalf("expected type dialout, got %s", string(body)) } if response.Dialout.Error != nil { t.Fatalf("expected dialout success, got %s", string(body)) } if response.Dialout.CallId != callId { t.Errorf("expected call id %s, got %s", callId, string(body)) } } func TestBackendServer_DialoutAcceptedCompat(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client := NewTestClient(t, server, hub) defer 
client.CloseWithBye() if err := client.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() _, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomId := "12345" callId := "call-123" stopped := make(chan struct{}) go func() { defer close(stopped) msg, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) return } if msg.Type != "internal" || msg.Internal.Type != "dialout" { t.Errorf("expected internal dialout message, got %+v", msg) return } if msg.Internal.Dialout.RoomId != roomId { t.Errorf("expected room id %s, got %+v", roomId, msg) } if url := server.URL + "/"; msg.Internal.Dialout.Backend != url { t.Errorf("expected backend %s, got %+v", url, msg) } response := &ClientMessage{ Id: msg.Id, Type: "internal", Internal: &InternalClientMessage{ Type: "dialout", Dialout: &DialoutInternalClientMessage{ Type: "status", RoomId: msg.Internal.Dialout.RoomId, Status: &DialoutStatusInternalClientMessage{ Status: "accepted", CallId: callId, }, }, }, } if err := client.WriteJSON(response); err != nil { t.Error(err) } }() defer func() { <-stopped }() msg := &BackendServerRoomRequest{ Type: "dialout", Dialout: &BackendRoomDialoutRequest{ Number: "+1234567890", }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusOK { t.Fatalf("Expected error %d, got %s: %s", http.StatusOK, res.Status, string(body)) } var response BackendServerRoomResponse if err := json.Unmarshal(body, &response); err != nil { t.Fatal(err) } if response.Type != "dialout" || response.Dialout == nil { t.Fatalf("expected type dialout, got %s", string(body)) } if response.Dialout.Error != nil { t.Fatalf("expected dialout success, got 
%s", string(body)) } if response.Dialout.CallId != callId { t.Errorf("expected call id %s, got %s", callId, string(body)) } } func TestBackendServer_DialoutRejected(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() _, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomId := "12345" errorCode := "error-code" errorMessage := "rejected call" stopped := make(chan struct{}) go func() { defer close(stopped) msg, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) return } if msg.Type != "internal" || msg.Internal.Type != "dialout" { t.Errorf("expected internal dialout message, got %+v", msg) return } if msg.Internal.Dialout.RoomId != roomId { t.Errorf("expected room id %s, got %+v", roomId, msg) } if url := server.URL + "/"; msg.Internal.Dialout.Backend != url { t.Errorf("expected backend %s, got %+v", url, msg) } response := &ClientMessage{ Id: msg.Id, Type: "internal", Internal: &InternalClientMessage{ Type: "dialout", Dialout: &DialoutInternalClientMessage{ Type: "error", Error: NewError(errorCode, errorMessage), }, }, } if err := client.WriteJSON(response); err != nil { t.Error(err) } }() defer func() { <-stopped }() msg := &BackendServerRoomRequest{ Type: "dialout", Dialout: &BackendRoomDialoutRequest{ Number: "+1234567890", }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusBadGateway { t.Fatalf("Expected error %d, got %s: %s", http.StatusBadGateway, res.Status, string(body)) } var response BackendServerRoomResponse if err := 
json.Unmarshal(body, &response); err != nil { t.Fatal(err) } if response.Type != "dialout" || response.Dialout == nil { t.Fatalf("expected type dialout, got %s", string(body)) } if response.Dialout.Error == nil { t.Fatalf("expected dialout error, got %s", string(body)) } if response.Dialout.Error.Code != errorCode { t.Errorf("expected error code %s, got %s", errorCode, string(body)) } if response.Dialout.Error.Message != errorMessage { t.Errorf("expected error message %s, got %s", errorMessage, string(body)) } } nextcloud-spreed-signaling-1.2.4/backend_storage_etcd.go000066400000000000000000000145201460321600400234130ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "encoding/json" "fmt" "log" "net/url" "sync" "time" "github.com/dlintw/goconf" clientv3 "go.etcd.io/etcd/client/v3" ) type backendStorageEtcd struct { backendStorageCommon etcdClient *EtcdClient keyPrefix string keyInfos map[string]*BackendInformationEtcd initializedCtx context.Context initializedFunc context.CancelFunc initializedWg sync.WaitGroup wakeupChanForTesting chan struct{} } func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (BackendStorage, error) { if etcdClient == nil || !etcdClient.IsConfigured() { return nil, fmt.Errorf("no etcd endpoints configured") } keyPrefix, _ := config.GetString("backend", "backendprefix") if keyPrefix == "" { return nil, fmt.Errorf("no backend prefix configured") } initializedCtx, initializedFunc := context.WithCancel(context.Background()) result := &backendStorageEtcd{ backendStorageCommon: backendStorageCommon{ backends: make(map[string][]*Backend), }, etcdClient: etcdClient, keyPrefix: keyPrefix, keyInfos: make(map[string]*BackendInformationEtcd), initializedCtx: initializedCtx, initializedFunc: initializedFunc, } etcdClient.AddListener(result) return result, nil } func (s *backendStorageEtcd) WaitForInitialized(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() case <-s.initializedCtx.Done(): return nil } } func (s *backendStorageEtcd) wakeupForTesting() { if s.wakeupChanForTesting == nil { return } select { case s.wakeupChanForTesting <- struct{}{}: default: } } func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) { s.initializedWg.Add(1) go func() { if err := client.Watch(context.Background(), s.keyPrefix, s, clientv3.WithPrefix()); err != nil { log.Printf("Error processing watch for %s: %s", s.keyPrefix, err) } }() go func() { if err := client.WaitForConnection(context.Background()); err != nil { panic(err) } backoff, err := NewExponentialBackoff(initialWaitDelay, maxWaitDelay) if err != nil { panic(err) } for { 
response, err := s.getBackends(client, s.keyPrefix) if err != nil { if err == context.DeadlineExceeded { log.Printf("Timeout getting initial list of backends, retry in %s", backoff.NextWait()) } else { log.Printf("Could not get initial list of backends, retry in %s: %s", backoff.NextWait(), err) } backoff.Wait(context.Background()) continue } for _, ev := range response.Kvs { s.EtcdKeyUpdated(client, string(ev.Key), ev.Value) } s.initializedWg.Wait() s.initializedFunc() return } }() } func (s *backendStorageEtcd) EtcdWatchCreated(client *EtcdClient, key string) { s.initializedWg.Done() } func (s *backendStorageEtcd) getBackends(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() return client.Get(ctx, keyPrefix, clientv3.WithPrefix()) } func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) { var info BackendInformationEtcd if err := json.Unmarshal(data, &info); err != nil { log.Printf("Could not decode backend information %s: %s", string(data), err) return } if err := info.CheckValid(); err != nil { log.Printf("Received invalid backend information %s: %s", string(data), err) return } backend := &Backend{ id: key, url: info.Url, parsedUrl: info.parsedUrl, secret: []byte(info.Secret), allowHttp: info.parsedUrl.Scheme == "http", maxStreamBitrate: info.MaxStreamBitrate, maxScreenBitrate: info.MaxScreenBitrate, sessionLimit: info.SessionLimit, } host := info.parsedUrl.Host s.mu.Lock() defer s.mu.Unlock() s.keyInfos[key] = &info entries, found := s.backends[host] if !found { // Simple case, first backend for this host log.Printf("Added backend %s (from %s)", info.Url, key) s.backends[host] = []*Backend{backend} statsBackendsCurrent.Inc() s.wakeupForTesting() return } // Was the backend changed? 
replaced := false for idx, entry := range entries { if entry.id == key { log.Printf("Updated backend %s (from %s)", info.Url, key) entries[idx] = backend replaced = true break } } if !replaced { // New backend, add to list. log.Printf("Added backend %s (from %s)", info.Url, key) s.backends[host] = append(entries, backend) statsBackendsCurrent.Inc() } s.wakeupForTesting() } func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) { s.mu.Lock() defer s.mu.Unlock() info, found := s.keyInfos[key] if !found { return } delete(s.keyInfos, key) host := info.parsedUrl.Host entries, found := s.backends[host] if !found { return } log.Printf("Removing backend %s (from %s)", info.Url, key) newEntries := make([]*Backend, 0, len(entries)-1) for _, entry := range entries { if entry.id == key { statsBackendsCurrent.Dec() continue } newEntries = append(newEntries, entry) } if len(newEntries) > 0 { s.backends[host] = newEntries } else { delete(s.backends, host) } s.wakeupForTesting() } func (s *backendStorageEtcd) Close() { s.etcdClient.RemoveListener(s) } func (s *backendStorageEtcd) Reload(config *goconf.ConfigFile) { // Backend updates are processed through etcd. } func (s *backendStorageEtcd) GetCompatBackend() *Backend { return nil } func (s *backendStorageEtcd) GetBackend(u *url.URL) *Backend { s.mu.RLock() defer s.mu.RUnlock() return s.getBackendLocked(u) } nextcloud-spreed-signaling-1.2.4/backend_storage_etcd_test.go000066400000000000000000000021761460321600400244560ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} { s.mu.Lock() defer s.mu.Unlock() if s.wakeupChanForTesting != nil { return s.wakeupChanForTesting } ch := make(chan struct{}, 1) s.wakeupChanForTesting = ch return ch } nextcloud-spreed-signaling-1.2.4/backend_storage_static.go000066400000000000000000000213611460321600400237640ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "log" "net/url" "reflect" "strings" "github.com/dlintw/goconf" ) type backendStorageStatic struct { backendStorageCommon // Deprecated allowAll bool commonSecret []byte compatBackend *Backend } func NewBackendStorageStatic(config *goconf.ConfigFile) (BackendStorage, error) { allowAll, _ := config.GetBool("backend", "allowall") allowHttp, _ := config.GetBool("backend", "allowhttp") commonSecret, _ := config.GetString("backend", "secret") sessionLimit, err := config.GetInt("backend", "sessionlimit") if err != nil || sessionLimit < 0 { sessionLimit = 0 } backends := make(map[string][]*Backend) var compatBackend *Backend numBackends := 0 if allowAll { log.Println("WARNING: All backend hostnames are allowed, only use for development!") compatBackend = &Backend{ id: "compat", secret: []byte(commonSecret), compat: true, allowHttp: allowHttp, sessionLimit: uint64(sessionLimit), } if sessionLimit > 0 { log.Printf("Allow a maximum of %d sessions", sessionLimit) } numBackends++ } else if backendIds, _ := config.GetString("backend", "backends"); backendIds != "" { for host, configuredBackends := range getConfiguredHosts(backendIds, config, commonSecret) { backends[host] = append(backends[host], configuredBackends...) for _, be := range configuredBackends { log.Printf("Backend %s added for %s", be.id, be.url) } numBackends += len(configuredBackends) } } else if allowedUrls, _ := config.GetString("backend", "allowed"); allowedUrls != "" { // Old-style configuration, only hosts are configured and are using a common secret. 
allowMap := make(map[string]bool) for _, u := range strings.Split(allowedUrls, ",") { u = strings.TrimSpace(u) if idx := strings.IndexByte(u, '/'); idx != -1 { log.Printf("WARNING: Removing path from allowed hostname \"%s\", check your configuration!", u) u = u[:idx] } if u != "" { allowMap[strings.ToLower(u)] = true } } if len(allowMap) == 0 { log.Println("WARNING: No backend hostnames are allowed, check your configuration!") } else { compatBackend = &Backend{ id: "compat", secret: []byte(commonSecret), compat: true, allowHttp: allowHttp, sessionLimit: uint64(sessionLimit), } hosts := make([]string, 0, len(allowMap)) for host := range allowMap { hosts = append(hosts, host) backends[host] = []*Backend{compatBackend} } if len(hosts) > 1 { log.Println("WARNING: Using deprecated backend configuration. Please migrate the \"allowed\" setting to the new \"backends\" configuration.") } log.Printf("Allowed backend hostnames: %s", hosts) if sessionLimit > 0 { log.Printf("Allow a maximum of %d sessions", sessionLimit) } numBackends++ } } if numBackends == 0 { log.Printf("WARNING: No backends configured, client connections will not be possible.") } statsBackendsCurrent.Add(float64(numBackends)) return &backendStorageStatic{ backendStorageCommon: backendStorageCommon{ backends: backends, }, allowAll: allowAll, commonSecret: []byte(commonSecret), compatBackend: compatBackend, }, nil } func (s *backendStorageStatic) Close() { } func (s *backendStorageStatic) RemoveBackendsForHost(host string) { if oldBackends := s.backends[host]; len(oldBackends) > 0 { for _, backend := range oldBackends { log.Printf("Backend %s removed for %s", backend.id, backend.url) } statsBackendsCurrent.Sub(float64(len(oldBackends))) } delete(s.backends, host) } func (s *backendStorageStatic) UpsertHost(host string, backends []*Backend) { for existingIndex, existingBackend := range s.backends[host] { found := false index := 0 for _, newBackend := range backends { if reflect.DeepEqual(existingBackend, 
newBackend) { // otherwise we could manually compare the struct members here found = true backends = append(backends[:index], backends[index+1:]...) break } else if newBackend.id == existingBackend.id { found = true s.backends[host][existingIndex] = newBackend backends = append(backends[:index], backends[index+1:]...) log.Printf("Backend %s updated for %s", newBackend.id, newBackend.url) break } index++ } if !found { removed := s.backends[host][existingIndex] log.Printf("Backend %s removed for %s", removed.id, removed.url) s.backends[host] = append(s.backends[host][:existingIndex], s.backends[host][existingIndex+1:]...) statsBackendsCurrent.Dec() } } s.backends[host] = append(s.backends[host], backends...) for _, added := range backends { log.Printf("Backend %s added for %s", added.id, added.url) } statsBackendsCurrent.Add(float64(len(backends))) } func getConfiguredBackendIDs(backendIds string) (ids []string) { seen := make(map[string]bool) for _, id := range strings.Split(backendIds, ",") { id = strings.TrimSpace(id) if id == "" { continue } if seen[id] { continue } ids = append(ids, id) seen[id] = true } return ids } func getConfiguredHosts(backendIds string, config *goconf.ConfigFile, commonSecret string) (hosts map[string][]*Backend) { hosts = make(map[string][]*Backend) for _, id := range getConfiguredBackendIDs(backendIds) { u, _ := config.GetString(id, "url") if u == "" { log.Printf("Backend %s is missing or incomplete, skipping", id) continue } if u[len(u)-1] != '/' { u += "/" } parsed, err := url.Parse(u) if err != nil { log.Printf("Backend %s has an invalid url %s configured (%s), skipping", id, u, err) continue } if strings.Contains(parsed.Host, ":") && hasStandardPort(parsed) { parsed.Host = parsed.Hostname() u = parsed.String() } secret, _ := config.GetString(id, "secret") if secret == "" && commonSecret != "" { log.Printf("Backend %s has no own shared secret set, using common shared secret", id) secret = commonSecret } if u == "" || secret == "" { 
log.Printf("Backend %s is missing or incomplete, skipping", id) continue } sessionLimit, err := config.GetInt(id, "sessionlimit") if err != nil || sessionLimit < 0 { sessionLimit = 0 } if sessionLimit > 0 { log.Printf("Backend %s allows a maximum of %d sessions", id, sessionLimit) } maxStreamBitrate, err := config.GetInt(id, "maxstreambitrate") if err != nil || maxStreamBitrate < 0 { maxStreamBitrate = 0 } maxScreenBitrate, err := config.GetInt(id, "maxscreenbitrate") if err != nil || maxScreenBitrate < 0 { maxScreenBitrate = 0 } hosts[parsed.Host] = append(hosts[parsed.Host], &Backend{ id: id, url: u, parsedUrl: parsed, secret: []byte(secret), allowHttp: parsed.Scheme == "http", maxStreamBitrate: maxStreamBitrate, maxScreenBitrate: maxScreenBitrate, sessionLimit: uint64(sessionLimit), }) } return hosts } func (s *backendStorageStatic) Reload(config *goconf.ConfigFile) { s.mu.Lock() defer s.mu.Unlock() if s.compatBackend != nil { log.Println("Old-style configuration active, reload is not supported") return } commonSecret, _ := config.GetString("backend", "secret") if backendIds, _ := config.GetString("backend", "backends"); backendIds != "" { configuredHosts := getConfiguredHosts(backendIds, config, commonSecret) // remove backends that are no longer configured for hostname := range s.backends { if _, ok := configuredHosts[hostname]; !ok { s.RemoveBackendsForHost(hostname) } } // rewrite backends adding newly configured ones and rewriting existing ones for hostname, configuredBackends := range configuredHosts { s.UpsertHost(hostname, configuredBackends) } } } func (s *backendStorageStatic) GetCompatBackend() *Backend { s.mu.RLock() defer s.mu.RUnlock() return s.compatBackend } func (s *backendStorageStatic) GetBackend(u *url.URL) *Backend { s.mu.RLock() defer s.mu.RUnlock() if _, found := s.backends[u.Host]; !found { if s.allowAll { return s.compatBackend } return nil } return s.getBackendLocked(u) } 
nextcloud-spreed-signaling-1.2.4/backoff.go000066400000000000000000000035301460321600400206730ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "fmt" "time" ) type Backoff interface { Reset() NextWait() time.Duration Wait(context.Context) } type exponentialBackoff struct { initial time.Duration maxWait time.Duration nextWait time.Duration } func NewExponentialBackoff(initial time.Duration, maxWait time.Duration) (Backoff, error) { if initial <= 0 { return nil, fmt.Errorf("initial must be larger than 0") } if maxWait < initial { return nil, fmt.Errorf("maxWait must be larger or equal to initial") } return &exponentialBackoff{ initial: initial, maxWait: maxWait, nextWait: initial, }, nil } func (b *exponentialBackoff) Reset() { b.nextWait = b.initial } func (b *exponentialBackoff) NextWait() time.Duration { return b.nextWait } func (b *exponentialBackoff) Wait(ctx context.Context) { waiter, cancel := context.WithTimeout(ctx, b.nextWait) defer cancel() b.nextWait = b.nextWait * 2 if b.nextWait > b.maxWait { b.nextWait = b.maxWait } <-waiter.Done() } 
nextcloud-spreed-signaling-1.2.4/backoff_test.go000066400000000000000000000033661460321600400217410ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "testing" "time" ) func TestBackoff_Exponential(t *testing.T) { backoff, err := NewExponentialBackoff(100*time.Millisecond, 500*time.Millisecond) if err != nil { t.Fatal(err) } waitTimes := []time.Duration{ 100 * time.Millisecond, 200 * time.Millisecond, 400 * time.Millisecond, 500 * time.Millisecond, 500 * time.Millisecond, } for _, wait := range waitTimes { if backoff.NextWait() != wait { t.Errorf("Wait time should be %s, got %s", wait, backoff.NextWait()) } a := time.Now() backoff.Wait(context.Background()) b := time.Now() if b.Sub(a) < wait { t.Errorf("Should have waited %s, got %s", wait, b.Sub(a)) } } backoff.Reset() a := time.Now() backoff.Wait(context.Background()) b := time.Now() if b.Sub(a) < 100*time.Millisecond { t.Errorf("Should have waited %s, got %s", 100*time.Millisecond, b.Sub(a)) } } nextcloud-spreed-signaling-1.2.4/capabilities.go000066400000000000000000000213611460321600400217330ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "fmt" "io" "log" "net/http" "net/url" "strings" "sync" "time" ) const ( // Name of the "Talk" app in Nextcloud. AppNameSpreed = "spreed" // Name of capability to enable the "v3" API for the signaling endpoint. FeatureSignalingV3Api = "signaling-v3" // Cache received capabilities for one hour. CapabilitiesCacheDuration = time.Hour // Don't invalidate more than once per minute. maxInvalidateInterval = time.Minute ) // Can be overwritten by tests. 
var getCapabilitiesNow = time.Now type capabilitiesEntry struct { nextUpdate time.Time capabilities map[string]interface{} } type Capabilities struct { mu sync.RWMutex version string pool *HttpClientPool entries map[string]*capabilitiesEntry nextInvalidate map[string]time.Time } func NewCapabilities(version string, pool *HttpClientPool) (*Capabilities, error) { result := &Capabilities{ version: version, pool: pool, entries: make(map[string]*capabilitiesEntry), nextInvalidate: make(map[string]time.Time), } return result, nil } type CapabilitiesVersion struct { Major int `json:"major"` Minor int `json:"minor"` Micro int `json:"micro"` String string `json:"string"` Edition string `json:"edition"` ExtendedSupport bool `json:"extendedSupport"` } type CapabilitiesResponse struct { Version CapabilitiesVersion `json:"version"` Capabilities map[string]*json.RawMessage `json:"capabilities"` } func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool) { c.mu.RLock() defer c.mu.RUnlock() now := getCapabilitiesNow() if entry, found := c.entries[key]; found && entry.nextUpdate.After(now) { return entry.capabilities, true } return nil, false } func (c *Capabilities) setCapabilities(key string, capabilities map[string]interface{}) { now := getCapabilitiesNow() entry := &capabilitiesEntry{ nextUpdate: now.Add(CapabilitiesCacheDuration), capabilities: capabilities, } c.mu.Lock() defer c.mu.Unlock() c.entries[key] = entry } func (c *Capabilities) invalidateCapabilities(key string) { c.mu.Lock() defer c.mu.Unlock() now := getCapabilitiesNow() if entry, found := c.nextInvalidate[key]; found && entry.After(now) { return } delete(c.entries, key) c.nextInvalidate[key] = now.Add(maxInvalidateInterval) } func (c *Capabilities) getKeyForUrl(u *url.URL) string { key := u.String() return key } func (c *Capabilities) loadCapabilities(ctx context.Context, u *url.URL) (map[string]interface{}, bool, error) { key := c.getKeyForUrl(u) if caps, found := c.getCapabilities(key); 
found { return caps, true, nil } capUrl := *u if !strings.Contains(capUrl.Path, "ocs/v2.php") { if !strings.HasSuffix(capUrl.Path, "/") { capUrl.Path += "/" } capUrl.Path = capUrl.Path + "ocs/v2.php/cloud/capabilities" } else if pos := strings.Index(capUrl.Path, "/ocs/v2.php/"); pos >= 0 { capUrl.Path = capUrl.Path[:pos+11] + "/cloud/capabilities" } log.Printf("Capabilities expired for %s, updating", capUrl.String()) client, pool, err := c.pool.Get(ctx, &capUrl) if err != nil { log.Printf("Could not get client for host %s: %s", capUrl.Host, err) return nil, false, err } defer pool.Put(client) req, err := http.NewRequestWithContext(ctx, "GET", capUrl.String(), nil) if err != nil { log.Printf("Could not create request to %s: %s", &capUrl, err) return nil, false, err } req.Header.Set("Accept", "application/json") req.Header.Set("OCS-APIRequest", "true") req.Header.Set("User-Agent", "nextcloud-spreed-signaling/"+c.version) resp, err := client.Do(req) if err != nil { return nil, false, err } defer resp.Body.Close() ct := resp.Header.Get("Content-Type") if !strings.HasPrefix(ct, "application/json") { log.Printf("Received unsupported content-type from %s: %s (%s)", capUrl.String(), ct, resp.Status) return nil, false, ErrUnsupportedContentType } body, err := io.ReadAll(resp.Body) if err != nil { log.Printf("Could not read response body from %s: %s", capUrl.String(), err) return nil, false, err } var ocs OcsResponse if err := json.Unmarshal(body, &ocs); err != nil { log.Printf("Could not decode OCS response %s from %s: %s", string(body), capUrl.String(), err) return nil, false, err } else if ocs.Ocs == nil || ocs.Ocs.Data == nil { log.Printf("Incomplete OCS response %s from %s", string(body), u) return nil, false, fmt.Errorf("incomplete OCS response") } var response CapabilitiesResponse if err := json.Unmarshal(*ocs.Ocs.Data, &response); err != nil { log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), capUrl.String(), err) return nil, 
false, err } capaObj, found := response.Capabilities[AppNameSpreed] if !found || capaObj == nil { log.Printf("No capabilities received for app spreed from %s: %+v", capUrl.String(), response) return nil, false, nil } var capa map[string]interface{} if err := json.Unmarshal(*capaObj, &capa); err != nil { log.Printf("Unsupported capabilities received for app spreed from %s: %+v", capUrl.String(), response) return nil, false, nil } log.Printf("Received capabilities %+v from %s", capa, capUrl.String()) c.setCapabilities(key, capa) return capa, false, nil } func (c *Capabilities) HasCapabilityFeature(ctx context.Context, u *url.URL, feature string) bool { caps, _, err := c.loadCapabilities(ctx, u) if err != nil { log.Printf("Could not get capabilities for %s: %s", u, err) return false } featuresInterface := caps["features"] if featuresInterface == nil { return false } features, ok := featuresInterface.([]interface{}) if !ok { log.Printf("Invalid features list received for %s: %+v", u, featuresInterface) return false } for _, entry := range features { if entry == feature { return true } } return false } func (c *Capabilities) getConfigGroup(ctx context.Context, u *url.URL, group string) (map[string]interface{}, bool, bool) { caps, cached, err := c.loadCapabilities(ctx, u) if err != nil { log.Printf("Could not get capabilities for %s: %s", u, err) return nil, cached, false } configInterface := caps["config"] if configInterface == nil { return nil, cached, false } config, ok := configInterface.(map[string]interface{}) if !ok { log.Printf("Invalid config mapping received from %s: %+v", u, configInterface) return nil, cached, false } groupInterface := config[group] if groupInterface == nil { return nil, cached, false } groupConfig, ok := groupInterface.(map[string]interface{}) if !ok { log.Printf("Invalid group mapping \"%s\" received from %s: %+v", group, u, groupInterface) return nil, cached, false } return groupConfig, cached, true } func (c *Capabilities) 
GetIntegerConfig(ctx context.Context, u *url.URL, group, key string) (int, bool, bool) { groupConfig, cached, found := c.getConfigGroup(ctx, u, group) if !found { return 0, cached, false } value, found := groupConfig[key] if !found { return 0, cached, false } switch value := value.(type) { case int: return value, cached, true case float32: return int(value), cached, true case float64: return int(value), cached, true default: log.Printf("Invalid config value for \"%s\" received from %s: %+v", key, u, value) } return 0, cached, false } func (c *Capabilities) GetStringConfig(ctx context.Context, u *url.URL, group, key string) (string, bool, bool) { groupConfig, cached, found := c.getConfigGroup(ctx, u, group) if !found { return "", cached, false } value, found := groupConfig[key] if !found { return "", cached, false } switch value := value.(type) { case string: return value, cached, true default: log.Printf("Invalid config value for \"%s\" received from %s: %+v", key, u, value) } return "", cached, false } func (c *Capabilities) InvalidateCapabilities(u *url.URL) { key := c.getKeyForUrl(u) c.invalidateCapabilities(key) } nextcloud-spreed-signaling-1.2.4/capabilities_test.go000066400000000000000000000173671460321600400230050ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
* * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "net/http" "net/http/httptest" "net/url" "strings" "sync/atomic" "testing" "time" "github.com/gorilla/mux" ) func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*CapabilitiesResponse)) (*url.URL, *Capabilities) { pool, err := NewHttpClientPool(1, false) if err != nil { t.Fatal(err) } capabilities, err := NewCapabilities("0.0", pool) if err != nil { t.Fatal(err) } r := mux.NewRouter() server := httptest.NewServer(r) t.Cleanup(func() { server.Close() }) u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } handleCapabilitiesFunc := func(w http.ResponseWriter, r *http.Request) { features := []string{ "foo", "bar", } if strings.Contains(t.Name(), "V3Api") { features = append(features, "signaling-v3") } signaling := map[string]interface{}{ "foo": "bar", "baz": 42, } config := map[string]interface{}{ "signaling": signaling, } spreedCapa, _ := json.Marshal(map[string]interface{}{ "features": features, "config": config, }) emptyArray := []byte("[]") response := &CapabilitiesResponse{ Version: CapabilitiesVersion{ Major: 20, }, Capabilities: map[string]*json.RawMessage{ "anotherApp": (*json.RawMessage)(&emptyArray), "spreed": (*json.RawMessage)(&spreedCapa), }, } if callback != nil { callback(response) } data, err := json.Marshal(response) if err != nil { t.Errorf("Could not marshal %+v: %s", response, err) } var ocs OcsResponse ocs.Ocs = &OcsBody{ Meta: OcsMeta{ Status: "ok", StatusCode: http.StatusOK, Message: http.StatusText(http.StatusOK), }, Data: (*json.RawMessage)(&data), } if data, err = json.Marshal(ocs); err != nil { t.Fatal(err) } w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(data) // nolint } r.HandleFunc("/ocs/v2.php/cloud/capabilities", handleCapabilitiesFunc) return u, capabilities } func NewCapabilitiesForTest(t 
*testing.T) (*url.URL, *Capabilities) { return NewCapabilitiesForTestWithCallback(t, nil) } func SetCapabilitiesGetNow(t *testing.T, f func() time.Time) { old := getCapabilitiesNow t.Cleanup(func() { getCapabilitiesNow = old }) getCapabilitiesNow = f } func TestCapabilities(t *testing.T) { url, capabilities := NewCapabilitiesForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if !capabilities.HasCapabilityFeature(ctx, url, "foo") { t.Error("should have capability \"foo\"") } if capabilities.HasCapabilityFeature(ctx, url, "lala") { t.Error("should not have capability \"lala\"") } expectedString := "bar" if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "foo"); !found { t.Error("could not find value for \"foo\"") } else if value != expectedString { t.Errorf("expected value %s, got %s", expectedString, value) } else if !cached { t.Errorf("expected cached response") } if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "baz"); found { t.Errorf("should not have found value for \"baz\", got %s", value) } else if !cached { t.Errorf("expected cached response") } if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "invalid"); found { t.Errorf("should not have found value for \"invalid\", got %s", value) } else if !cached { t.Errorf("expected cached response") } if value, cached, found := capabilities.GetStringConfig(ctx, url, "invalid", "foo"); found { t.Errorf("should not have found value for \"baz\", got %s", value) } else if !cached { t.Errorf("expected cached response") } expectedInt := 42 if value, cached, found := capabilities.GetIntegerConfig(ctx, url, "signaling", "baz"); !found { t.Error("could not find value for \"baz\"") } else if value != expectedInt { t.Errorf("expected value %d, got %d", expectedInt, value) } else if !cached { t.Errorf("expected cached response") } if value, cached, found := capabilities.GetIntegerConfig(ctx, url, 
"signaling", "foo"); found { t.Errorf("should not have found value for \"foo\", got %d", value) } else if !cached { t.Errorf("expected cached response") } if value, cached, found := capabilities.GetIntegerConfig(ctx, url, "signaling", "invalid"); found { t.Errorf("should not have found value for \"invalid\", got %d", value) } else if !cached { t.Errorf("expected cached response") } if value, cached, found := capabilities.GetIntegerConfig(ctx, url, "invalid", "baz"); found { t.Errorf("should not have found value for \"baz\", got %d", value) } else if !cached { t.Errorf("expected cached response") } } func TestInvalidateCapabilities(t *testing.T) { var called atomic.Uint32 url, capabilities := NewCapabilitiesForTestWithCallback(t, func(cr *CapabilitiesResponse) { called.Add(1) }) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() expectedString := "bar" if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "foo"); !found { t.Error("could not find value for \"foo\"") } else if value != expectedString { t.Errorf("expected value %s, got %s", expectedString, value) } else if cached { t.Errorf("expected direct response") } if value := called.Load(); value != 1 { t.Errorf("expected called %d, got %d", 1, value) } // Invalidating will cause the capabilities to be reloaded. capabilities.InvalidateCapabilities(url) if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "foo"); !found { t.Error("could not find value for \"foo\"") } else if value != expectedString { t.Errorf("expected value %s, got %s", expectedString, value) } else if cached { t.Errorf("expected direct response") } if value := called.Load(); value != 2 { t.Errorf("expected called %d, got %d", 2, value) } // Invalidating is throttled to about once per minute. 
capabilities.InvalidateCapabilities(url) if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "foo"); !found { t.Error("could not find value for \"foo\"") } else if value != expectedString { t.Errorf("expected value %s, got %s", expectedString, value) } else if !cached { t.Errorf("expected cached response") } if value := called.Load(); value != 2 { t.Errorf("expected called %d, got %d", 2, value) } // At a later time, invalidating can be done again. SetCapabilitiesGetNow(t, func() time.Time { return time.Now().Add(2 * time.Minute) }) capabilities.InvalidateCapabilities(url) if value, cached, found := capabilities.GetStringConfig(ctx, url, "signaling", "foo"); !found { t.Error("could not find value for \"foo\"") } else if value != expectedString { t.Errorf("expected value %s, got %s", expectedString, value) } else if cached { t.Errorf("expected direct response") } if value := called.Load(); value != 3 { t.Errorf("expected called %d, got %d", 3, value) } } nextcloud-spreed-signaling-1.2.4/certificate_reloader.go000066400000000000000000000074441460321600400234470ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "crypto/tls" "crypto/x509" "fmt" "log" "os" "sync/atomic" ) type CertificateReloader struct { certFile string certWatcher *FileWatcher keyFile string keyWatcher *FileWatcher certificate atomic.Pointer[tls.Certificate] reloadCounter atomic.Uint64 } func NewCertificateReloader(certFile string, keyFile string) (*CertificateReloader, error) { pair, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, fmt.Errorf("could not load certificate / key: %w", err) } reloader := &CertificateReloader{ certFile: certFile, keyFile: keyFile, } reloader.certificate.Store(&pair) reloader.certWatcher, err = NewFileWatcher(certFile, reloader.reload) if err != nil { return nil, err } reloader.keyWatcher, err = NewFileWatcher(keyFile, reloader.reload) if err != nil { reloader.certWatcher.Close() // nolint return nil, err } return reloader, nil } func (r *CertificateReloader) reload(filename string) { log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile) pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile) if err != nil { log.Printf("could not load certificate / key: %s", err) return } r.certificate.Store(&pair) r.reloadCounter.Add(1) } func (r *CertificateReloader) getCertificate() (*tls.Certificate, error) { return r.certificate.Load(), nil } func (r *CertificateReloader) GetCertificate(h *tls.ClientHelloInfo) (*tls.Certificate, error) { return r.getCertificate() } func (r *CertificateReloader) GetClientCertificate(i *tls.CertificateRequestInfo) (*tls.Certificate, error) { return r.getCertificate() } func (r *CertificateReloader) GetReloadCounter() uint64 { return r.reloadCounter.Load() } type CertPoolReloader struct { certFile string certWatcher *FileWatcher pool atomic.Pointer[x509.CertPool] reloadCounter atomic.Uint64 } func loadCertPool(filename string) (*x509.CertPool, error) { cert, err := os.ReadFile(filename) if err != nil { return nil, err } pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(cert) { 
return nil, fmt.Errorf("invalid CA in %s: %w", filename, err) } return pool, nil } func NewCertPoolReloader(certFile string) (*CertPoolReloader, error) { pool, err := loadCertPool(certFile) if err != nil { return nil, err } reloader := &CertPoolReloader{ certFile: certFile, } reloader.pool.Store(pool) reloader.certWatcher, err = NewFileWatcher(certFile, reloader.reload) if err != nil { return nil, err } return reloader, nil } func (r *CertPoolReloader) reload(filename string) { log.Printf("reloading certificate pool from %s", r.certFile) pool, err := loadCertPool(r.certFile) if err != nil { log.Printf("could not load certificate pool: %s", err) return } r.pool.Store(pool) r.reloadCounter.Add(1) } func (r *CertPoolReloader) GetCertPool() *x509.CertPool { return r.pool.Load() } func (r *CertPoolReloader) GetReloadCounter() uint64 { return r.reloadCounter.Load() } nextcloud-spreed-signaling-1.2.4/certificate_reloader_test.go000066400000000000000000000031751460321600400245030ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "testing" "time" ) func UpdateCertificateCheckIntervalForTest(t *testing.T, interval time.Duration) { old := deduplicateWatchEvents.Load() t.Cleanup(func() { deduplicateWatchEvents.Store(old) }) deduplicateWatchEvents.Store(int64(interval)) } func (r *CertificateReloader) WaitForReload(ctx context.Context) error { counter := r.GetReloadCounter() for counter == r.GetReloadCounter() { if err := ctx.Err(); err != nil { return err } time.Sleep(time.Millisecond) } return nil } func (r *CertPoolReloader) WaitForReload(ctx context.Context) error { counter := r.GetReloadCounter() for counter == r.GetReloadCounter() { if err := ctx.Err(); err != nil { return err } time.Sleep(time.Millisecond) } return nil } nextcloud-spreed-signaling-1.2.4/channel_waiter.go000066400000000000000000000030171460321600400222630ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "sync" ) type ChannelWaiters struct { mu sync.RWMutex id uint64 waiters map[uint64]chan struct{} } func (w *ChannelWaiters) Wakeup() { w.mu.RLock() defer w.mu.RUnlock() for _, ch := range w.waiters { select { case ch <- struct{}{}: default: // Receiver is still processing previous wakeup. 
} } } func (w *ChannelWaiters) Add(ch chan struct{}) uint64 { w.mu.Lock() defer w.mu.Unlock() if w.waiters == nil { w.waiters = make(map[uint64]chan struct{}) } id := w.id w.id++ w.waiters[id] = ch return id } func (w *ChannelWaiters) Remove(id uint64) { w.mu.Lock() defer w.mu.Unlock() delete(w.waiters, id) } nextcloud-spreed-signaling-1.2.4/channel_waiter_test.go000066400000000000000000000031351460321600400233230ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "testing" ) func TestChannelWaiters(t *testing.T) { var waiters ChannelWaiters ch1 := make(chan struct{}, 1) id1 := waiters.Add(ch1) defer waiters.Remove(id1) ch2 := make(chan struct{}, 1) id2 := waiters.Add(ch2) defer waiters.Remove(id2) waiters.Wakeup() <-ch1 <-ch2 select { case <-ch1: t.Error("should have not received another event") case <-ch2: t.Error("should have not received another event") default: } ch3 := make(chan struct{}, 1) id3 := waiters.Add(ch3) waiters.Remove(id3) // Multiple wakeups work even without processing. 
waiters.Wakeup() waiters.Wakeup() waiters.Wakeup() <-ch1 <-ch2 select { case <-ch3: t.Error("should have not received another event") default: } } nextcloud-spreed-signaling-1.2.4/client.go000066400000000000000000000264371460321600400205710ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "bytes" "encoding/json" "log" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/gorilla/websocket" "github.com/mailru/easyjson" ) const ( // Time allowed to write a message to the peer. writeWait = 10 * time.Second // Time allowed to read the next pong message from the peer. pongWait = 60 * time.Second // Send pings to peer with this period. Must be less than pongWait. pingPeriod = (pongWait * 9) / 10 // Maximum message size allowed from peer. 
maxMessageSize = 64 * 1024 ) var ( noCountry = "no-country" loopback = "loopback" unknownCountry = "unknown-country" ) func init() { RegisterClientStats() } func IsValidCountry(country string) bool { switch country { case "": fallthrough case noCountry: fallthrough case loopback: fallthrough case unknownCountry: return false default: return true } } var ( InvalidFormat = NewError("invalid_format", "Invalid data format.") bufferPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } ) type WritableClientMessage interface { json.Marshaler CloseAfterSend(session Session) bool } type ClientHandler interface { OnClosed(*Client) OnMessageReceived(*Client, []byte) OnRTTReceived(*Client, time.Duration) } type ClientGeoIpHandler interface { OnLookupCountry(*Client) string } type Client struct { conn *websocket.Conn addr string handler ClientHandler agent string closed atomic.Int32 country *string logRTT bool session atomic.Pointer[ClientSession] mu sync.Mutex closer *Closer closeOnce sync.Once messagesDone chan struct{} messageChan chan *bytes.Buffer } func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) { remoteAddress = strings.TrimSpace(remoteAddress) if remoteAddress == "" { remoteAddress = "unknown remote address" } agent = strings.TrimSpace(agent) if agent == "" { agent = "unknown user agent" } client := &Client{ agent: agent, logRTT: true, } client.SetConn(conn, remoteAddress, handler) return client, nil } func (c *Client) SetConn(conn *websocket.Conn, remoteAddress string, handler ClientHandler) { c.conn = conn c.addr = remoteAddress c.handler = handler c.closer = NewCloser() c.messageChan = make(chan *bytes.Buffer, 16) c.messagesDone = make(chan struct{}) } func (c *Client) IsConnected() bool { return c.closed.Load() == 0 } func (c *Client) IsAuthenticated() bool { return c.GetSession() != nil } func (c *Client) GetSession() *ClientSession { return c.session.Load() } func (c *Client) 
SetSession(session *ClientSession) { c.session.Store(session) } func (c *Client) RemoteAddr() string { return c.addr } func (c *Client) UserAgent() string { return c.agent } func (c *Client) Country() string { if c.country == nil { var country string if handler, ok := c.handler.(ClientGeoIpHandler); ok { country = handler.OnLookupCountry(c) } else { country = unknownCountry } c.country = &country } return *c.country } func (c *Client) Close() { if c.closed.Load() >= 2 { // Prevent reentrant call in case this was the second closing // step. Would otherwise deadlock in the "Once.Do" call path // through "Hub.processUnregister" (which calls "Close" again). return } c.closeOnce.Do(func() { c.doClose() }) } func (c *Client) doClose() { closed := c.closed.Add(1) if closed == 1 { c.mu.Lock() defer c.mu.Unlock() if c.conn != nil { c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) // nolint c.conn.Close() c.conn = nil } } else if closed == 2 { // Both the read pump and message processing must be finished before closing. 
c.closer.Close() <-c.messagesDone c.handler.OnClosed(c) c.SetSession(nil) } } func (c *Client) SendError(e *Error) bool { message := &ServerMessage{ Type: "error", Error: e, } return c.SendMessage(message) } func (c *Client) SendByeResponse(message *ClientMessage) bool { return c.SendByeResponseWithReason(message, "") } func (c *Client) SendByeResponseWithReason(message *ClientMessage, reason string) bool { response := &ServerMessage{ Type: "bye", Bye: &ByeServerMessage{}, } if message != nil { response.Id = message.Id } if reason != "" { response.Bye.Reason = reason } return c.SendMessage(response) } func (c *Client) SendMessage(message WritableClientMessage) bool { return c.writeMessage(message) } func (c *Client) ReadPump() { defer func() { close(c.messageChan) c.Close() }() go c.processMessages() addr := c.RemoteAddr() c.mu.Lock() conn := c.conn c.mu.Unlock() if conn == nil { log.Printf("Connection from %s closed while starting readPump", addr) return } conn.SetReadLimit(maxMessageSize) conn.SetPongHandler(func(msg string) error { now := time.Now() conn.SetReadDeadline(now.Add(pongWait)) // nolint if msg == "" { return nil } if ts, err := strconv.ParseInt(msg, 10, 64); err == nil { rtt := now.Sub(time.Unix(0, ts)) if c.logRTT { rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds() if session := c.GetSession(); session != nil { log.Printf("Client %s has RTT of %d ms (%s)", session.PublicId(), rtt_ms, rtt) } else { log.Printf("Client from %s has RTT of %d ms (%s)", addr, rtt_ms, rtt) } } c.handler.OnRTTReceived(c, rtt) } return nil }) for { conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint messageType, reader, err := conn.NextReader() if err != nil { if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { if session := c.GetSession(); session != nil { log.Printf("Error reading from client %s: %v", session.PublicId(), err) } else { 
log.Printf("Error reading from %s: %v", addr, err) } } break } if messageType != websocket.TextMessage { if session := c.GetSession(); session != nil { log.Printf("Unsupported message type %v from client %s", messageType, session.PublicId()) } else { log.Printf("Unsupported message type %v from %s", messageType, addr) } c.SendError(InvalidFormat) continue } decodeBuffer := bufferPool.Get().(*bytes.Buffer) decodeBuffer.Reset() if _, err := decodeBuffer.ReadFrom(reader); err != nil { bufferPool.Put(decodeBuffer) if session := c.GetSession(); session != nil { log.Printf("Error reading message from client %s: %v", session.PublicId(), err) } else { log.Printf("Error reading message from %s: %v", addr, err) } break } // Stop processing if the client was closed. if !c.IsConnected() { bufferPool.Put(decodeBuffer) break } c.messageChan <- decodeBuffer } } func (c *Client) processMessages() { for { buffer := <-c.messageChan if buffer == nil { break } c.handler.OnMessageReceived(c, buffer.Bytes()) bufferPool.Put(buffer) } close(c.messagesDone) c.doClose() } func (c *Client) writeInternal(message json.Marshaler) bool { var closeData []byte c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint writer, err := c.conn.NextWriter(websocket.TextMessage) if err == nil { if m, ok := (interface{}(message)).(easyjson.Marshaler); ok { _, err = easyjson.MarshalToWriter(m, writer) } else { err = json.NewEncoder(writer).Encode(message) } } if err == nil { err = writer.Close() } if err != nil { if err == websocket.ErrCloseSent { // Already sent a "close", won't be able to send anything else. 
return false } if session := c.GetSession(); session != nil { log.Printf("Could not send message %+v to client %s: %v", message, session.PublicId(), err) } else { log.Printf("Could not send message %+v to %s: %v", message, c.RemoteAddr(), err) } closeData = websocket.FormatCloseMessage(websocket.CloseInternalServerErr, "") goto close } return true close: c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil { if session := c.GetSession(); session != nil { log.Printf("Could not send close message to client %s: %v", session.PublicId(), err) } else { log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err) } } return false } func (c *Client) writeError(e error) bool { // nolint message := &ServerMessage{ Type: "error", Error: NewError("internal_error", e.Error()), } c.mu.Lock() defer c.mu.Unlock() if c.conn == nil { return false } if !c.writeMessageLocked(message) { return false } closeData := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, e.Error()) c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil { if session := c.GetSession(); session != nil { log.Printf("Could not send close message to client %s: %v", session.PublicId(), err) } else { log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err) } } return false } func (c *Client) writeMessage(message WritableClientMessage) bool { c.mu.Lock() defer c.mu.Unlock() if c.conn == nil { return false } return c.writeMessageLocked(message) } func (c *Client) writeMessageLocked(message WritableClientMessage) bool { if !c.writeInternal(message) { return false } session := c.GetSession() if message.CloseAfterSend(session) { c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint c.conn.WriteMessage(websocket.CloseMessage, []byte{}) // nolint if session != nil { go session.Close() } go c.Close() return 
false } return true } func (c *Client) sendPing() bool { c.mu.Lock() defer c.mu.Unlock() if c.conn == nil { return false } now := time.Now().UnixNano() msg := strconv.FormatInt(now, 10) c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil { if session := c.GetSession(); session != nil { log.Printf("Could not send ping to client %s: %v", session.PublicId(), err) } else { log.Printf("Could not send ping to %s: %v", c.RemoteAddr(), err) } return false } return true } func (c *Client) WritePump() { ticker := time.NewTicker(pingPeriod) defer func() { ticker.Stop() }() // Fetch initial RTT before any messages have been sent to the client. c.sendPing() for { select { case <-ticker.C: if !c.sendPing() { return } case <-c.closer.C: return } } } nextcloud-spreed-signaling-1.2.4/client/000077500000000000000000000000001460321600400202265ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/client/main.go000066400000000000000000000356031460321600400215100ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "bytes" "encoding/base64" "encoding/json" "flag" "fmt" "io" "log" pseudorand "math/rand" "net" "net/http" "net/url" "os" "os/signal" "runtime" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/gorilla/mux" "github.com/gorilla/securecookie" "github.com/gorilla/websocket" "github.com/mailru/easyjson" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) var ( addr = flag.String("addr", "localhost:28080", "http service address") config = flag.String("config", "server.conf", "config file to use") maxClients = flag.Int("maxClients", 100, "number of client connections") backendSecret []byte // Report messages that took more than 1 second. messageReportDuration = 1000 * time.Millisecond ) const ( // Time allowed to write a message to the peer. writeWait = 10 * time.Second // Time allowed to read the next pong message from the peer. pongWait = 60 * time.Second // Send pings to peer with this period. Must be less than pongWait. pingPeriod = (pongWait * 9) / 10 // Maximum message size allowed from peer. 
maxMessageSize = 64 * 1024 privateSessionName = "private-session" publicSessionName = "public-session" ) type Stats struct { numRecvMessages atomic.Uint64 numSentMessages atomic.Uint64 resetRecvMessages uint64 resetSentMessages uint64 start time.Time } func (s *Stats) reset(start time.Time) { s.resetRecvMessages = s.numRecvMessages.Load() s.resetSentMessages = s.numSentMessages.Load() s.start = start } func (s *Stats) Log() { now := time.Now() duration := now.Sub(s.start) perSec := uint64(duration / time.Second) if perSec == 0 { return } totalSentMessages := s.numSentMessages.Load() sentMessages := totalSentMessages - s.resetSentMessages totalRecvMessages := s.numRecvMessages.Load() recvMessages := totalRecvMessages - s.resetRecvMessages log.Printf("Stats: sent=%d (%d/sec), recv=%d (%d/sec), delta=%d", totalSentMessages, sentMessages/perSec, totalRecvMessages, recvMessages/perSec, totalSentMessages-totalRecvMessages) s.reset(now) } type MessagePayload struct { Now time.Time `json:"now"` } type SignalingClient struct { readyWg *sync.WaitGroup cookie *securecookie.SecureCookie conn *websocket.Conn stats *Stats closed atomic.Bool stopChan chan struct{} lock sync.Mutex privateSessionId string publicSessionId string userId string } func NewSignalingClient(cookie *securecookie.SecureCookie, url string, stats *Stats, readyWg *sync.WaitGroup, doneWg *sync.WaitGroup) (*SignalingClient, error) { conn, _, err := websocket.DefaultDialer.Dial(url, nil) if err != nil { return nil, err } client := &SignalingClient{ readyWg: readyWg, cookie: cookie, conn: conn, stats: stats, stopChan: make(chan struct{}), } doneWg.Add(2) go func() { defer doneWg.Done() client.readPump() }() go func() { defer doneWg.Done() client.writePump() }() return client, nil } func (c *SignalingClient) Close() { if !c.closed.CompareAndSwap(false, true) { return } // Signal writepump to terminate close(c.stopChan) c.lock.Lock() c.publicSessionId = "" c.privateSessionId = "" 
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) // nolint c.conn.Close() c.conn = nil c.lock.Unlock() } func (c *SignalingClient) Send(message *signaling.ClientMessage) { c.lock.Lock() if c.conn == nil { c.lock.Unlock() return } if !c.writeInternal(message) { c.lock.Unlock() c.Close() return } c.lock.Unlock() } func (c *SignalingClient) processMessage(message *signaling.ServerMessage) { c.stats.numRecvMessages.Add(1) switch message.Type { case "hello": c.processHelloMessage(message) case "message": c.processMessageMessage(message) case "bye": log.Printf("Received bye: %+v", message.Bye) c.Close() case "error": log.Printf("Received error: %+v", message.Error) c.Close() default: log.Printf("Unsupported message type: %+v", *message) } } func (c *SignalingClient) privateToPublicSessionId(privateId string) string { var data signaling.SessionIdData if err := c.cookie.Decode(privateSessionName, privateId, &data); err != nil { panic(fmt.Sprintf("could not decode private session id: %s", err)) } encoded, err := c.cookie.Encode(publicSessionName, data) if err != nil { panic(fmt.Sprintf("could not encode public id: %s", err)) } reversed, err := reverseSessionId(encoded) if err != nil { panic(fmt.Sprintf("could not reverse session id: %s", err)) } return reversed } func (c *SignalingClient) processHelloMessage(message *signaling.ServerMessage) { c.lock.Lock() defer c.lock.Unlock() c.privateSessionId = message.Hello.ResumeId c.publicSessionId = c.privateToPublicSessionId(c.privateSessionId) c.userId = message.Hello.UserId log.Printf("Registered as %s (userid %s)", c.privateSessionId, c.userId) c.readyWg.Done() } func (c *SignalingClient) PublicSessionId() string { c.lock.Lock() defer c.lock.Unlock() return c.publicSessionId } func (c *SignalingClient) processMessageMessage(message *signaling.ServerMessage) { var msg MessagePayload if err := 
json.Unmarshal(*message.Message.Data, &msg); err != nil { log.Println("Error in unmarshal", err) return } now := time.Now() duration := now.Sub(msg.Now) if duration > messageReportDuration { log.Printf("Message took %s", duration) } } func (c *SignalingClient) readPump() { conn := c.conn defer func() { conn.Close() }() conn.SetReadLimit(maxMessageSize) conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint conn.SetPongHandler(func(string) error { conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint return nil }) var decodeBuffer bytes.Buffer for { conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint messageType, reader, err := conn.NextReader() if err != nil { if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { log.Printf("Error: %v", err) } break } if messageType != websocket.TextMessage { log.Println("Unsupported message type", messageType) break } decodeBuffer.Reset() if _, err := decodeBuffer.ReadFrom(reader); err != nil { c.lock.Lock() if c.conn != nil { log.Println("Error reading message", err) } c.lock.Unlock() break } var message signaling.ServerMessage if err := message.UnmarshalJSON(decodeBuffer.Bytes()); err != nil { log.Printf("Error: %v", err) break } c.processMessage(&message) } } func (c *SignalingClient) writeInternal(message *signaling.ClientMessage) bool { var closeData []byte c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint writer, err := c.conn.NextWriter(websocket.TextMessage) if err == nil { _, err = easyjson.MarshalToWriter(message, writer) } if err != nil { if err == websocket.ErrCloseSent { // Already sent a "close", won't be able to send anything else. return false } log.Println("Could not send message", message, err) // TODO(jojo): Differentiate between JSON encode errors and websocket errors. 
closeData = websocket.FormatCloseMessage(websocket.CloseInternalServerErr, "") goto close } writer.Close() c.stats.numSentMessages.Add(1) return true close: c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint c.conn.WriteMessage(websocket.CloseMessage, closeData) // nolint return false } func (c *SignalingClient) sendPing() bool { c.lock.Lock() defer c.lock.Unlock() if c.conn == nil { return false } c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { return false } return true } func (c *SignalingClient) writePump() { ticker := time.NewTicker(pingPeriod) defer func() { ticker.Stop() c.Close() }() for { select { case <-ticker.C: if !c.sendPing() { return } case <-c.stopChan: return } } } func (c *SignalingClient) SendMessages(clients []*SignalingClient) { sessionIds := make(map[*SignalingClient]string) for _, c := range clients { sessionIds[c] = c.PublicSessionId() } for !c.closed.Load() { now := time.Now() sender := c recipientIdx := pseudorand.Int() % len(clients) // Make sure a client is not sending to himself for clients[recipientIdx] == sender { recipientIdx = pseudorand.Int() % len(clients) } recipient := clients[recipientIdx] msgdata := MessagePayload{ Now: now, } data, _ := json.Marshal(msgdata) msg := &signaling.ClientMessage{ Type: "message", Message: &signaling.MessageClientMessage{ Recipient: signaling.MessageClientMessageRecipient{ Type: "session", SessionId: sessionIds[recipient], }, Data: (*json.RawMessage)(&data), }, } sender.Send(msg) // Give some time to other clients. 
time.Sleep(1 * time.Millisecond) } } func registerAuthHandler(router *mux.Router) { router.HandleFunc("/auth", func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { log.Println("Error reading body:", err) return } rnd := r.Header.Get(signaling.HeaderBackendSignalingRandom) checksum := r.Header.Get(signaling.HeaderBackendSignalingChecksum) if rnd == "" || checksum == "" { log.Println("No checksum headers found") return } if verify := signaling.CalculateBackendChecksum(rnd, body, backendSecret); verify != checksum { log.Println("Backend checksum verification failed") return } var request signaling.BackendClientRequest if err := request.UnmarshalJSON(body); err != nil { log.Println(err) return } response := &signaling.BackendClientResponse{ Type: "auth", Auth: &signaling.BackendClientAuthResponse{ Version: signaling.BackendVersion, UserId: "sample-user", }, } data, err := response.MarshalJSON() if err != nil { log.Println(err) return } rawdata := json.RawMessage(data) payload := &signaling.OcsResponse{ Ocs: &signaling.OcsBody{ Meta: signaling.OcsMeta{ Status: "ok", StatusCode: http.StatusOK, Message: http.StatusText(http.StatusOK), }, Data: &rawdata, }, } jsonpayload, err := payload.MarshalJSON() if err != nil { log.Println(err) return } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(jsonpayload) // nolint }) } func getLocalIP() string { interfaces, err := net.InterfaceAddrs() if err != nil { log.Fatal(err) } for _, intf := range interfaces { switch t := intf.(type) { case *net.IPNet: if !t.IP.IsInterfaceLocalMulticast() && !t.IP.IsLoopback() { return t.IP.String() } } } return "" } func reverseSessionId(s string) (string, error) { // Note that we are assuming base64 encoded strings here. 
decoded, err := base64.URLEncoding.DecodeString(s) if err != nil { return "", err } for i, j := 0, len(decoded)-1; i < j; i, j = i+1, j-1 { decoded[i], decoded[j] = decoded[j], decoded[i] } return base64.URLEncoding.EncodeToString(decoded), nil } func main() { flag.Parse() log.SetFlags(0) config, err := goconf.ReadConfigFile(*config) if err != nil { log.Fatal("Could not read configuration: ", err) } secret, _ := config.GetString("backend", "secret") backendSecret = []byte(secret) hashKey, _ := config.GetString("sessions", "hashkey") switch len(hashKey) { case 32: case 64: default: log.Printf("WARNING: The sessions hash key should be 32 or 64 bytes but is %d bytes", len(hashKey)) } blockKey, _ := config.GetString("sessions", "blockkey") blockBytes := []byte(blockKey) switch len(blockKey) { case 0: blockBytes = nil case 16: case 24: case 32: default: log.Fatalf("The sessions block key must be 16, 24 or 32 bytes but is %d bytes", len(blockKey)) } cookie := securecookie.New([]byte(hashKey), blockBytes).MaxAge(0) cpus := runtime.NumCPU() runtime.GOMAXPROCS(cpus) log.Printf("Using a maximum of %d CPUs", cpus) interrupt := make(chan os.Signal, 1) signal.Notify(interrupt, os.Interrupt) r := mux.NewRouter() registerAuthHandler(r) localIP := getLocalIP() listener, err := net.Listen("tcp", localIP+":0") if err != nil { log.Fatal(err) } server := http.Server{ Handler: r, } go func() { server.Serve(listener) // nolint }() backendUrl := "http://" + listener.Addr().String() log.Println("Backend server running on", backendUrl) urls := make([]url.URL, 0) urlstrings := make([]string, 0) for _, host := range strings.Split(*addr, ",") { u := url.URL{ Scheme: "ws", Host: host, Path: "/spreed", } urls = append(urls, u) urlstrings = append(urlstrings, u.String()) } log.Printf("Connecting to %s", urlstrings) clients := make([]*SignalingClient, 0) stats := &Stats{} if *maxClients < 2 { log.Fatalf("Need at least 2 clients, got %d", *maxClients) } log.Printf("Starting %d clients", 
*maxClients) var doneWg sync.WaitGroup var readyWg sync.WaitGroup for i := 0; i < *maxClients; i++ { client, err := NewSignalingClient(cookie, urls[i%len(urls)].String(), stats, &readyWg, &doneWg) if err != nil { log.Fatal(err) } defer client.Close() readyWg.Add(1) request := &signaling.ClientMessage{ Type: "hello", Hello: &signaling.HelloClientMessage{ Version: signaling.HelloVersionV1, Auth: signaling.HelloClientMessageAuth{ Url: backendUrl + "/auth", Params: &json.RawMessage{'{', '}'}, }, }, } client.Send(request) clients = append(clients, client) } log.Println("Clients created") readyWg.Wait() log.Println("All connections established") for _, c := range clients { doneWg.Add(1) go func(c *SignalingClient) { defer doneWg.Done() c.SendMessages(clients) }(c) } stats.start = time.Now() reportInterval := 10 * time.Second report := time.NewTicker(reportInterval) loop: for { select { case <-interrupt: log.Println("Interrupted") break loop case <-report.C: stats.Log() } } log.Println("Waiting for clients to terminate ...") for _, c := range clients { c.Close() } doneWg.Wait() } nextcloud-spreed-signaling-1.2.4/client_stats_prometheus.go000066400000000000000000000024631460321600400242530ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
* * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsClientCountries = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "client", Name: "countries_total", Help: "The total number of connections by country", }, []string{"country"}) clientStats = []prometheus.Collector{ statsClientCountries, } ) func RegisterClientStats() { registerAll(clientStats...) } nextcloud-spreed-signaling-1.2.4/clientsession.go000066400000000000000000001106521460321600400221660ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "fmt" "log" "net/url" "strings" "sync" "sync/atomic" "time" "github.com/pion/sdp/v3" ) var ( // Sessions expire 30 seconds after the connection closed. sessionExpireDuration = 30 * time.Second // Warn if a session has 32 or more pending messages. warnPendingMessagesCount = 32 PathToOcsSignalingBackend = "ocs/v2.php/apps/spreed/api/v1/signaling/backend" ) // ResponseHandlerFunc will return "true" has been fully processed. 
type ResponseHandlerFunc func(message *ClientMessage) bool type ClientSession struct { hub *Hub events AsyncEvents privateId string publicId string data *SessionIdData clientType string features []string userId string userData *json.RawMessage inCall Flags supportsPermissions bool permissions map[Permission]bool backend *Backend backendUrl string parsedBackendUrl *url.URL expires time.Time mu sync.Mutex client *Client room atomic.Pointer[Room] roomJoinTime atomic.Int64 roomSessionId string publisherWaiters ChannelWaiters publishers map[StreamType]McuPublisher subscribers map[string]McuSubscriber pendingClientMessages []*ServerMessage hasPendingChat bool hasPendingParticipantsUpdate bool virtualSessions map[*VirtualSession]bool seenJoinedLock sync.Mutex seenJoinedEvents map[string]bool responseHandlersLock sync.Mutex responseHandlers map[string]ResponseHandlerFunc } func NewClientSession(hub *Hub, privateId string, publicId string, data *SessionIdData, backend *Backend, hello *HelloClientMessage, auth *BackendClientAuthResponse) (*ClientSession, error) { s := &ClientSession{ hub: hub, events: hub.events, privateId: privateId, publicId: publicId, data: data, clientType: hello.Auth.Type, features: hello.Features, userId: auth.UserId, userData: auth.User, backend: backend, } if s.clientType == HelloClientTypeInternal { s.backendUrl = hello.Auth.internalParams.Backend s.parsedBackendUrl = hello.Auth.internalParams.parsedBackend if !s.HasFeature(ClientFeatureInternalInCall) { s.SetInCall(FlagInCall | FlagWithAudio) } } else { s.backendUrl = hello.Auth.Url s.parsedBackendUrl = hello.Auth.parsedUrl } if !strings.Contains(s.backendUrl, "/ocs/v2.php/") { backendUrl := s.backendUrl if !strings.HasSuffix(backendUrl, "/") { backendUrl += "/" } backendUrl += PathToOcsSignalingBackend u, err := url.Parse(backendUrl) if err != nil { return nil, err } if strings.Contains(u.Host, ":") && hasStandardPort(u) { u.Host = u.Hostname() } s.backendUrl = backendUrl s.parsedBackendUrl = u } 
if err := s.SubscribeEvents(); err != nil { return nil, err } return s, nil } func (s *ClientSession) PrivateId() string { return s.privateId } func (s *ClientSession) PublicId() string { return s.publicId } func (s *ClientSession) RoomSessionId() string { s.mu.Lock() defer s.mu.Unlock() return s.roomSessionId } func (s *ClientSession) Data() *SessionIdData { return s.data } func (s *ClientSession) ClientType() string { return s.clientType } // GetInCall is only used for internal clients. func (s *ClientSession) GetInCall() int { return int(s.inCall.Get()) } func (s *ClientSession) SetInCall(inCall int) bool { if inCall < 0 { inCall = 0 } return s.inCall.Set(uint32(inCall)) } func (s *ClientSession) GetFeatures() []string { return s.features } func (s *ClientSession) HasFeature(feature string) bool { for _, f := range s.features { if f == feature { return true } } return false } // HasPermission checks if the session has the passed permissions. func (s *ClientSession) HasPermission(permission Permission) bool { s.mu.Lock() defer s.mu.Unlock() return s.hasPermissionLocked(permission) } // HasAnyPermission checks if the session has one of the passed permissions. func (s *ClientSession) HasAnyPermission(permission ...Permission) bool { if len(permission) == 0 { return false } s.mu.Lock() defer s.mu.Unlock() return s.hasAnyPermissionLocked(permission...) } func (s *ClientSession) hasAnyPermissionLocked(permission ...Permission) bool { if len(permission) == 0 { return false } for _, p := range permission { if s.hasPermissionLocked(p) { return true } } return false } func (s *ClientSession) hasPermissionLocked(permission Permission) bool { if !s.supportsPermissions { // Old-style session that doesn't receive permissions from Nextcloud. 
if result, found := DefaultPermissionOverrides[permission]; found { return result } return true } if val, found := s.permissions[permission]; found { return val } return false } func permissionsEqual(a, b map[Permission]bool) bool { if a == nil && b == nil { return true } else if a != nil && b == nil { return false } else if a == nil && b != nil { return false } if len(a) != len(b) { return false } for k, v1 := range a { if v2, found := b[k]; !found || v1 != v2 { return false } } return true } func (s *ClientSession) SetPermissions(permissions []Permission) { var p map[Permission]bool for _, permission := range permissions { if p == nil { p = make(map[Permission]bool) } p[permission] = true } s.mu.Lock() defer s.mu.Unlock() if s.supportsPermissions && permissionsEqual(s.permissions, p) { return } s.permissions = p s.supportsPermissions = true log.Printf("Permissions of session %s changed: %s", s.PublicId(), permissions) } func (s *ClientSession) Backend() *Backend { return s.backend } func (s *ClientSession) BackendUrl() string { return s.backendUrl } func (s *ClientSession) ParsedBackendUrl() *url.URL { return s.parsedBackendUrl } func (s *ClientSession) AuthUserId() string { return s.userId } func (s *ClientSession) UserId() string { userId := s.userId if userId == "" { if room := s.GetRoom(); room != nil { if data := room.GetRoomSessionData(s); data != nil { userId = data.UserId } } } return userId } func (s *ClientSession) UserData() *json.RawMessage { return s.userData } func (s *ClientSession) StartExpire() { // The hub mutex must be held when calling this method. s.expires = time.Now().Add(sessionExpireDuration) s.hub.expiredSessions[s] = true } func (s *ClientSession) StopExpire() { // The hub mutex must be held when calling this method. 
delete(s.hub.expiredSessions, s) } func (s *ClientSession) IsExpired(now time.Time) bool { return now.After(s.expires) } func (s *ClientSession) SetRoom(room *Room) { s.room.Store(room) if room != nil { s.roomJoinTime.Store(time.Now().UnixNano()) } else { s.roomJoinTime.Store(0) } s.seenJoinedLock.Lock() defer s.seenJoinedLock.Unlock() s.seenJoinedEvents = nil } func (s *ClientSession) GetRoom() *Room { return s.room.Load() } func (s *ClientSession) getRoomJoinTime() time.Time { t := s.roomJoinTime.Load() if t == 0 { return time.Time{} } return time.Unix(0, t) } func (s *ClientSession) releaseMcuObjects() { if len(s.publishers) > 0 { go func(publishers map[StreamType]McuPublisher) { ctx := context.TODO() for _, publisher := range publishers { publisher.Close(ctx) } }(s.publishers) s.publishers = nil } if len(s.subscribers) > 0 { go func(subscribers map[string]McuSubscriber) { ctx := context.TODO() for _, subscriber := range subscribers { subscriber.Close(ctx) } }(s.subscribers) s.subscribers = nil } } func (s *ClientSession) Close() { s.closeAndWait(true) } func (s *ClientSession) closeAndWait(wait bool) { s.hub.removeSession(s) s.mu.Lock() defer s.mu.Unlock() if s.userId != "" { s.events.UnregisterUserListener(s.userId, s.backend, s) } s.events.UnregisterSessionListener(s.publicId, s.backend, s) go func(virtualSessions map[*VirtualSession]bool) { for session := range virtualSessions { session.Close() } }(s.virtualSessions) s.virtualSessions = nil s.releaseMcuObjects() s.clearClientLocked(nil) s.backend.RemoveSession(s) } func (s *ClientSession) SubscribeEvents() error { s.mu.Lock() defer s.mu.Unlock() if s.userId != "" { if err := s.events.RegisterUserListener(s.userId, s.backend, s); err != nil { return err } } return s.events.RegisterSessionListener(s.publicId, s.backend, s) } func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error { s.mu.Lock() defer s.mu.Unlock() if s.roomSessionId == roomSessionId { return nil } if err := 
s.hub.roomSessions.SetRoomSession(s, roomSessionId); err != nil { return err } if roomSessionId != "" { if room := s.GetRoom(); room != nil { log.Printf("Session %s updated room session id to %s in room %s", s.PublicId(), roomSessionId, room.Id()) } else { log.Printf("Session %s updated room session id to %s in unknown room", s.PublicId(), roomSessionId) } } else { if room := s.GetRoom(); room != nil { log.Printf("Session %s cleared room session id in room %s", s.PublicId(), room.Id()) } else { log.Printf("Session %s cleared room session id in unknown room", s.PublicId()) } } s.roomSessionId = roomSessionId return nil } func (s *ClientSession) SubscribeRoomEvents(roomid string, roomSessionId string) error { s.mu.Lock() defer s.mu.Unlock() if err := s.events.RegisterRoomListener(roomid, s.backend, s); err != nil { return err } if roomSessionId != "" { if err := s.hub.roomSessions.SetRoomSession(s, roomSessionId); err != nil { s.doUnsubscribeRoomEvents(true) return err } } log.Printf("Session %s joined room %s with room session id %s", s.PublicId(), roomid, roomSessionId) s.roomSessionId = roomSessionId return nil } func (s *ClientSession) LeaveCall() { s.mu.Lock() defer s.mu.Unlock() room := s.GetRoom() if room == nil { return } log.Printf("Session %s left call %s", s.PublicId(), room.Id()) s.releaseMcuObjects() } func (s *ClientSession) LeaveRoom(notify bool) *Room { s.mu.Lock() defer s.mu.Unlock() room := s.GetRoom() if room == nil { return nil } s.doUnsubscribeRoomEvents(notify) s.SetRoom(nil) s.releaseMcuObjects() room.RemoveSession(s) return room } func (s *ClientSession) UnsubscribeRoomEvents() { s.mu.Lock() defer s.mu.Unlock() s.doUnsubscribeRoomEvents(true) } func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) { room := s.GetRoom() if room != nil { s.events.UnregisterRoomListener(room.Id(), s.Backend(), s) } s.hub.roomSessions.DeleteRoomSession(s) if notify && room != nil && s.roomSessionId != "" { // Notify go func(sid string) { ctx := 
context.Background() request := NewBackendClientRoomRequest(room.Id(), s.userId, sid) request.Room.Action = "leave" var response map[string]interface{} if err := s.hub.backend.PerformJSONRequest(ctx, s.ParsedBackendUrl(), request, &response); err != nil { log.Printf("Could not notify about room session %s left room %s: %s", sid, room.Id(), err) } else { log.Printf("Removed room session %s: %+v", sid, response) } }(s.roomSessionId) } s.roomSessionId = "" } func (s *ClientSession) ClearClient(client *Client) { s.mu.Lock() defer s.mu.Unlock() s.clearClientLocked(client) } func (s *ClientSession) clearClientLocked(client *Client) { if s.client == nil { return } else if client != nil && s.client != client { log.Printf("Trying to clear other client in session %s", s.PublicId()) return } prevClient := s.client s.client = nil prevClient.SetSession(nil) } func (s *ClientSession) GetClient() *Client { s.mu.Lock() defer s.mu.Unlock() return s.getClientUnlocked() } func (s *ClientSession) getClientUnlocked() *Client { return s.client } func (s *ClientSession) SetClient(client *Client) *Client { if client == nil { panic("Use ClearClient to set the client to nil") } s.mu.Lock() defer s.mu.Unlock() if client == s.client { // No change return nil } client.SetSession(s) prev := s.client if prev != nil { s.clearClientLocked(prev) } s.client = client return prev } func (s *ClientSession) sendOffer(client McuClient, sender string, streamType StreamType, offer map[string]interface{}) { offer_message := &AnswerOfferMessage{ To: s.PublicId(), From: sender, Type: "offer", RoomType: string(streamType), Payload: offer, Sid: client.Sid(), } offer_data, err := json.Marshal(offer_message) if err != nil { log.Println("Could not serialize offer", offer_message, err) return } response_message := &ServerMessage{ Type: "message", Message: &MessageServerMessage{ Sender: &MessageServerMessageSender{ Type: "session", SessionId: sender, }, Data: (*json.RawMessage)(&offer_data), }, } 
s.sendMessageUnlocked(response_message) } func (s *ClientSession) sendCandidate(client McuClient, sender string, streamType StreamType, candidate interface{}) { candidate_message := &AnswerOfferMessage{ To: s.PublicId(), From: sender, Type: "candidate", RoomType: string(streamType), Payload: map[string]interface{}{ "candidate": candidate, }, Sid: client.Sid(), } candidate_data, err := json.Marshal(candidate_message) if err != nil { log.Println("Could not serialize candidate", candidate_message, err) return } response_message := &ServerMessage{ Type: "message", Message: &MessageServerMessage{ Sender: &MessageServerMessageSender{ Type: "session", SessionId: sender, }, Data: (*json.RawMessage)(&candidate_data), }, } s.sendMessageUnlocked(response_message) } func (s *ClientSession) sendMessageUnlocked(message *ServerMessage) bool { if c := s.getClientUnlocked(); c != nil { if c.SendMessage(message) { return true } } s.storePendingMessage(message) return true } func (s *ClientSession) SendError(e *Error) bool { message := &ServerMessage{ Type: "error", Error: e, } return s.SendMessage(message) } func (s *ClientSession) SendMessage(message *ServerMessage) bool { message = s.filterMessage(message) if message == nil { return true } s.mu.Lock() defer s.mu.Unlock() return s.sendMessageUnlocked(message) } func (s *ClientSession) SendMessages(messages []*ServerMessage) bool { s.mu.Lock() defer s.mu.Unlock() for _, message := range messages { s.sendMessageUnlocked(message) } return true } func (s *ClientSession) OnUpdateOffer(client McuClient, offer map[string]interface{}) { s.mu.Lock() defer s.mu.Unlock() for _, sub := range s.subscribers { if sub.Id() == client.Id() { s.sendOffer(client, sub.Publisher(), client.StreamType(), offer) return } } } func (s *ClientSession) OnIceCandidate(client McuClient, candidate interface{}) { s.mu.Lock() defer s.mu.Unlock() for _, sub := range s.subscribers { if sub.Id() == client.Id() { s.sendCandidate(client, sub.Publisher(), 
client.StreamType(), candidate) return } } for _, pub := range s.publishers { if pub.Id() == client.Id() { s.sendCandidate(client, s.PublicId(), client.StreamType(), candidate) return } } log.Printf("Session %s received candidate %+v for unknown client %s", s.PublicId(), candidate, client.Id()) } func (s *ClientSession) OnIceCompleted(client McuClient) { // TODO(jojo): This causes a JavaScript error when creating a candidate from "null". // Figure out a better way to signal this. // An empty candidate signals the end of candidates. // s.OnIceCandidate(client, nil) } func (s *ClientSession) SubscriberSidUpdated(subscriber McuSubscriber) { } func (s *ClientSession) PublisherClosed(publisher McuPublisher) { s.mu.Lock() defer s.mu.Unlock() for id, p := range s.publishers { if p == publisher { delete(s.publishers, id) break } } } func (s *ClientSession) SubscriberClosed(subscriber McuSubscriber) { s.mu.Lock() defer s.mu.Unlock() for id, sub := range s.subscribers { if sub == subscriber { delete(s.subscribers, id) break } } } type SdpError struct { message string } func (e *SdpError) Error() string { return e.message } type WrappedSdpError struct { SdpError err error } func (e *WrappedSdpError) Unwrap() error { return e.err } type PermissionError struct { permission Permission } func (e *PermissionError) Permission() Permission { return e.permission } func (e *PermissionError) Error() string { return fmt.Sprintf("permission \"%s\" not found", e.permission) } func (s *ClientSession) isSdpAllowedToSendLocked(payload map[string]interface{}) (MediaType, error) { sdpValue, found := payload["sdp"] if !found { return 0, &SdpError{"payload does not contain a sdp"} } sdpText, ok := sdpValue.(string) if !ok { return 0, &SdpError{"payload does not contain a valid sdp"} } var sdp sdp.SessionDescription if err := sdp.Unmarshal([]byte(sdpText)); err != nil { return 0, &WrappedSdpError{ SdpError: SdpError{ message: fmt.Sprintf("could not parse sdp: %s", err), }, err: err, } } var 
mediaTypes MediaType mayPublishMedia := s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_MEDIA) for _, md := range sdp.MediaDescriptions { switch md.MediaName.Media { case "audio": if !mayPublishMedia && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_AUDIO) { return 0, &PermissionError{PERMISSION_MAY_PUBLISH_AUDIO} } mediaTypes |= MediaTypeAudio case "video": if !mayPublishMedia && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_VIDEO) { return 0, &PermissionError{PERMISSION_MAY_PUBLISH_VIDEO} } mediaTypes |= MediaTypeVideo } } return mediaTypes, nil } func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error { s.mu.Lock() defer s.mu.Unlock() if data != nil && data.RoomType == "screen" { if s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) { return nil } return &PermissionError{PERMISSION_MAY_PUBLISH_SCREEN} } else if s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_MEDIA) { // Client is allowed to publish any media (audio / video). return nil } else if data != nil && data.Type == "offer" { // Parse SDP to check what user is trying to publish and check permissions accordingly. if _, err := s.isSdpAllowedToSendLocked(data.Payload); err != nil { return err } return nil } else { // Candidate or unknown event, check if client is allowed to publish any media. 
if s.hasAnyPermissionLocked(PERMISSION_MAY_PUBLISH_AUDIO, PERMISSION_MAY_PUBLISH_VIDEO) { return nil } return fmt.Errorf("permission check failed") } } func (s *ClientSession) CheckOfferType(streamType StreamType, data *MessageClientMessageData) (MediaType, error) { s.mu.Lock() defer s.mu.Unlock() return s.checkOfferTypeLocked(streamType, data) } func (s *ClientSession) checkOfferTypeLocked(streamType StreamType, data *MessageClientMessageData) (MediaType, error) { if streamType == StreamTypeScreen { if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) { return 0, &PermissionError{PERMISSION_MAY_PUBLISH_SCREEN} } return MediaTypeScreen, nil } else if data != nil && data.Type == "offer" { mediaTypes, err := s.isSdpAllowedToSendLocked(data.Payload) if err != nil { return 0, err } return mediaTypes, nil } return 0, nil } func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, streamType StreamType, data *MessageClientMessageData) (McuPublisher, error) { s.mu.Lock() defer s.mu.Unlock() mediaTypes, err := s.checkOfferTypeLocked(streamType, data) if err != nil { return nil, err } publisher, found := s.publishers[streamType] if !found { client := s.getClientUnlocked() s.mu.Unlock() defer s.mu.Lock() bitrate := data.Bitrate if backend := s.Backend(); backend != nil { var maxBitrate int if streamType == StreamTypeScreen { maxBitrate = backend.maxScreenBitrate } else { maxBitrate = backend.maxStreamBitrate } if bitrate <= 0 { bitrate = maxBitrate } else if maxBitrate > 0 && bitrate > maxBitrate { bitrate = maxBitrate } } var err error publisher, err = mcu.NewPublisher(ctx, s, s.PublicId(), data.Sid, streamType, bitrate, mediaTypes, client) if err != nil { return nil, err } if s.publishers == nil { s.publishers = make(map[StreamType]McuPublisher) } if prev, found := s.publishers[streamType]; found { // Another thread created the publisher while we were waiting. 
go func(pub McuPublisher) { closeCtx := context.TODO() pub.Close(closeCtx) }(publisher) publisher = prev } else { s.publishers[streamType] = publisher } log.Printf("Publishing %s as %s for session %s", streamType, publisher.Id(), s.PublicId()) s.publisherWaiters.Wakeup() } else { publisher.SetMedia(mediaTypes) } return publisher, nil } func (s *ClientSession) getPublisherLocked(streamType StreamType) McuPublisher { return s.publishers[streamType] } func (s *ClientSession) GetPublisher(streamType StreamType) McuPublisher { s.mu.Lock() defer s.mu.Unlock() return s.getPublisherLocked(streamType) } func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType StreamType) McuPublisher { s.mu.Lock() defer s.mu.Unlock() publisher := s.getPublisherLocked(streamType) if publisher != nil { return publisher } ch := make(chan struct{}, 1) id := s.publisherWaiters.Add(ch) defer s.publisherWaiters.Remove(id) for { s.mu.Unlock() select { case <-ch: s.mu.Lock() publisher := s.getPublisherLocked(streamType) if publisher != nil { return publisher } case <-ctx.Done(): s.mu.Lock() return nil } } } func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id string, streamType StreamType) (McuSubscriber, error) { s.mu.Lock() defer s.mu.Unlock() // TODO(jojo): Add method to remove subscribers. subscriber, found := s.subscribers[getStreamId(id, streamType)] if !found { s.mu.Unlock() var err error subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType) s.mu.Lock() if err != nil { return nil, err } if s.subscribers == nil { s.subscribers = make(map[string]McuSubscriber) } if prev, found := s.subscribers[getStreamId(id, streamType)]; found { // Another thread created the subscriber while we were waiting. 
go func(sub McuSubscriber) { closeCtx := context.TODO() sub.Close(closeCtx) }(subscriber) subscriber = prev } else { s.subscribers[getStreamId(id, streamType)] = subscriber } log.Printf("Subscribing %s from %s as %s in session %s", streamType, id, subscriber.Id(), s.PublicId()) } return subscriber, nil } func (s *ClientSession) GetSubscriber(id string, streamType StreamType) McuSubscriber { s.mu.Lock() defer s.mu.Unlock() return s.subscribers[getStreamId(id, streamType)] } func (s *ClientSession) ProcessAsyncRoomMessage(message *AsyncMessage) { s.processAsyncMessage(message) } func (s *ClientSession) ProcessAsyncUserMessage(message *AsyncMessage) { s.processAsyncMessage(message) } func (s *ClientSession) ProcessAsyncSessionMessage(message *AsyncMessage) { s.processAsyncMessage(message) } func (s *ClientSession) processAsyncMessage(message *AsyncMessage) { switch message.Type { case "permissions": s.SetPermissions(message.Permissions) go func() { s.mu.Lock() defer s.mu.Unlock() if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_MEDIA) { if publisher, found := s.publishers[StreamTypeVideo]; found { if (publisher.HasMedia(MediaTypeAudio) && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_AUDIO)) || (publisher.HasMedia(MediaTypeVideo) && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_VIDEO)) { delete(s.publishers, StreamTypeVideo) log.Printf("Session %s is no longer allowed to publish media, closing publisher %s", s.PublicId(), publisher.Id()) go func() { publisher.Close(context.Background()) }() return } } } if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) { if publisher, found := s.publishers[StreamTypeScreen]; found { delete(s.publishers, StreamTypeScreen) log.Printf("Session %s is no longer allowed to publish screen, closing publisher %s", s.PublicId(), publisher.Id()) go func() { publisher.Close(context.Background()) }() return } } }() return case "message": if message.Message.Type == "bye" && message.Message.Bye.Reason == "room_session_reconnected" { 
log.Printf("Closing session %s because same room session %s connected", s.PublicId(), s.RoomSessionId()) s.LeaveRoom(false) defer s.closeAndWait(false) } case "sendoffer": // Process asynchronously to not block other messages received. go func() { ctx, cancel := context.WithTimeout(context.Background(), s.hub.mcuTimeout) defer cancel() mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, StreamType(message.SendOffer.Data.RoomType)) if err != nil { log.Printf("Could not create MCU subscriber for session %s to process sendoffer in %s: %s", message.SendOffer.SessionId, s.PublicId(), err) if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{ Type: "message", Message: &ServerMessage{ Id: message.SendOffer.MessageId, Type: "error", Error: NewError("client_not_found", "No MCU client found to send message to."), }, }); err != nil { log.Printf("Error sending sendoffer error response to %s: %s", message.SendOffer.SessionId, err) } return } else if mc == nil { log.Printf("No MCU subscriber found for session %s to process sendoffer in %s", message.SendOffer.SessionId, s.PublicId()) if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{ Type: "message", Message: &ServerMessage{ Id: message.SendOffer.MessageId, Type: "error", Error: NewError("client_not_found", "No MCU client found to send message to."), }, }); err != nil { log.Printf("Error sending sendoffer error response to %s: %s", message.SendOffer.SessionId, err) } return } mc.SendMessage(context.TODO(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) { if err != nil { log.Printf("Could not send MCU message %+v for session %s to %s: %s", message.SendOffer.Data, message.SendOffer.SessionId, s.PublicId(), err) if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{ Type: "message", Message: &ServerMessage{ Id: message.SendOffer.MessageId, Type: 
"error", Error: NewError("processing_failed", "Processing of the message failed, please check server logs."), }, }); err != nil { log.Printf("Error sending sendoffer error response to %s: %s", message.SendOffer.SessionId, err) } return } else if response == nil { // No response received return } s.hub.sendMcuMessageResponse(s, mc, &MessageClientMessage{ Recipient: MessageClientMessageRecipient{ SessionId: message.SendOffer.SessionId, }, }, message.SendOffer.Data, response) }) }() return } serverMessage := s.filterAsyncMessage(message) if serverMessage == nil { return } s.SendMessage(serverMessage) } func (s *ClientSession) storePendingMessage(message *ServerMessage) { if message.IsChatRefresh() { if s.hasPendingChat { // Only send a single "chat-refresh" message on resume. return } s.hasPendingChat = true } if !s.hasPendingParticipantsUpdate && message.IsParticipantsUpdate() { s.hasPendingParticipantsUpdate = true } s.pendingClientMessages = append(s.pendingClientMessages, message) if len(s.pendingClientMessages) >= warnPendingMessagesCount { log.Printf("Session %s has %d pending messages", s.PublicId(), len(s.pendingClientMessages)) } } func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry { result := make([]*EventServerMessageSessionEntry, 0, len(events)) for _, event := range events { if event.User == nil { result = append(result, event) continue } var userdata map[string]interface{} if err := json.Unmarshal(*event.User, &userdata); err != nil { result = append(result, event) continue } if _, found := userdata["displayname"]; !found { result = append(result, event) continue } delete(userdata, "displayname") if len(userdata) == 0 { // No more userdata, no need to serialize empty map. 
e := event.Clone() e.User = nil result = append(result, e) continue } data, err := json.Marshal(userdata) if err != nil { result = append(result, event) continue } e := event.Clone() e.User = (*json.RawMessage)(&data) result = append(result, e) } return result } func (s *ClientSession) filterDuplicateJoin(entries []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry { s.seenJoinedLock.Lock() defer s.seenJoinedLock.Unlock() // Due to the asynchronous events, a session might received a "Joined" event // for the same (other) session twice, so filter these out on a per-session // level. result := make([]*EventServerMessageSessionEntry, 0, len(entries)) for _, e := range entries { if s.seenJoinedEvents[e.SessionId] { log.Printf("Session %s got duplicate joined event for %s, ignoring", s.publicId, e.SessionId) continue } if s.seenJoinedEvents == nil { s.seenJoinedEvents = make(map[string]bool) } s.seenJoinedEvents[e.SessionId] = true result = append(result, e) } return result } func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage { switch message.Type { case "event": switch message.Event.Target { case "participants": if message.Event.Type == "update" { m := message.Event.Update users := make(map[string]bool) for _, entry := range m.Users { users[entry["sessionId"].(string)] = true } for _, entry := range m.Changed { if users[entry["sessionId"].(string)] { continue } m.Users = append(m.Users, entry) } // TODO(jojo): Only send all users if current session id has // changed its "inCall" flag to true. m.Changed = nil } case "room": switch message.Event.Type { case "join": join := s.filterDuplicateJoin(message.Event.Join) if len(join) == 0 { return nil } copied := false if len(join) != len(message.Event.Join) { // Create unique copy of message for only this client. 
copied = true message = &ServerMessage{ Id: message.Id, Type: message.Type, Event: &EventServerMessage{ Type: message.Event.Type, Target: message.Event.Target, Join: join, }, } } if s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) { if copied { message.Event.Join = filterDisplayNames(message.Event.Join) } else { message = &ServerMessage{ Id: message.Id, Type: message.Type, Event: &EventServerMessage{ Type: message.Event.Type, Target: message.Event.Target, Join: filterDisplayNames(message.Event.Join), }, } } } case "leave": s.seenJoinedLock.Lock() defer s.seenJoinedLock.Unlock() for _, e := range message.Event.Leave { delete(s.seenJoinedEvents, e) } case "message": if message.Event.Message == nil || message.Event.Message.Data == nil || len(*message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) { return message } var data RoomEventMessageData if err := json.Unmarshal(*message.Event.Message.Data, &data); err != nil { return message } if data.Type == "chat" && data.Chat != nil && data.Chat.Comment != nil { if displayName, found := (*data.Chat.Comment)["actorDisplayName"]; found && displayName != "" { (*data.Chat.Comment)["actorDisplayName"] = "" if encoded, err := json.Marshal(data); err == nil { // Create unique copy of message for only this client. 
message = &ServerMessage{ Id: message.Id, Type: message.Type, Event: &EventServerMessage{ Type: message.Event.Type, Target: message.Event.Target, Message: &RoomEventMessage{ RoomId: message.Event.Message.RoomId, Data: (*json.RawMessage)(&encoded), }, }, } } } } } } case "message": if message.Message != nil && message.Message.Data != nil && len(*message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) { var data MessageServerMessageData if err := json.Unmarshal(*message.Message.Data, &data); err != nil { return message } if data.Type == "nickChanged" { return nil } } } return message } func (s *ClientSession) filterAsyncMessage(msg *AsyncMessage) *ServerMessage { switch msg.Type { case "message": if msg.Message == nil { log.Printf("Received asynchronous message without payload: %+v", msg) return nil } switch msg.Message.Type { case "message": if msg.Message.Message != nil && msg.Message.Message.Sender != nil && msg.Message.Message.Sender.SessionId == s.PublicId() { // Don't send message back to sender (can happen if sent to user or room) return nil } case "control": if msg.Message.Control != nil && msg.Message.Control.Sender != nil && msg.Message.Control.Sender.SessionId == s.PublicId() { // Don't send message back to sender (can happen if sent to user or room) return nil } case "event": if msg.Message.Event.Target == "room" { // Can happen mostly during tests where an older room async message // could be received by a subscriber that joined after it was sent. 
if joined := s.getRoomJoinTime(); joined.IsZero() || msg.SendTime.Before(joined) { log.Printf("Message %+v was sent on %s before room was joined on %s, ignoring", msg.Message, msg.SendTime, joined) return nil } } } return msg.Message default: log.Printf("Received async message with unsupported type %s: %+v", msg.Type, msg) return nil } } func (s *ClientSession) NotifySessionResumed(client *Client) { s.mu.Lock() if len(s.pendingClientMessages) == 0 { s.mu.Unlock() if room := s.GetRoom(); room != nil { room.NotifySessionResumed(s) } return } messages := s.pendingClientMessages hasPendingParticipantsUpdate := s.hasPendingParticipantsUpdate s.pendingClientMessages = nil s.hasPendingChat = false s.hasPendingParticipantsUpdate = false s.mu.Unlock() log.Printf("Send %d pending messages to session %s", len(messages), s.PublicId()) // Send through session to handle connection interruptions. s.SendMessages(messages) if !hasPendingParticipantsUpdate { // Only need to send initial participants list update if none was part of the pending messages. 
if room := s.GetRoom(); room != nil { room.NotifySessionResumed(s) } } } func (s *ClientSession) AddVirtualSession(session *VirtualSession) { s.mu.Lock() if s.virtualSessions == nil { s.virtualSessions = make(map[*VirtualSession]bool) } s.virtualSessions[session] = true s.mu.Unlock() } func (s *ClientSession) RemoveVirtualSession(session *VirtualSession) { s.mu.Lock() delete(s.virtualSessions, session) s.mu.Unlock() } func (s *ClientSession) GetVirtualSessions() []*VirtualSession { s.mu.Lock() defer s.mu.Unlock() result := make([]*VirtualSession, 0, len(s.virtualSessions)) for session := range s.virtualSessions { result = append(result, session) } return result } func (s *ClientSession) HandleResponse(id string, handler ResponseHandlerFunc) { s.responseHandlersLock.Lock() defer s.responseHandlersLock.Unlock() if s.responseHandlers == nil { s.responseHandlers = make(map[string]ResponseHandlerFunc) } s.responseHandlers[id] = handler } func (s *ClientSession) ClearResponseHandler(id string) { s.responseHandlersLock.Lock() defer s.responseHandlersLock.Unlock() delete(s.responseHandlers, id) } func (s *ClientSession) ProcessResponse(message *ClientMessage) bool { id := message.Id if id == "" { return false } s.responseHandlersLock.Lock() cb, found := s.responseHandlers[id] defer s.responseHandlersLock.Unlock() if !found { return false } return cb(message) } nextcloud-spreed-signaling-1.2.4/clientsession_test.go000066400000000000000000000155431460321600400232300ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "net/url" "strconv" "testing" ) var ( equalStrings = map[bool]string{ true: "equal", false: "not equal", } ) type EqualTestData struct { a map[Permission]bool b map[Permission]bool equal bool } func Test_permissionsEqual(t *testing.T) { tests := []EqualTestData{ { a: nil, b: nil, equal: true, }, { a: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, }, b: nil, equal: false, }, { a: nil, b: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, }, equal: false, }, { a: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, }, b: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, }, equal: true, }, { a: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, PERMISSION_MAY_PUBLISH_SCREEN: true, }, b: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, }, equal: false, }, { a: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, }, b: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, PERMISSION_MAY_PUBLISH_SCREEN: true, }, equal: false, }, { a: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, PERMISSION_MAY_PUBLISH_SCREEN: true, }, b: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, PERMISSION_MAY_PUBLISH_SCREEN: true, }, equal: true, }, { a: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, PERMISSION_MAY_PUBLISH_SCREEN: true, }, b: map[Permission]bool{ PERMISSION_MAY_PUBLISH_MEDIA: true, PERMISSION_MAY_PUBLISH_SCREEN: false, }, equal: false, }, } for idx, test := range tests { test := test t.Run(strconv.Itoa(idx), func(t *testing.T) { equal := permissionsEqual(test.a, test.b) if equal != test.equal { 
t.Errorf("Expected %+v to be %s to %+v but was %s", test.a, equalStrings[test.equal], test.b, equalStrings[equal]) } }) } } func TestBandwidth_Client(t *testing.T) { hub, _, _, server := CreateHubForTest(t) mcu, err := NewTestMCU() if err != nil { t.Fatal(err) } else if err := mcu.Start(); err != nil { t.Fatal(err) } defer mcu.Stop() hub.SetMcu(mcu) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Client may not send an offer with audio and video. 
bitrate := 10000 if err := client.SendMessage(MessageClientMessageRecipient{ Type: "session", SessionId: hello.Hello.SessionId, }, MessageClientMessageData{ Type: "offer", Sid: "54321", RoomType: "video", Bitrate: bitrate, Payload: map[string]interface{}{ "sdp": MockSdpOfferAudioAndVideo, }, }); err != nil { t.Fatal(err) } if err := client.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil { t.Fatal(err) } pub := mcu.GetPublisher(hello.Hello.SessionId) if pub == nil { t.Fatal("Could not find publisher") } if pub.bitrate != bitrate { t.Errorf("Expected bitrate %d, got %d", bitrate, pub.bitrate) } } func TestBandwidth_Backend(t *testing.T) { hub, _, _, server := CreateHubWithMultipleBackendsForTest(t) u, err := url.Parse(server.URL + "/one") if err != nil { t.Fatal(err) } backend := hub.backend.GetBackend(u) if backend == nil { t.Fatal("Could not get backend") } backend.maxScreenBitrate = 1000 backend.maxStreamBitrate = 2000 mcu, err := NewTestMCU() if err != nil { t.Fatal(err) } else if err := mcu.Start(); err != nil { t.Fatal(err) } defer mcu.Stop() hub.SetMcu(mcu) streamTypes := []StreamType{ StreamTypeVideo, StreamTypeScreen, } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() for _, streamType := range streamTypes { t.Run(string(streamType), func(t *testing.T) { client := NewTestClient(t, server, hub) defer client.CloseWithBye() params := TestBackendClientAuthParams{ UserId: testDefaultUserId, } if err := client.SendHelloParams(server.URL+"/one", HelloVersionV1, "client", nil, params); err != nil { t.Fatal(err) } hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. 
if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Client may not send an offer with audio and video. bitrate := 10000 if err := client.SendMessage(MessageClientMessageRecipient{ Type: "session", SessionId: hello.Hello.SessionId, }, MessageClientMessageData{ Type: "offer", Sid: "54321", RoomType: string(streamType), Bitrate: bitrate, Payload: map[string]interface{}{ "sdp": MockSdpOfferAudioAndVideo, }, }); err != nil { t.Fatal(err) } if err := client.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil { t.Fatal(err) } pub := mcu.GetPublisher(hello.Hello.SessionId) if pub == nil { t.Fatal("Could not find publisher") } var expectBitrate int if streamType == StreamTypeVideo { expectBitrate = backend.maxStreamBitrate } else { expectBitrate = backend.maxScreenBitrate } if pub.bitrate != expectBitrate { t.Errorf("Expected bitrate %d, got %d", expectBitrate, pub.bitrate) } }) } } nextcloud-spreed-signaling-1.2.4/closer.go000066400000000000000000000022761460321600400205750ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "sync/atomic" ) type Closer struct { closed atomic.Bool C chan struct{} } func NewCloser() *Closer { return &Closer{ C: make(chan struct{}), } } func (c *Closer) IsClosed() bool { return c.closed.Load() } func (c *Closer) Close() { if c.closed.CompareAndSwap(false, true) { close(c.C) } } nextcloud-spreed-signaling-1.2.4/closer_test.go000066400000000000000000000027171460321600400216340ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "sync" "testing" ) func TestCloserMulti(t *testing.T) { closer := NewCloser() var wg sync.WaitGroup count := 10 for i := 0; i < count; i++ { wg.Add(1) go func() { defer wg.Done() <-closer.C }() } if closer.IsClosed() { t.Error("should not be closed") } closer.Close() if !closer.IsClosed() { t.Error("should be closed") } wg.Wait() } func TestCloserCloseBeforeWait(t *testing.T) { closer := NewCloser() closer.Close() if !closer.IsClosed() { t.Error("should be closed") } <-closer.C if !closer.IsClosed() { t.Error("should be closed") } } nextcloud-spreed-signaling-1.2.4/concurrentmap.go000066400000000000000000000030521460321600400221570ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "sync" ) type ConcurrentStringStringMap struct { sync.Mutex d map[string]string } func (m *ConcurrentStringStringMap) Set(key, value string) { m.Lock() defer m.Unlock() if m.d == nil { m.d = make(map[string]string) } m.d[key] = value } func (m *ConcurrentStringStringMap) Get(key string) (string, bool) { m.Lock() defer m.Unlock() s, found := m.d[key] return s, found } func (m *ConcurrentStringStringMap) Del(key string) { m.Lock() defer m.Unlock() delete(m.d, key) } func (m *ConcurrentStringStringMap) Len() int { m.Lock() defer m.Unlock() return len(m.d) } func (m *ConcurrentStringStringMap) Clear() { m.Lock() defer m.Unlock() m.d = nil } nextcloud-spreed-signaling-1.2.4/concurrentmap_test.go000066400000000000000000000062401460321600400232200ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "strconv" "sync" "testing" ) func TestConcurrentStringStringMap(t *testing.T) { var m ConcurrentStringStringMap if m.Len() != 0 { t.Errorf("Expected %d entries, got %d", 0, m.Len()) } if v, found := m.Get("foo"); found { t.Errorf("Expected missing entry, got %s", v) } m.Set("foo", "bar") if m.Len() != 1 { t.Errorf("Expected %d entries, got %d", 1, m.Len()) } if v, found := m.Get("foo"); !found { t.Errorf("Expected entry") } else if v != "bar" { t.Errorf("Expected bar, got %s", v) } m.Set("foo", "baz") if m.Len() != 1 { t.Errorf("Expected %d entries, got %d", 1, m.Len()) } if v, found := m.Get("foo"); !found { t.Errorf("Expected entry") } else if v != "baz" { t.Errorf("Expected baz, got %s", v) } m.Set("lala", "lolo") if m.Len() != 2 { t.Errorf("Expected %d entries, got %d", 2, m.Len()) } if v, found := m.Get("lala"); !found { t.Errorf("Expected entry") } else if v != "lolo" { t.Errorf("Expected lolo, got %s", v) } // Deleting missing entries doesn't do anything. 
m.Del("xyz") if m.Len() != 2 { t.Errorf("Expected %d entries, got %d", 2, m.Len()) } if v, found := m.Get("foo"); !found { t.Errorf("Expected entry") } else if v != "baz" { t.Errorf("Expected baz, got %s", v) } if v, found := m.Get("lala"); !found { t.Errorf("Expected entry") } else if v != "lolo" { t.Errorf("Expected lolo, got %s", v) } m.Del("lala") if m.Len() != 1 { t.Errorf("Expected %d entries, got %d", 2, m.Len()) } if v, found := m.Get("foo"); !found { t.Errorf("Expected entry") } else if v != "baz" { t.Errorf("Expected baz, got %s", v) } if v, found := m.Get("lala"); found { t.Errorf("Expected missing entry, got %s", v) } m.Clear() var wg sync.WaitGroup concurrency := 100 count := 1000 for x := 0; x < concurrency; x++ { wg.Add(1) go func(x int) { defer wg.Done() key := "key-" + strconv.Itoa(x) for y := 0; y < count; y = y + 1 { value := newRandomString(32) m.Set(key, value) if v, found := m.Get(key); !found { t.Errorf("Expected entry for key %s", key) return } else if v != value { t.Errorf("Expected value %s for key %s, got %s", value, key, v) return } } }(x) } wg.Wait() if m.Len() != concurrency { t.Errorf("Expected %d entries, got %d", concurrency, m.Len()) } } nextcloud-spreed-signaling-1.2.4/config.go000066400000000000000000000030221460321600400205410ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
* * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "errors" "github.com/dlintw/goconf" ) func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bool) (map[string]string, error) { options, _ := config.GetOptions(section) if len(options) == 0 { return nil, nil } result := make(map[string]string) for _, option := range options { value, err := config.GetString(section, option) if err != nil { if ignoreErrors { continue } var ge goconf.GetError if errors.As(err, &ge) && ge.Reason == goconf.OptionNotFound { // Skip options from "default" section. continue } return nil, err } result[option] = value } return result, nil } nextcloud-spreed-signaling-1.2.4/config_test.go000066400000000000000000000025671460321600400216150ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "reflect" "testing" "github.com/dlintw/goconf" ) func TestStringOptions(t *testing.T) { expected := map[string]string{ "one": "1", "two": "2", } config := goconf.NewConfigFile() for k, v := range expected { config.AddOption("foo", k, v) } config.AddOption("default", "three", "3") options, err := GetStringOptions(config, "foo", false) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expected, options) { t.Errorf("expected %+v, got %+v", expected, options) } } nextcloud-spreed-signaling-1.2.4/continentmap.go000066400000000000000000000101511460321600400217740ustar00rootroot00000000000000package signaling // This file has been automatically generated, do not modify. // Source: https://github.com/datasets/country-codes/raw/master/data/country-codes.csv var ( ContinentMap = map[string][]string{ "AD": {"EU"}, "AE": {"AS"}, "AF": {"AS"}, "AG": {"NA"}, "AI": {"NA"}, "AL": {"EU"}, "AM": {"AS"}, "AO": {"AF"}, "AQ": {"AN"}, "AR": {"SA"}, "AS": {"OC"}, "AT": {"EU"}, "AU": {"OC"}, "AW": {"NA"}, "AX": {"EU"}, "AZ": {"AS"}, "BA": {"EU"}, "BB": {"NA"}, "BD": {"AS"}, "BE": {"EU"}, "BF": {"AF"}, "BG": {"EU"}, "BH": {"AS"}, "BI": {"AF"}, "BJ": {"AF"}, "BL": {"NA"}, "BM": {"NA"}, "BN": {"AS"}, "BO": {"SA"}, "BQ": {"NA"}, "BR": {"SA"}, "BS": {"NA"}, "BT": {"AS"}, "BV": {"AN"}, "BW": {"AF"}, "BY": {"EU"}, "BZ": {"NA"}, "CA": {"NA"}, "CC": {"AS"}, "CD": {"AF"}, "CF": {"AF"}, "CG": {"AF"}, "CH": {"EU"}, "CI": {"AF"}, "CK": {"OC"}, "CL": {"SA"}, "CM": {"AF"}, "CN": {"AS"}, "CO": {"SA"}, "CR": {"NA"}, "CU": {"NA"}, "CV": {"AF"}, "CW": {"NA"}, "CX": {"OC"}, "CY": {"EU"}, "CZ": {"EU"}, "DE": {"EU"}, "DJ": {"AF"}, "DK": {"EU"}, "DM": {"NA"}, "DO": {"NA"}, "DZ": {"AF"}, "EC": {"SA"}, "EE": {"EU"}, "EG": {"AF"}, "EH": {"AF"}, "ER": {"AF"}, "ES": {"EU"}, "ET": {"AF"}, "FI": {"EU"}, "FJ": {"OC"}, "FK": {"SA"}, "FM": {"OC"}, "FO": {"EU"}, "FR": {"EU"}, "GA": {"AF"}, "GB": {"EU"}, "GD": {"NA"}, "GE": {"AS"}, "GF": {"SA"}, "GG": {"EU"}, "GH": {"AF"}, "GI": {"EU"}, 
"GL": {"NA"}, "GM": {"AF"}, "GN": {"AF"}, "GP": {"NA"}, "GQ": {"AF"}, "GR": {"EU"}, "GS": {"AN"}, "GT": {"NA"}, "GU": {"OC"}, "GW": {"AF"}, "GY": {"SA"}, "HK": {"AS"}, "HM": {"AN"}, "HN": {"NA"}, "HR": {"EU"}, "HT": {"NA"}, "HU": {"EU"}, "ID": {"AS"}, "IE": {"EU"}, "IL": {"AS"}, "IM": {"EU"}, "IN": {"AS"}, "IO": {"AS"}, "IQ": {"AS"}, "IR": {"AS"}, "IS": {"EU"}, "IT": {"EU"}, "JE": {"EU"}, "JM": {"NA"}, "JO": {"AS"}, "JP": {"AS"}, "KE": {"AF"}, "KG": {"AS"}, "KH": {"AS"}, "KI": {"OC"}, "KM": {"AF"}, "KN": {"NA"}, "KP": {"AS"}, "KR": {"AS"}, "KW": {"AS"}, "KY": {"NA"}, "KZ": {"AS"}, "LA": {"AS"}, "LB": {"AS"}, "LC": {"NA"}, "LI": {"EU"}, "LK": {"AS"}, "LR": {"AF"}, "LS": {"AF"}, "LT": {"EU"}, "LU": {"EU"}, "LV": {"EU"}, "LY": {"AF"}, "MA": {"AF"}, "MC": {"EU"}, "MD": {"EU"}, "ME": {"EU"}, "MF": {"NA"}, "MG": {"AF"}, "MH": {"OC"}, "MK": {"EU"}, "ML": {"AF"}, "MM": {"AS"}, "MN": {"AS"}, "MO": {"AS"}, "MP": {"OC"}, "MQ": {"NA"}, "MR": {"AF"}, "MS": {"NA"}, "MT": {"EU"}, "MU": {"AF"}, "MV": {"AS"}, "MW": {"AF"}, "MX": {"NA"}, "MY": {"AS"}, "MZ": {"AF"}, "NA": {"AF"}, "NC": {"OC"}, "NE": {"AF"}, "NF": {"OC"}, "NG": {"AF"}, "NI": {"NA"}, "NL": {"EU"}, "NO": {"EU"}, "NP": {"AS"}, "NR": {"OC"}, "NU": {"OC"}, "NZ": {"OC"}, "OM": {"AS"}, "PA": {"NA"}, "PE": {"SA"}, "PF": {"OC"}, "PG": {"OC"}, "PH": {"AS"}, "PK": {"AS"}, "PL": {"EU"}, "PM": {"NA"}, "PN": {"OC"}, "PR": {"NA"}, "PS": {"AS"}, "PT": {"EU"}, "PW": {"OC"}, "PY": {"SA"}, "QA": {"AS"}, "RE": {"AF"}, "RO": {"EU"}, "RS": {"EU"}, "RU": {"EU"}, "RW": {"AF"}, "SA": {"AS"}, "SB": {"OC"}, "SC": {"AF"}, "SD": {"AF"}, "SE": {"EU"}, "SG": {"AS"}, "SH": {"AF"}, "SI": {"EU"}, "SJ": {"EU"}, "SK": {"EU"}, "SL": {"AF"}, "SM": {"EU"}, "SN": {"AF"}, "SO": {"AF"}, "SR": {"SA"}, "SS": {"AF"}, "ST": {"AF"}, "SV": {"NA"}, "SX": {"NA"}, "SY": {"AS"}, "SZ": {"AF"}, "TC": {"NA"}, "TD": {"AF"}, "TF": {"AN"}, "TG": {"AF"}, "TH": {"AS"}, "TJ": {"AS"}, "TK": {"OC"}, "TL": {"OC"}, "TM": {"AS"}, "TN": {"AF"}, "TO": {"OC"}, "TR": {"AS"}, "TT": 
{"NA"}, "TV": {"OC"}, "TW": {"AS"}, "TZ": {"AF"}, "UA": {"EU"}, "UG": {"AF"}, "UM": {"OC"}, "US": {"NA"}, "UY": {"SA"}, "UZ": {"AS"}, "VA": {"EU"}, "VC": {"NA"}, "VE": {"SA"}, "VG": {"NA"}, "VI": {"NA"}, "VN": {"AS"}, "VU": {"OC"}, "WF": {"OC"}, "WS": {"OC"}, "YE": {"AS"}, "YT": {"AF"}, "ZA": {"AF"}, "ZM": {"AF"}, "ZW": {"AF"}, } ) nextcloud-spreed-signaling-1.2.4/deferred_executor.go000066400000000000000000000037471460321600400230100ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "log" "reflect" "runtime" "runtime/debug" "sync" ) // DeferredExecutor will asynchronously execute functions while maintaining // their order. 
type DeferredExecutor struct { queue chan func() closed chan struct{} closeOnce sync.Once } func NewDeferredExecutor(queueSize int) *DeferredExecutor { if queueSize < 0 { queueSize = 0 } result := &DeferredExecutor{ queue: make(chan func(), queueSize), closed: make(chan struct{}), } go result.run() return result } func (e *DeferredExecutor) run() { defer close(e.closed) for { f := <-e.queue if f == nil { break } f() } } func getFunctionName(i interface{}) string { return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() } func (e *DeferredExecutor) Execute(f func()) { defer func() { if e := recover(); e != nil { log.Printf("Could not defer function %v: %+v", getFunctionName(f), e) log.Printf("Called from %s", string(debug.Stack())) } }() e.queue <- f } func (e *DeferredExecutor) Close() { e.closeOnce.Do(func() { close(e.queue) }) } func (e *DeferredExecutor) waitForStop() { <-e.closed } nextcloud-spreed-signaling-1.2.4/deferred_executor_test.go000066400000000000000000000051251460321600400240370ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "testing" "time" ) func TestDeferredExecutor_MultiClose(t *testing.T) { e := NewDeferredExecutor(0) defer e.waitForStop() e.Close() e.Close() } func TestDeferredExecutor_QueueSize(t *testing.T) { e := NewDeferredExecutor(0) defer e.waitForStop() defer e.Close() delay := 100 * time.Millisecond e.Execute(func() { time.Sleep(delay) }) // The queue will block until the first command finishes. a := time.Now() e.Execute(func() { time.Sleep(time.Millisecond) }) b := time.Now() delta := b.Sub(a) // Allow one millisecond less delay to account for time variance on CI runners. if delta+time.Millisecond < delay { t.Errorf("Expected a delay of %s, got %s", delay, delta) } } func TestDeferredExecutor_Order(t *testing.T) { e := NewDeferredExecutor(64) defer e.waitForStop() defer e.Close() var entries []int getFunc := func(x int) func() { return func() { entries = append(entries, x) } } done := make(chan struct{}) for x := 0; x < 10; x++ { e.Execute(getFunc(x)) } e.Execute(func() { close(done) }) <-done for x := 0; x < 10; x++ { if entries[x] != x { t.Errorf("Expected %d at position %d, got %d", x, x, entries[x]) } } } func TestDeferredExecutor_CloseFromFunc(t *testing.T) { e := NewDeferredExecutor(64) defer e.waitForStop() done := make(chan struct{}) e.Execute(func() { defer close(done) e.Close() }) <-done } func TestDeferredExecutor_DeferAfterClose(t *testing.T) { e := NewDeferredExecutor(64) defer e.waitForStop() e.Close() e.Execute(func() { t.Error("method should not have been called") }) } func TestDeferredExecutor_WaitForStopTwice(t *testing.T) { e := NewDeferredExecutor(64) defer e.waitForStop() e.Close() e.waitForStop() } 
nextcloud-spreed-signaling-1.2.4/dist/000077500000000000000000000000001460321600400177135ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/dist/init/000077500000000000000000000000001460321600400206565ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/dist/init/systemd/000077500000000000000000000000001460321600400223465ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/dist/init/systemd/signaling.service000066400000000000000000000017071460321600400257100ustar00rootroot00000000000000[Unit] Description=Nextcloud Talk signaling server [Service] ExecStart=/usr/bin/signaling --config /etc/signaling/server.conf User=signaling Group=signaling Restart=on-failure # Makes sure that /etc/signaling is owned by this service ConfigurationDirectory=signaling # Hardening - see systemd.exec(5) CapabilityBoundingSet= ExecPaths=/usr/bin/signaling /usr/lib LockPersonality=yes MemoryDenyWriteExecute=yes NoExecPaths=/ NoNewPrivileges=yes PrivateDevices=yes PrivateTmp=yes PrivateUsers=yes ProcSubset=pid ProtectClock=yes ProtectControlGroups=yes ProtectHome=yes ProtectHostname=yes ProtectKernelLogs=yes ProtectKernelModules=yes ProtectKernelTunables=yes ProtectProc=invisible ProtectSystem=strict RemoveIPC=yes RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX RestrictNamespaces=yes RestrictRealtime=yes RestrictSUIDSGID=yes SystemCallArchitectures=native SystemCallFilter=@system-service SystemCallFilter=~ @privileged [Install] WantedBy=multi-user.target nextcloud-spreed-signaling-1.2.4/dist/init/systemd/sysusers.d/000077500000000000000000000000001460321600400244705ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/dist/init/systemd/sysusers.d/signaling.conf000066400000000000000000000002461460321600400273140ustar00rootroot00000000000000# SPDX-FileCopyrightText: 2022 Andrea Pappacoda # # SPDX-License-Identifier: AGPL-3.0-or-later u signaling - "nextcloud-spreed-signaling user" 
nextcloud-spreed-signaling-1.2.4/dns_monitor.go000066400000000000000000000156041460321600400216400ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "log" "net" "net/url" "strings" "sync" "sync/atomic" "time" ) var ( lookupDnsMonitorIP = net.LookupIP ) const ( defaultDnsMonitorInterval = time.Second ) type DnsMonitorCallback = func(entry *DnsMonitorEntry, all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) type DnsMonitorEntry struct { entry atomic.Pointer[dnsMonitorEntry] url string callback DnsMonitorCallback } func (e *DnsMonitorEntry) URL() string { return e.url } type dnsMonitorEntry struct { hostname string hostIP net.IP mu sync.Mutex ips []net.IP entries map[*DnsMonitorEntry]bool } func (e *dnsMonitorEntry) setIPs(ips []net.IP, fromIP bool) { e.mu.Lock() defer e.mu.Unlock() empty := len(e.ips) == 0 if empty { // Simple case: initial lookup. if len(ips) > 0 { e.ips = ips e.runCallbacks(ips, ips, nil, nil) } return } else if fromIP { // No more updates possible for IP addresses. return } else if len(ips) == 0 { // Simple case: no records received from lookup. 
if !empty { removed := e.ips e.ips = nil e.runCallbacks(nil, nil, nil, removed) } return } var newIPs []net.IP var addedIPs []net.IP var removedIPs []net.IP var keepIPs []net.IP for _, oldIP := range e.ips { found := false for idx, newIP := range ips { if oldIP.Equal(newIP) { ips = append(ips[:idx], ips[idx+1:]...) found = true keepIPs = append(keepIPs, oldIP) newIPs = append(newIPs, oldIP) break } } if !found { removedIPs = append(removedIPs, oldIP) } } if len(ips) > 0 { addedIPs = append(addedIPs, ips...) newIPs = append(newIPs, ips...) } e.ips = newIPs if len(addedIPs) > 0 || len(removedIPs) > 0 { e.runCallbacks(newIPs, addedIPs, keepIPs, removedIPs) } } func (e *dnsMonitorEntry) addEntry(entry *DnsMonitorEntry) { e.mu.Lock() defer e.mu.Unlock() e.entries[entry] = true } func (e *dnsMonitorEntry) removeEntry(entry *DnsMonitorEntry) bool { e.mu.Lock() defer e.mu.Unlock() delete(e.entries, entry) return len(e.entries) == 0 } func (e *dnsMonitorEntry) runCallbacks(all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) { for entry := range e.entries { entry.callback(entry, all, add, keep, remove) } } type DnsMonitor struct { interval time.Duration stopCtx context.Context stopFunc func() stopped chan struct{} mu sync.RWMutex cond *sync.Cond hostnames map[string]*dnsMonitorEntry hasRemoved atomic.Bool // Can be overwritten from tests. 
checkHostnames func() } func NewDnsMonitor(interval time.Duration) (*DnsMonitor, error) { if interval < 0 { interval = defaultDnsMonitorInterval } stopCtx, stopFunc := context.WithCancel(context.Background()) monitor := &DnsMonitor{ interval: interval, stopCtx: stopCtx, stopFunc: stopFunc, stopped: make(chan struct{}), hostnames: make(map[string]*dnsMonitorEntry), } monitor.cond = sync.NewCond(&monitor.mu) monitor.checkHostnames = monitor.doCheckHostnames return monitor, nil } func (m *DnsMonitor) Start() error { go m.run() return nil } func (m *DnsMonitor) Stop() { m.stopFunc() m.cond.Signal() <-m.stopped } func (m *DnsMonitor) Add(target string, callback DnsMonitorCallback) (*DnsMonitorEntry, error) { var hostname string if strings.Contains(target, "://") { // Full URL passed. parsed, err := url.Parse(target) if err != nil { return nil, err } hostname = parsed.Host } else { // Hostname only passed. hostname = target } if h, _, err := net.SplitHostPort(hostname); err == nil { hostname = h } m.mu.Lock() defer m.mu.Unlock() e := &DnsMonitorEntry{ url: target, callback: callback, } entry, found := m.hostnames[hostname] if !found { entry = &dnsMonitorEntry{ hostname: hostname, hostIP: net.ParseIP(hostname), entries: make(map[*DnsMonitorEntry]bool), } m.hostnames[hostname] = entry } e.entry.Store(entry) entry.addEntry(e) m.cond.Signal() return e, nil } func (m *DnsMonitor) Remove(entry *DnsMonitorEntry) { oldEntry := entry.entry.Swap(nil) if oldEntry == nil { // Already removed. return } locked := m.mu.TryLock() // Spin-lock for simple cases that resolve immediately to avoid deferred removal. for i := 0; !locked && i < 1000; i++ { time.Sleep(time.Nanosecond) locked = m.mu.TryLock() } if !locked { // Currently processing callbacks for this entry, need to defer removal. 
m.hasRemoved.Store(true) return } defer m.mu.Unlock() e, found := m.hostnames[oldEntry.hostname] if !found { return } if e.removeEntry(entry) { delete(m.hostnames, e.hostname) } } func (m *DnsMonitor) clearRemoved() { if !m.hasRemoved.CompareAndSwap(true, false) { return } m.mu.Lock() defer m.mu.Unlock() for hostname, entry := range m.hostnames { deleted := false for e := range entry.entries { if e.entry.Load() == nil { delete(entry.entries, e) deleted = true } } if deleted && len(entry.entries) == 0 { delete(m.hostnames, hostname) } } } func (m *DnsMonitor) waitForEntries() (waited bool) { m.mu.Lock() defer m.mu.Unlock() for len(m.hostnames) == 0 && m.stopCtx.Err() == nil { m.cond.Wait() waited = true } return } func (m *DnsMonitor) run() { ticker := time.NewTicker(m.interval) defer ticker.Stop() defer close(m.stopped) for { if m.waitForEntries() { ticker.Reset(m.interval) if m.stopCtx.Err() == nil { // Initial check when a new entry was added. More checks will be // triggered by the Ticker. m.checkHostnames() continue } } select { case <-m.stopCtx.Done(): return case <-ticker.C: m.checkHostnames() } } } func (m *DnsMonitor) doCheckHostnames() { m.clearRemoved() m.mu.RLock() defer m.mu.RUnlock() for _, entry := range m.hostnames { m.checkHostname(entry) } } func (m *DnsMonitor) checkHostname(entry *dnsMonitorEntry) { if len(entry.hostIP) > 0 { entry.setIPs([]net.IP{entry.hostIP}, true) return } ips, err := lookupDnsMonitorIP(entry.hostname) if err != nil { log.Printf("Could not lookup %s: %s", entry.hostname, err) return } entry.setIPs(ips, false) } nextcloud-spreed-signaling-1.2.4/dns_monitor_test.go000066400000000000000000000210431460321600400226710ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "fmt" "net" "reflect" "sync" "sync/atomic" "testing" "time" ) type mockDnsLookup struct { sync.RWMutex ips map[string][]net.IP } func newMockDnsLookupForTest(t *testing.T) *mockDnsLookup { mock := &mockDnsLookup{ ips: make(map[string][]net.IP), } prev := lookupDnsMonitorIP t.Cleanup(func() { lookupDnsMonitorIP = prev }) lookupDnsMonitorIP = mock.lookup return mock } func (m *mockDnsLookup) Set(host string, ips []net.IP) { m.Lock() defer m.Unlock() m.ips[host] = ips } func (m *mockDnsLookup) Get(host string) []net.IP { m.Lock() defer m.Unlock() return m.ips[host] } func (m *mockDnsLookup) lookup(host string) ([]net.IP, error) { m.RLock() defer m.RUnlock() ips, found := m.ips[host] if !found { return nil, &net.DNSError{ Err: fmt.Sprintf("could not resolve %s", host), Name: host, IsNotFound: true, } } return append([]net.IP{}, ips...), nil } func newDnsMonitorForTest(t *testing.T, interval time.Duration) *DnsMonitor { t.Helper() monitor, err := NewDnsMonitor(interval) if err != nil { t.Fatal(err) } t.Cleanup(func() { monitor.Stop() }) if err := monitor.Start(); err != nil { t.Fatal(err) } return monitor } type dnsMonitorReceiverRecord struct { all []net.IP add []net.IP keep []net.IP remove []net.IP } func 
(r *dnsMonitorReceiverRecord) Equal(other *dnsMonitorReceiverRecord) bool { return r == other || (reflect.DeepEqual(r.add, other.add) && reflect.DeepEqual(r.keep, other.keep) && reflect.DeepEqual(r.remove, other.remove)) } func (r *dnsMonitorReceiverRecord) String() string { return fmt.Sprintf("all=%v, add=%v, keep=%v, remove=%v", r.all, r.add, r.keep, r.remove) } var ( expectNone = &dnsMonitorReceiverRecord{} ) type dnsMonitorReceiver struct { sync.Mutex t *testing.T expected *dnsMonitorReceiverRecord received *dnsMonitorReceiverRecord } func newDnsMonitorReceiverForTest(t *testing.T) *dnsMonitorReceiver { return &dnsMonitorReceiver{ t: t, } } func (r *dnsMonitorReceiver) OnLookup(entry *DnsMonitorEntry, all, add, keep, remove []net.IP) { r.Lock() defer r.Unlock() received := &dnsMonitorReceiverRecord{ all: all, add: add, keep: keep, remove: remove, } expected := r.expected r.expected = nil if expected == expectNone { r.t.Errorf("expected no event, got %v", received) return } if expected == nil { if r.received != nil && !r.received.Equal(received) { r.t.Errorf("already received %v, got %v", r.received, received) } return } if !expected.Equal(received) { r.t.Errorf("expected %v, got %v", expected, received) } r.received = nil r.expected = nil } func (r *dnsMonitorReceiver) WaitForExpected(ctx context.Context) { r.t.Helper() r.Lock() defer r.Unlock() ticker := time.NewTicker(time.Microsecond) abort := false for r.expected != nil && !abort { r.Unlock() select { case <-ticker.C: case <-ctx.Done(): r.t.Error(ctx.Err()) abort = true } r.Lock() } } func (r *dnsMonitorReceiver) Expect(all, add, keep, remove []net.IP) { r.t.Helper() r.Lock() defer r.Unlock() if r.expected != nil && r.expected != expectNone { r.t.Errorf("didn't get previously expected %v", r.expected) } expected := &dnsMonitorReceiverRecord{ all: all, add: add, keep: keep, remove: remove, } if r.received != nil && r.received.Equal(expected) { r.received = nil return } r.expected = expected } func (r 
*dnsMonitorReceiver) ExpectNone() { r.t.Helper() r.Lock() defer r.Unlock() if r.expected != nil && r.expected != expectNone { r.t.Errorf("didn't get previously expected %v", r.expected) } r.expected = expectNone } func TestDnsMonitor(t *testing.T) { lookup := newMockDnsLookupForTest(t) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() interval := time.Millisecond monitor := newDnsMonitorForTest(t, interval) ip1 := net.ParseIP("192.168.0.1") ip2 := net.ParseIP("192.168.1.1") ip3 := net.ParseIP("10.1.2.3") ips1 := []net.IP{ ip1, ip2, } lookup.Set("foo", ips1) rec1 := newDnsMonitorReceiverForTest(t) rec1.Expect(ips1, ips1, nil, nil) entry1, err := monitor.Add("https://foo:12345", rec1.OnLookup) if err != nil { t.Fatal(err) } defer monitor.Remove(entry1) rec1.WaitForExpected(ctx) ips2 := []net.IP{ ip1, ip2, ip3, } add2 := []net.IP{ip3} keep2 := []net.IP{ip1, ip2} rec1.Expect(ips2, add2, keep2, nil) lookup.Set("foo", ips2) rec1.WaitForExpected(ctx) ips3 := []net.IP{ ip2, ip3, } keep3 := []net.IP{ip2, ip3} remove3 := []net.IP{ip1} rec1.Expect(ips3, nil, keep3, remove3) lookup.Set("foo", ips3) rec1.WaitForExpected(ctx) rec1.ExpectNone() time.Sleep(5 * interval) remove4 := []net.IP{ip2, ip3} rec1.Expect(nil, nil, nil, remove4) lookup.Set("foo", nil) rec1.WaitForExpected(ctx) rec1.ExpectNone() time.Sleep(5 * interval) // Removing multiple times is supported. monitor.Remove(entry1) monitor.Remove(entry1) // No more events after removing. 
lookup.Set("foo", ips1) rec1.ExpectNone() time.Sleep(5 * interval) } func TestDnsMonitorIP(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() interval := time.Millisecond monitor := newDnsMonitorForTest(t, interval) ip := "192.168.0.1" ips := []net.IP{ net.ParseIP(ip), } rec1 := newDnsMonitorReceiverForTest(t) rec1.Expect(ips, ips, nil, nil) entry, err := monitor.Add(ip+":12345", rec1.OnLookup) if err != nil { t.Fatal(err) } defer monitor.Remove(entry) rec1.WaitForExpected(ctx) rec1.ExpectNone() time.Sleep(5 * interval) } func TestDnsMonitorNoLookupIfEmpty(t *testing.T) { interval := time.Millisecond monitor := newDnsMonitorForTest(t, interval) var checked atomic.Bool monitor.checkHostnames = func() { checked.Store(true) monitor.doCheckHostnames() } time.Sleep(10 * interval) if checked.Load() { t.Error("should not have checked hostnames") } } type deadlockMonitorReceiver struct { t *testing.T monitor *DnsMonitor mu sync.RWMutex wg sync.WaitGroup entry *DnsMonitorEntry started chan struct{} triggered bool closed atomic.Bool } func newDeadlockMonitorReceiver(t *testing.T, monitor *DnsMonitor) *deadlockMonitorReceiver { return &deadlockMonitorReceiver{ t: t, monitor: monitor, started: make(chan struct{}), } } func (r *deadlockMonitorReceiver) OnLookup(entry *DnsMonitorEntry, all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) { if r.closed.Load() { r.t.Error("received lookup after closed") return } r.mu.Lock() defer r.mu.Unlock() if r.triggered { return } r.triggered = true r.wg.Add(1) go func() { defer r.wg.Done() r.mu.RLock() defer r.mu.RUnlock() close(r.started) time.Sleep(50 * time.Millisecond) }() } func (r *deadlockMonitorReceiver) Start() { r.mu.Lock() defer r.mu.Unlock() entry, err := r.monitor.Add("foo", r.OnLookup) if err != nil { r.t.Errorf("error adding listener: %s", err) return } r.entry = entry } func (r *deadlockMonitorReceiver) Close() { r.mu.Lock() defer r.mu.Unlock() if r.entry != nil { 
r.monitor.Remove(r.entry) r.closed.Store(true) } r.wg.Wait() } func TestDnsMonitorDeadlock(t *testing.T) { lookup := newMockDnsLookupForTest(t) ip1 := net.ParseIP("192.168.0.1") ip2 := net.ParseIP("192.168.0.2") lookup.Set("foo", []net.IP{ip1}) interval := time.Millisecond monitor := newDnsMonitorForTest(t, interval) r := newDeadlockMonitorReceiver(t, monitor) r.Start() <-r.started lookup.Set("foo", []net.IP{ip2}) r.Close() lookup.Set("foo", []net.IP{ip1}) time.Sleep(10 * interval) monitor.mu.Lock() defer monitor.mu.Unlock() if len(monitor.hostnames) > 0 { t.Errorf("should have cleared hostnames, got %+v", monitor.hostnames) } } nextcloud-spreed-signaling-1.2.4/docker/000077500000000000000000000000001460321600400202175ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/docker/README.md000066400000000000000000000153451460321600400215060ustar00rootroot00000000000000# Docker images for nextcloud-spreed-signaling ## Signaling server The image for the signaling server can be retrieved from strukturag/nextcloud-spreed-signaling: Replace `version` with the tag or commit you want to use. ### Configuration The running container can be configured through different environment variables: - `CONFIG`: Optional name of configuration file to use. - `HTTP_LISTEN`: Address of HTTP listener. - `HTTPS_LISTEN`: Address of HTTPS listener. - `HTTPS_CERTIFICATE`: Name of certificate file for the HTTPS listener. - `HTTPS_KEY`: Name of private key file for the HTTPS listener. - `HASH_KEY`: Secret value used to generate checksums of sessions (32 or 64 bytes). - `BLOCK_KEY`: Key for encrypting data in the sessions (16, 24 or 32 bytes). - `INTERNAL_SHARED_SECRET_KEY`: Shared secret for connections from internal clients. - `BACKENDS_ALLOWALL`: Allow all backends. Extremly insecure - use only for development! - `BACKENDS_ALLOWALL_SECRET`: Secret when `BACKENDS_ALLOWALL` is enabled. - `BACKENDS`: Space-separated list of backend ids. 
- `BACKEND__URL`: Url of backend `ID` (where `ID` is the uppercase backend id). - `BACKEND__SHARED_SECRET`: Shared secret for backend `ID` (where `ID` is the uppercase backend id). - `BACKEND__SESSION_LIMIT`: Optional session limit for backend `ID` (where `ID` is the uppercase backend id). - `BACKEND__MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams in backend `ID` (where `ID` is the uppercase backend id). - `BACKEND__MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams in backend `ID` (where `ID` is the uppercase backend id). - `NATS_URL`: Optional URL of NATS server. - `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used). - `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used). - `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd.. - `ETCD_CLIENT_CERTIFICATE`: Filename of certificate for etcd client. - `ETCD_CLIENT_KEY`: Filename of private key for etcd client. - `ETCD_CLIENT_CA`: Filename of CA for etcd client. - `USE_JANUS`: Set to `1` if Janus should be used as WebRTC backend. - `JANUS_URL`: Url to Janus server (if `USE_JANUS` is set to `1`). - `USE_PROXY`: Set to `1` if proxy servers should be used as WebRTC backends. - `PROXY_TOKEN_ID`: Id of the token to use when connecting to proxy servers. - `PROXY_TOKEN_KEY`: Private key for the configured token id. - `PROXY_URLS`: Space-separated list of proxy URLs to connect to. - `PROXY_DNS_DISCOVERY`: Enable DNS discovery on hostnames of configured static URLs. - `PROXY_ETCD`: Set to `1` if etcd should be used to configure proxy connections. - `PROXY_KEY_PREFIX`: Key prefix of proxy entries. - `MAX_STREAM_BITRATE`: Optional global maximum bitrate for audio/video streams. - `MAX_SCREEN_BITRATE`: Optional global maximum bitrate for screensharing streams. - `TURN_API_KEY`: API key that Janus will need to send when requesting TURN credentials. 
- `TURN_SECRET`: The shared secret to use for generating TURN credentials. - `TURN_SERVERS`: A comma-separated list of TURN servers to use. - `GEOIP_LICENSE`: License key to use when downloading the MaxMind GeoIP database. - `GEOIP_URL`: Optional URL to download a MaxMind GeoIP database from. - `GEOIP_OVERRIDES`: Optional space-separated list of overrides for GeoIP lookups. - `CONTINENT_OVERRIDES`: Optional space-separated list of overrides for continent mappings. - `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint. - `GRPC_LISTEN`: IP and port to listen on for GRPC requests. - `GRPC_SERVER_CERTIFICATE`: Certificate to use for the GRPC server. - `GRPC_SERVER_KEY`: Private key to use for the GRPC server. - `GRPC_SERVER_CA`: CA certificate that is allowed to issue certificates of GRPC servers. - `GRPC_CLIENT_CERTIFICATE`: Certificate to use for the GRPC client. - `GRPC_CLIENT_KEY`: Private key to use for the GRPC client. - `GRPC_CLIENT_CA`: CA certificate that is allowed to issue certificates of GRPC clients. - `GRPC_TARGETS`: Comma-separated list of GRPC targets to connect to for clustering mode. - `GRPC_DNS_DISCOVERY`: Enable DNS discovery on hostnames of configured GRPC targets. - `GRPC_ETCD`: Set to `1` if etcd should be used to configure GRPC peers. - `GRPC_TARGET_PREFIX`: Key prefix of GRPC target entries. - `SKIP_VERIFY`: Set to `true` to skip certificate validation of backends and proxy servers. This should only be enabled during development, e.g. to work with self-signed certificates. Example with two backends: docker run \ ... \ -e BACKENDS="foo bar" \ -e BACKEND_FOO_URL=https://cloud.server1.tld \ -e BACKEND_FOO_SHARED_SECRET=verysecret \ -e BACKEND_BAR_URL=https://cloud.server2.tld \ -e BACKEND_BAR_SHARED_SECRET=moresecret \ ... See https://github.com/strukturag/nextcloud-spreed-signaling/blob/master/server.conf.in for further details on the different options. 
## Signaling proxy The image for the signaling proxy can be retrieved from strukturag/nextcloud-spreed-signaling:-proxy Replace `version` with the tag or commit you want to use. ### Configuration The running container can be configured through different environment variables: - `CONFIG`: Optional name of configuration file to use. - `HTTP_LISTEN`: Address of HTTP listener. - `COUNTRY`: Optional ISO 3166 country this proxy is located at. - `JANUS_URL`: Url to Janus server. - `MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams. - `MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams. - `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used). - `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used). - `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd.. - `ETCD_CLIENT_CERTIFICATE`: Filename of certificate for etcd client. - `ETCD_CLIENT_KEY`: Filename of private key for etcd client. - `ETCD_CLIENT_CA`: Filename of CA for etcd client. - `TOKENS_ETCD`: Set to `1` if etcd should be used to configure tokens. - `TOKEN_KEY_FORMAT`: Format of key name to retrieve the public key from, "%s" will be replaced with the token id. - `TOKENS`: Space-separated list of token ids. - `TOKEN__KEY`: Filename of public key for token `ID` (where `ID` is the uppercase token id). Example with two tokens: docker run \ ... \ -e TOKENS="foo signaling.server1.tld" \ -e TOKEN_FOO_KEY=/path/to/foo.key \ -e TOKEN_SIGNALING_SERVER1_TLD_KEY=/path/to/signaling.server1.tld.key \ ... See https://github.com/strukturag/nextcloud-spreed-signaling/blob/master/proxy.conf.in for further details on the different options. nextcloud-spreed-signaling-1.2.4/docker/docker-compose.yml000066400000000000000000000023331460321600400236550ustar00rootroot00000000000000version: '3' services: spreedbackend: build: context: .. 
dockerfile: docker/server/Dockerfile platforms: - "linux/amd64" volumes: - ./server.conf:/config/server.conf network_mode: host restart: unless-stopped depends_on: - nats - janus - coturn nats: image: nats:2.2.1 volumes: - ./gnatsd.conf:/config/gnatsd.conf command: ["-c", "/config/gnatsd.conf"] network_mode: host restart: unless-stopped janus: build: janus command: ["janus", "--full-trickle"] network_mode: host restart: unless-stopped coturn: image: coturn/coturn:latest network_mode: host # # Update command parameters as necessary. # # See https://github.com/coturn/coturn/blob/master/README.turnserver for # available options. command: - "--realm" - "nextcloud.domain.invalid" - "--static-auth-secret" - "static_secret_same_in_server_conf" - "--no-stdout-log" - "--log-file" - "stdout" - "--stale-nonce=600" - "--use-auth-secret" - "--lt-cred-mech" - "--fingerprint" - "--no-software-attribute" - "--no-multicast-peers" restart: unless-stopped nextcloud-spreed-signaling-1.2.4/docker/janus/000077500000000000000000000000001460321600400213375ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/docker/janus/Dockerfile000066400000000000000000000027651460321600400233430ustar00rootroot00000000000000# Modified from https://gitlab.com/powerpaul17/nc_talk_backend/-/blob/dcbb918d8716dad1eb72a889d1e6aa1e3a543641/docker/janus/Dockerfile FROM alpine:3.19 RUN apk add --no-cache curl autoconf automake libtool pkgconf build-base \ glib-dev libconfig-dev libnice-dev jansson-dev openssl-dev zlib libsrtp-dev \ gengetopt libwebsockets-dev git curl-dev libogg-dev # usrsctp # 08 Oct 2021 ARG USRSCTP_VERSION=7c31bd35c79ba67084ce029511193a19ceb97447 RUN cd /tmp && \ git clone https://github.com/sctplab/usrsctp && \ cd usrsctp && \ git checkout $USRSCTP_VERSION && \ ./bootstrap && \ ./configure --prefix=/usr && \ make && make install # libsrtp ARG LIBSRTP_VERSION=2.4.2 RUN cd /tmp && \ wget https://github.com/cisco/libsrtp/archive/v$LIBSRTP_VERSION.tar.gz && \ tar xfv 
v$LIBSRTP_VERSION.tar.gz && \ cd libsrtp-$LIBSRTP_VERSION && \ ./configure --prefix=/usr --enable-openssl && \ make shared_library && \ make install && \ rm -fr /libsrtp-$LIBSRTP_VERSION && \ rm -f /v$LIBSRTP_VERSION.tar.gz # JANUS ARG JANUS_VERSION=0.14.1 RUN mkdir -p /usr/src/janus && \ cd /usr/src/janus && \ curl -L https://github.com/meetecho/janus-gateway/archive/v$JANUS_VERSION.tar.gz | tar -xz && \ cd /usr/src/janus/janus-gateway-$JANUS_VERSION && \ ./autogen.sh && \ ./configure --disable-rabbitmq --disable-mqtt --disable-boringssl && \ make && \ make install && \ make configs WORKDIR /usr/src/janus/janus-gateway-$JANUS_VERSION CMD [ "janus" ] nextcloud-spreed-signaling-1.2.4/docker/proxy/000077500000000000000000000000001460321600400214005ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/docker/proxy/Dockerfile000066400000000000000000000015451460321600400233770ustar00rootroot00000000000000FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS builder ARG TARGETARCH ARG TARGETOS WORKDIR /workdir COPY . . RUN touch /.dockerenv && \ apk add --no-cache bash git build-base protobuf && \ if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make proxy; else \ GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy; fi FROM alpine:3 ENV CONFIG=/config/proxy.conf RUN adduser -D spreedbackend && \ apk add --no-cache bash tzdata ca-certificates COPY --from=builder /workdir/bin/proxy /usr/bin/nextcloud-spreed-signaling-proxy COPY ./proxy.conf.in /config/proxy.conf.in COPY ./docker/proxy/entrypoint.sh / COPY ./docker/proxy/stop.sh / COPY ./docker/proxy/wait.sh / RUN chown spreedbackend /config RUN /usr/bin/nextcloud-spreed-signaling-proxy -version USER spreedbackend STOPSIGNAL SIGUSR1 ENTRYPOINT [ "/entrypoint.sh" ] nextcloud-spreed-signaling-1.2.4/docker/proxy/entrypoint.sh000077500000000000000000000067561460321600400241700ustar00rootroot00000000000000#!/bin/bash # # Standalone signaling server for the Nextcloud Spreed app. 
# Copyright (C) 2022 struktur AG # # @author Joachim Bauch # # @license GNU AGPL version 3 or any later version # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # set -e if [ -n "$1" ]; then # Run custom command. exec "$@" fi if [ -z "$CONFIG" ]; then echo "No configuration filename given in CONFIG environment variable" exit 1 fi if [ ! -f "$CONFIG" ]; then echo "Preparing signaling proxy configuration in $CONFIG ..." cp /config/proxy.conf.in "$CONFIG" if [ -n "$HTTP_LISTEN" ]; then sed -i "s|#listen = 127.0.0.1:9090|listen = $HTTP_LISTEN|" "$CONFIG" fi if [ -n "$COUNTRY" ]; then sed -i "s|#country =.*|country = $COUNTRY|" "$CONFIG" fi HAS_ETCD= if [ -n "$ETCD_ENDPOINTS" ]; then sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG" HAS_ETCD=1 else if [ -n "$ETCD_DISCOVERY_SRV" ]; then sed -i "s|#discoverysrv =.*|discoverysrv = $ETCD_DISCOVERY_SRV|" "$CONFIG" HAS_ETCD=1 fi if [ -n "$ETCD_DISCOVERY_SERVICE" ]; then sed -i "s|#discoveryservice =.*|discoveryservice = $ETCD_DISCOVERY_SERVICE|" "$CONFIG" fi fi if [ -n "$HAS_ETCD" ]; then if [ -n "$ETCD_CLIENT_KEY" ]; then sed -i "s|#clientkey = /path/to/etcd-client.key|clientkey = $ETCD_CLIENT_KEY|" "$CONFIG" fi if [ -n "$ETCD_CLIENT_CERTIFICATE" ]; then sed -i "s|#clientcert = /path/to/etcd-client.crt|clientcert = $ETCD_CLIENT_CERTIFICATE|" "$CONFIG" fi if [ -n "$ETCD_CLIENT_CA" ]; then sed -i "s|#cacert = /path/to/etcd-ca.crt|cacert = 
$ETCD_CLIENT_CA|" "$CONFIG" fi fi if [ -n "$JANUS_URL" ]; then sed -i "s|url =.*|url = $JANUS_URL|" "$CONFIG" else sed -i "s|url =.*|#url =|" "$CONFIG" fi if [ -n "$MAX_STREAM_BITRATE" ]; then sed -i "s|#maxstreambitrate =.*|maxstreambitrate = $MAX_STREAM_BITRATE|" "$CONFIG" fi if [ -n "$MAX_SCREEN_BITRATE" ]; then sed -i "s|#maxscreenbitrate =.*|maxscreenbitrate = $MAX_SCREEN_BITRATE|" "$CONFIG" fi if [ -n "$TOKENS_ETCD" ]; then if [ -z "$HAS_ETCD" ]; then echo "No etcd endpoint configured, can't use etcd for proxy tokens" exit 1 fi sed -i "s|tokentype =.*|tokentype = etcd|" "$CONFIG" if [ -n "$TOKEN_KEY_FORMAT" ]; then sed -i "s|#keyformat =.*|keyformat = $TOKEN_KEY_FORMAT|" "$CONFIG" fi else sed -i "s|\[tokens\]|#[tokens]|" "$CONFIG" echo >> "$CONFIG" echo "[tokens]" >> "$CONFIG" for token in $TOKENS; do declare var="TOKEN_${token^^}_KEY" var=${var//./_} if [ -n "${!var}" ]; then echo "$token = ${!var}" >> "$CONFIG" fi done echo >> "$CONFIG" fi if [ -n "$STATS_IPS" ]; then sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG" fi fi echo "Starting signaling proxy with $CONFIG ..." exec /usr/bin/nextcloud-spreed-signaling-proxy -config "$CONFIG" nextcloud-spreed-signaling-1.2.4/docker/proxy/stop.sh000077500000000000000000000017151460321600400227300ustar00rootroot00000000000000#!/bin/bash # # Standalone signaling server for the Nextcloud Spreed app. # Copyright (C) 2024 struktur AG # # @author Joachim Bauch # # @license GNU AGPL version 3 or any later version # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # set -e echo "Schedule signaling proxy to shutdown ..." exec killall -USR1 nextcloud-spreed-signaling-proxy nextcloud-spreed-signaling-1.2.4/docker/proxy/wait.sh000077500000000000000000000020611460321600400227020ustar00rootroot00000000000000#!/bin/bash # # Standalone signaling server for the Nextcloud Spreed app. # Copyright (C) 2024 struktur AG # # @author Joachim Bauch # # @license GNU AGPL version 3 or any later version # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # set -e echo "Waiting for signaling proxy to shutdown ..." while true do if ! pgrep nextcloud-spreed-signaling-proxy > /dev/null ; then echo "Signaling proxy has stopped" exit 0 fi sleep 1 done nextcloud-spreed-signaling-1.2.4/docker/server/000077500000000000000000000000001460321600400215255ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/docker/server/Dockerfile000066400000000000000000000014241460321600400235200ustar00rootroot00000000000000FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS builder ARG TARGETARCH ARG TARGETOS WORKDIR /workdir COPY . . 
RUN touch /.dockerenv && \ apk add --no-cache bash git build-base protobuf && \ if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make server; else \ GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server; fi FROM alpine:3 ENV CONFIG=/config/server.conf RUN adduser -D spreedbackend && \ apk add --no-cache bash tzdata ca-certificates COPY --from=builder /workdir/bin/signaling /usr/bin/nextcloud-spreed-signaling COPY ./server.conf.in /config/server.conf.in COPY ./docker/server/entrypoint.sh / RUN chown spreedbackend /config RUN /usr/bin/nextcloud-spreed-signaling -version USER spreedbackend ENTRYPOINT [ "/entrypoint.sh" ] nextcloud-spreed-signaling-1.2.4/docker/server/entrypoint.sh000077500000000000000000000211721460321600400243020ustar00rootroot00000000000000#!/bin/bash # # Standalone signaling server for the Nextcloud Spreed app. # Copyright (C) 2022 struktur AG # # @author Joachim Bauch # # @license GNU AGPL version 3 or any later version # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # set -e if [ -n "$1" ]; then # Run custom command. exec "$@" fi if [ -z "$CONFIG" ]; then echo "No configuration filename given in CONFIG environment variable" exit 1 fi if [ ! -f "$CONFIG" ]; then echo "Preparing signaling server configuration in $CONFIG ..." 
cp /config/server.conf.in "$CONFIG" if [ -n "$HTTP_LISTEN" ]; then sed -i "s|#listen = 127.0.0.1:8080|listen = $HTTP_LISTEN|" "$CONFIG" fi if [ -n "$HTTPS_LISTEN" ]; then sed -i "s|#listen = 127.0.0.1:8443|listen = $HTTPS_LISTEN|" "$CONFIG" if [ -n "$HTTPS_CERTIFICATE" ]; then sed -i "s|certificate = /etc/nginx/ssl/server.crt|certificate = $HTTPS_CERTIFICATE|" "$CONFIG" fi if [ -n "$HTTPS_KEY" ]; then sed -i "s|key = /etc/nginx/ssl/server.key|key = $HTTPS_KEY|" "$CONFIG" fi fi if [ -n "$HASH_KEY" ]; then sed -i "s|the-secret-for-session-checksums|$HASH_KEY|" "$CONFIG" fi if [ -n "$BLOCK_KEY" ]; then sed -i "s|-encryption-key-|$BLOCK_KEY|" "$CONFIG" fi if [ -n "$INTERNAL_SHARED_SECRET_KEY" ]; then sed -i "s|the-shared-secret-for-internal-clients|$INTERNAL_SHARED_SECRET_KEY|" "$CONFIG" fi if [ -n "$NATS_URL" ]; then sed -i "s|#url = nats://localhost:4222|url = $NATS_URL|" "$CONFIG" else sed -i "s|#url = nats://localhost:4222|url = nats://loopback|" "$CONFIG" fi HAS_ETCD= if [ -n "$ETCD_ENDPOINTS" ]; then sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG" HAS_ETCD=1 else if [ -n "$ETCD_DISCOVERY_SRV" ]; then sed -i "s|#discoverysrv =.*|discoverysrv = $ETCD_DISCOVERY_SRV|" "$CONFIG" HAS_ETCD=1 fi if [ -n "$ETCD_DISCOVERY_SERVICE" ]; then sed -i "s|#discoveryservice =.*|discoveryservice = $ETCD_DISCOVERY_SERVICE|" "$CONFIG" fi fi if [ -n "$HAS_ETCD" ]; then if [ -n "$ETCD_CLIENT_KEY" ]; then sed -i "s|#clientkey = /path/to/etcd-client.key|clientkey = $ETCD_CLIENT_KEY|" "$CONFIG" fi if [ -n "$ETCD_CLIENT_CERTIFICATE" ]; then sed -i "s|#clientcert = /path/to/etcd-client.crt|clientcert = $ETCD_CLIENT_CERTIFICATE|" "$CONFIG" fi if [ -n "$ETCD_CLIENT_CA" ]; then sed -i "s|#cacert = /path/to/etcd-ca.crt|cacert = $ETCD_CLIENT_CA|" "$CONFIG" fi fi if [ -n "$USE_JANUS" ]; then sed -i "s|#type =$|type = janus|" "$CONFIG" if [ -n "$JANUS_URL" ]; then sed -i "/proxy URLs to connect to/{n;s|#url =$|url = $JANUS_URL|}" "$CONFIG" fi elif [ -n "$USE_PROXY" ]; then sed -i 
"s|#type =$|type = proxy|" "$CONFIG" if [ -n "$PROXY_TOKEN_ID" ]; then sed -i "s|#token_id =.*|token_id = $PROXY_TOKEN_ID|" "$CONFIG" fi if [ -n "$PROXY_TOKEN_KEY" ]; then sed -i "s|#token_key =.*|token_key = $PROXY_TOKEN_KEY|" "$CONFIG" fi if [ -n "$PROXY_ETCD" ]; then if [ -z "$HAS_ETCD" ]; then echo "No etcd endpoint configured, can't use etcd for proxy connections" exit 1 fi sed -i "s|#urltype = static|urltype = etcd|" "$CONFIG" if [ -n "$PROXY_KEY_PREFIX" ]; then sed -i "s|#keyprefix =.*|keyprefix = $PROXY_KEY_PREFIX|" "$CONFIG" fi else if [ -n "$PROXY_URLS" ]; then sed -i "/proxy URLs to connect to/{n;s|#url =$|url = $PROXY_URLS|}" "$CONFIG" fi if [ -n "$PROXY_DNS_DISCOVERY" ]; then sed -i "/or deleted as necessary/{n;s|#dnsdiscovery =.*|dnsdiscovery = true|}" "$CONFIG" fi fi fi if [ -n "$MAX_STREAM_BITRATE" ]; then sed -i "s|#maxstreambitrate =.*|maxstreambitrate = $MAX_STREAM_BITRATE|" "$CONFIG" fi if [ -n "$MAX_SCREEN_BITRATE" ]; then sed -i "s|#maxscreenbitrate =.*|maxscreenbitrate = $MAX_SCREEN_BITRATE|" "$CONFIG" fi if [ -n "$SKIP_VERIFY" ]; then sed -i "s|#skipverify =.*|skipverify = $SKIP_VERIFY|" "$CONFIG" fi if [ -n "$TURN_API_KEY" ]; then sed -i "s|#\?apikey =.*|apikey = $TURN_API_KEY|" "$CONFIG" fi if [ -n "$TURN_SECRET" ]; then sed -i "/same as on the TURN server/{n;s|#\?secret =.*|secret = $TURN_SECRET|}" "$CONFIG" fi if [ -n "$TURN_SERVERS" ]; then sed -i "s|#servers =.*|servers = $TURN_SERVERS|" "$CONFIG" fi if [ -n "$GEOIP_LICENSE" ]; then sed -i "s|#license =.*|license = $GEOIP_LICENSE|" "$CONFIG" fi if [ -n "$GEOIP_URL" ]; then sed -i "/looking up IP addresses/{n;s|#url =$|url = $GEOIP_URL|}" "$CONFIG" fi if [ -n "$STATS_IPS" ]; then sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG" fi if [ -n "$GRPC_LISTEN" ]; then sed -i "s|#listen = 0.0.0.0:9090|listen = $GRPC_LISTEN|" "$CONFIG" if [ -n "$GRPC_SERVER_CERTIFICATE" ]; then sed -i "s|#servercertificate =.*|servercertificate = $GRPC_SERVER_CERTIFICATE|" "$CONFIG" fi if [ -n 
"$GRPC_SERVER_KEY" ]; then sed -i "s|#serverkey =.*|serverkey = $GRPC_SERVER_KEY|" "$CONFIG" fi if [ -n "$GRPC_SERVER_CA" ]; then sed -i "s|#serverca =.*|serverca = $GRPC_SERVER_CA|" "$CONFIG" fi if [ -n "$GRPC_CLIENT_CERTIFICATE" ]; then sed -i "s|#clientcertificate =.*|clientcertificate = $GRPC_CLIENT_CERTIFICATE|" "$CONFIG" fi if [ -n "$GRPC_CLIENT_KEY" ]; then sed -i "s|#clientkey = /path/to/grpc-client.key|clientkey = $GRPC_CLIENT_KEY|" "$CONFIG" fi if [ -n "$GRPC_CLIENT_CA" ]; then sed -i "s|#clientca =.*|clientca = $GRPC_CLIENT_CA|" "$CONFIG" fi if [ -n "$GRPC_ETCD" ]; then if [ -z "$HAS_ETCD" ]; then echo "No etcd endpoint configured, can't use etcd for GRPC" exit 1 fi sed -i "s|#targettype =$|targettype = etcd|" "$CONFIG" if [ -n "$GRPC_TARGET_PREFIX" ]; then sed -i "s|#targetprefix =.*|targetprefix = $GRPC_TARGET_PREFIX|" "$CONFIG" fi else if [ -n "$GRPC_TARGETS" ]; then sed -i "s|#targets =.*|targets = $GRPC_TARGETS|" "$CONFIG" if [ -n "$GRPC_DNS_DISCOVERY" ]; then sed -i "/# deleted as necessary/{n;s|#dnsdiscovery =.*|dnsdiscovery = true|}" "$CONFIG" fi fi fi fi if [ -n "$GEOIP_OVERRIDES" ]; then sed -i "s|\[geoip-overrides\]|#[geoip-overrides]|" "$CONFIG" echo >> "$CONFIG" echo "[geoip-overrides]" >> "$CONFIG" for override in $GEOIP_OVERRIDES; do echo "$override" >> "$CONFIG" done echo >> "$CONFIG" fi if [ -n "$CONTINENT_OVERRIDES" ]; then sed -i "s|\[continent-overrides\]|#[continent-overrides]|" "$CONFIG" echo >> "$CONFIG" echo "[continent-overrides]" >> "$CONFIG" for override in $CONTINENT_OVERRIDES; do echo "$override" >> "$CONFIG" done echo >> "$CONFIG" fi if [ -n "$BACKENDS_ALLOWALL" ]; then sed -i "s|allowall = false|allowall = $BACKENDS_ALLOWALL|" "$CONFIG" fi if [ -n "$BACKENDS_ALLOWALL_SECRET" ]; then sed -i "s|#secret = the-shared-secret-for-allowall|secret = $BACKENDS_ALLOWALL_SECRET|" "$CONFIG" fi if [ -n "$BACKENDS" ]; then BACKENDS_CONFIG=${BACKENDS// /,} sed -i "s|#backends = .*|backends = $BACKENDS_CONFIG|" "$CONFIG" echo >> "$CONFIG" 
for backend in $BACKENDS; do echo "[$backend]" >> "$CONFIG" declare var="BACKEND_${backend^^}_URL" if [ -n "${!var}" ]; then echo "url = ${!var}" >> "$CONFIG" fi declare var="BACKEND_${backend^^}_SHARED_SECRET" if [ -n "${!var}" ]; then echo "secret = ${!var}" >> "$CONFIG" fi declare var="BACKEND_${backend^^}_SESSION_LIMIT" if [ -n "${!var}" ]; then echo "sessionlimit = ${!var}" >> "$CONFIG" fi declare var="BACKEND_${backend^^}_MAX_STREAM_BITRATE" if [ -n "${!var}" ]; then echo "maxstreambitrate = ${!var}" >> "$CONFIG" fi declare var="BACKEND_${backend^^}_MAX_SCREEN_BITRATE" if [ -n "${!var}" ]; then echo "maxscreenbitrate = ${!var}" >> "$CONFIG" fi echo >> "$CONFIG" done fi fi echo "Starting signaling server with $CONFIG ..." exec /usr/bin/nextcloud-spreed-signaling -config "$CONFIG" nextcloud-spreed-signaling-1.2.4/docs/000077500000000000000000000000001460321600400177005ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/docs/index.md000066400000000000000000000002611460321600400213300ustar00rootroot00000000000000# Nextcloud Spreed Signaling Server Documentation ## API * [Standalone signaling API](standalone-signaling-api-v1.md) ## Other * [Prometheus Metrics](prometheus-metrics.md) nextcloud-spreed-signaling-1.2.4/docs/prometheus-metrics.md000066400000000000000000000164071460321600400240710ustar00rootroot00000000000000# Prometheus metrics The signaling server and -proxy expose various metrics that can be queried by a [Prometheus](https://prometheus.io/) server from the `/metrics` endpoint. Only clients connecting from an IP that is included in the `allowed_ips` value of the `[stats]` entry in the configuration file are allowed to query the metrics. 
## Available metrics The following metrics are available: | Metric | Type | Since | Description | Labels | | :------------------------------------------------ | :-------- | --------: | :------------------------------------------------------------------------ | :-------------------------------- | | `signaling_proxy_sessions` | Gauge | 0.4.0 | The current number of sessions | | | `signaling_proxy_sessions_total` | Counter | 0.4.0 | The total number of created sessions | | | `signaling_proxy_sessions_resumed_total` | Counter | 0.4.0 | The total number of resumed sessions | | | `signaling_proxy_publishers` | Gauge | 0.4.0 | The current number of publishers | `type` | | `signaling_proxy_publishers_total` | Counter | 0.4.0 | The total number of created publishers | `type` | | `signaling_proxy_subscribers` | Gauge | 0.4.0 | The current number of subscribers | `type` | | `signaling_proxy_subscribers_total` | Counter | 0.4.0 | The total number of created subscribers | `type` | | `signaling_proxy_command_messages_total` | Counter | 0.4.0 | The total number of command messages | `type` | | `signaling_proxy_payload_messages_total` | Counter | 0.4.0 | The total number of payload messages | `type` | | `signaling_proxy_token_errors_total` | Counter | 0.4.0 | The total number of token errors | `reason` | | `signaling_backend_session_limit_exceeded_total` | Counter | 0.4.0 | The number of times the session limit exceeded | `backend` | | `signaling_backend_current` | Gauge | 0.4.0 | The current number of configured backends | | | `signaling_client_countries_total` | Counter | 0.4.0 | The total number of connections by country | `country` | | `signaling_hub_rooms` | Gauge | 0.4.0 | The current number of rooms per backend | `backend` | | `signaling_hub_sessions` | Gauge | 0.4.0 | The current number of sessions per backend | `backend`, `clienttype` | | `signaling_hub_sessions_total` | Counter | 0.4.0 | The total number of sessions per backend | `backend`, `clienttype` | | 
`signaling_hub_sessions_resume_total` | Counter | 0.4.0 | The total number of resumed sessions per backend | `backend`, `clienttype` | | `signaling_hub_sessions_resume_failed_total` | Counter | 0.4.0 | The total number of failed session resume requests | | | `signaling_mcu_publishers` | Gauge | 0.4.0 | The current number of publishers | `type` | | `signaling_mcu_publishers_total` | Counter | 0.4.0 | The total number of created publishers | `type` | | `signaling_mcu_subscribers` | Gauge | 0.4.0 | The current number of subscribers | `type` | | `signaling_mcu_subscribers_total` | Counter | 0.4.0 | The total number of created subscribers | `type` | | `signaling_mcu_nopublisher_total` | Counter | 0.4.0 | The total number of subscribe requests where no publisher exists | `type` | | `signaling_mcu_messages_total` | Counter | 0.4.0 | The total number of MCU messages | `type` | | `signaling_mcu_publisher_streams` | Gauge | 0.4.0 | The current number of published media streams | `type` | | `signaling_mcu_subscriber_streams` | Gauge | 0.4.0 | The current number of subscribed media streams | `type` | | `signaling_mcu_backend_connections` | Gauge | 0.4.0 | Current number of connections to signaling proxy backends | `country` | | `signaling_mcu_backend_load` | Gauge | 0.4.0 | Current load of signaling proxy backends | `url` | | `signaling_mcu_no_backend_available_total` | Counter | 0.4.0 | Total number of publishing requests where no backend was available | `type` | | `signaling_room_sessions` | Gauge | 0.4.0 | The current number of sessions in a room | `backend`, `room`, `clienttype` | | `signaling_server_messages_total` | Counter | 0.4.0 | The total number of signaling messages | `type` | | `signaling_grpc_clients` | Gauge | 1.0.0 | The current number of GRPC clients | | | `signaling_grpc_client_calls_total` | Counter | 1.0.0 | The total number of GRPC client calls | `method` | | `signaling_grpc_server_calls_total` | Counter | 1.0.0 | The total number of GRPC server calls | 
`method` | | `signaling_http_client_pool_connections` | Gauge | 1.2.4 | The current number of HTTP client connections per host | `host` | nextcloud-spreed-signaling-1.2.4/docs/requirements.txt000066400000000000000000000001611460321600400231620ustar00rootroot00000000000000jinja2==3.1.3 markdown==3.6 mkdocs==1.5.3 readthedocs-sphinx-search==0.3.2 sphinx==7.2.6 sphinx_rtd_theme==2.0.0 nextcloud-spreed-signaling-1.2.4/docs/standalone-signaling-api-v1.md000066400000000000000000001024771460321600400254310ustar00rootroot00000000000000# External signaling API This document gives a rough overview on the API version 1.0 of the Spreed signaling server. Clients can use the signaling server to send realtime messages between different users / sessions. The API describes the various messages that can be sent by a client or the server to join rooms or distribute events between clients. Depending on the server implementation, clients can use WebSockets (preferred) or COMET (i.e. long-polling) requests to communicate with the signaling server. For WebSockets, only the API described in this document is necessary. For COMET, an extension to this API is required to identify a (virtual) connection between multiple requests. The payload for COMET is the messages as described below. See https://nextcloud-talk.readthedocs.io/en/latest/internal-signaling/ for the API of the regular PHP backend. ## Request { "id": "unique-request-id", "type": "the-request-type", "the-request-type": { ...object defining the request... } } Example: { "id": "123-abc", "type": "samplemessage", "samplemessage": { "foo": "bar", "baz": 1234 } } ## Response { "id": "unique-request-id-from-request-if-present", "type": "the-response-type", "the-response-type": { ...object defining the response... } } Example: { "id": "123-abc", "type": "sampleresponse", "sampleresponse": { "hello": "world!" } } ## Errors The server can send error messages as a response to any request the client has sent. 
Message format: { "id": "unique-request-id-from-request-if-present", "type": "error", "error": { "code": "the-internal-message-id", "message": "human-readable-error-message", "details": { ...optional additional details... } } } ## Backend requests For some messages, the signaling server has to perform a request to the Nextcloud backend (e.g. to validate the user authentication). The backend must be able to verify the request to make sure it is coming from a valid signaling server. Also the Nextcloud backend can send requests to the signaling server to notify about events related to a room or user (e.g. a user is no longer invited to a room). Here the signaling server must be able to verify the request to check if it is coming from a valid Nextcloud instance. Therefore all backend requests, either from the signaling server or vice versa must contain two additional HTTP headers: - `Spreed-Signaling-Random`: Random string of at least 32 bytes. - `Spreed-Signaling-Checksum`: SHA256-HMAC of the random string and the request body, calculated with a shared secret. The shared secret is configured on both sides, so the checksum can be verified. - `Spreed-Signaling-Backend`: Base URL of the Nextcloud server performing the request. ### Example - Request body: `{"type":"auth","auth":{"version":"1.0","params":{"hello":"world"}}}` - Random: `afb6b872ab03e3376b31bf0af601067222ff7990335ca02d327071b73c0119c6` - Shared secret: `MySecretValue` - Calculated checksum: `3c4a69ff328299803ac2879614b707c807b4758cf19450755c60656cac46e3bc` ## Welcome message When a client connects, the server will immediately send a `welcome` message to notify the client about supported features. This is available if the server supports the `welcome` feature id. Message format (Server -> Client): { "type": "welcome", "welcome": { "features": ["optional", "list, "of", "feature", "ids"], ...additional information about the server... 
} } ## Establish connection This must be the first request by a newly connected client and is used to authenticate the connection. No other messages can be sent without a successful `hello` handshake. Message format (Client -> Server): { "id": "unique-request-id", "type": "hello", "hello": { "version": "the-protocol-version", "features": ["optional", "list, "of", "client", "feature", "ids"], "auth": { "url": "the-url-to-the-auth-backend", "params": { ...object containing auth params... } } } } Message format (Server -> Client): { "id": "unique-request-id-from-request", "type": "hello", "hello": { "sessionid": "the-unique-session-id", "resumeid": "the-unique-resume-id", "userid": "the-user-id-for-known-users", "version": "the-protocol-version", "server": { "features": ["optional", "list, "of", "feature", "ids"], ...additional information about the server... } } } Please note that the `server` entry is deprecated and will be removed in a future version. Clients should use the data from the [`welcome` message](#welcome-message) instead. ### Protocol version "1.0" For protocol version `1.0` in the `hello` request, the `params` from the `auth` field are sent to the Nextcloud backend for [validation](#backend-validation). ### Protocol version "2.0" For protocol version `2.0` in the `hello` request, the `params` from the `auth` field must contain a `token` entry containing a [JWT](https://jwt.io/). The JWT must contain the following fields: - `iss`: URL of the Nextcloud server that issued the token. - `iat`: Timestamp when the token has been issued. - `exp`: Timestamp of the token expiration. - `sub`: User Id (if known). - `userdata`: Optional JSON containing more user data. It must be signed with an RSA, ECDSA or Ed25519 key. 
Example token: ``` eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJpc3MiOiJodHRwczovL25leHRjbG91ZC1tYXN0ZXIubG9jYWwvIiwiaWF0IjoxNjU0ODQyMDgwLCJleHAiOjE2NTQ4NDIzODAsInN1YiI6ImFkbWluIiwidXNlcmRhdGEiOnsiZGlzcGxheW5hbWUiOiJBZG1pbmlzdHJhdG9yIn19.5rV0jh89_0fG2L-BUPtciu1q49PoYkLboj33EOdD0qQeYcvE7_di2r5WXM1WmKUCOGeX3hzn6qldDMrJBNuxvQ ``` Example public key: ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIoCsNSCXyxK25zvSKRio0uiBzwub ONq3tiGTPZo3p2Ogn6wAhhsuSxbFuUQDWMX7Tsu9fDzVdwpRHPT4y3V9cA== -----END PUBLIC KEY----- ``` Example payload: ``` { "iss": "https://nextcloud-master.local/", "iat": 1654842080, "exp": 1654842380, "sub": "admin", "userdata": { "displayname": "Administrator" } } ``` The public key is retrieved from the capabilities of the Nextcloud instance in `config` key `hello-v2-token-key` inside `signaling`. ``` "spreed": { "features": [ "audio", "video", "chat-v2", "conversation-v4", ... ], "config": { … "signaling": { "hello-v2-token-key": "-----BEGIN RSA PUBLIC KEY----- ..." } } }, ``` ### Backend validation For `hello` protocol version `1.0`, the server validates the connection request against the passed auth backend (needs to make sure the passed url / hostname is in a whitelist). It performs a POST request and passes the provided `params` as JSON payload in the body of the request. Message format (Server -> Auth backend): { "type": "auth", "auth": { "version": "the-protocol-version-must-be-1.0", "params": { ...object containing auth params from hello request... } } } If the auth params are valid, the backend returns information about the user that is connecting (as JSON response). Message format (Auth backend -> Server): { "type": "auth", "auth": { "version": "the-protocol-version-must-be-1.0", "userid": "the-user-id-for-known-users", "user": { ...additional data of the user... } } } Anonymous connections that are not mapped to a user in Nextcloud will have an empty or omitted `userid` field in the response. 
If the connection can not be authorized, the backend returns an error and the hello request will be rejected. ### Error codes - `unsupported-version`: The requested version is not supported. - `auth-failed`: The session could not be authenticated. - `too-many-sessions`: Too many sessions exist for this user id. - `invalid_backend`: The requested backend URL is not supported. - `invalid_client_type`: The [client type](#client-types) is not supported. - `invalid_token`: The passed token is invalid (can happen for [client type `internal`](#client-type-internal)). ### Client types In order to support clients with different functionality on the server, an optional `type` can be specified in the `auth` struct when connecting to the server. If no `type` is present, the default value `client` will be used and a regular "user" client is created internally. Message format (Client -> Server): { "id": "unique-request-id", "type": "hello", "hello": { "version": "the-protocol-version", "features": ["optional", "list, "of", "client", "feature", "ids"], "auth": { "type": "the-client-type", ...other attributes depending on the client type... "params": { ...object containing auth params... } } } } The key `params` is required for all client types, other keys depend on the `type` value. #### Client type `client` (default) For the client type `client` (which is the default if no `type` is given), the URL to the backend server for this client must be given as described above. This client type must be supported by all server implementations of the signaling protocol. #### Client type `internal` "Internal" clients are used for connections from internal services where the connection doesn't map to a user (or session) in Nextcloud. These clients can skip some internal validations, e.g. they can join any room, even if they have not been invited (which is not possible as the client doesn't map to a user). 
This client type is not required to be supported by server implementations of the signaling protocol, but some additional services might not work without "internal" clients. To authenticate the connection, the `params` struct must contain keys `random` (containing any random string of at least 32 bytes) and `token` containing the SHA-256 HMAC of `random` with a secret that is shared between the signaling server and the service connecting to it. ## Resuming sessions If a connection was interrupted for a client, the server may decide to keep the session alive for a short time, so the client can reconnect and resume the session. In this case, no complete `hello` handshake is required and a client can use a shorter `hello` request. On success, the session will resume as if no interruption happened, i.e. the client will stay in his room and will get all messages from the time the interruption happened. Message format (Client -> Server): { "id": "unique-request-id", "type": "hello", "hello": { "version": "the-protocol-version", "resumeid": "the-resume-id-from-the-original-hello-response" } } Message format (Server -> Client): { "id": "unique-request-id-from-request", "type": "hello", "hello": { "sessionid": "the-unique-session-id", "version": "the-protocol-version" } } If the session is no longer valid (e.g. because the resume was too late), the server will return an error and a normal `hello` handshake has to be performed. ### Error codes - `no_such_session`: The session id is no longer valid. ## Releasing sessions By default, the signaling server tries to maintain the session so clients can resume it in case of intermittent connection problems. To support cases where a client wants to close the connection and release all session data, he can send a `bye` message so the server knows he doesn't need to keep data for resuming. 
Message format (Client -> Server): { "id": "unique-request-id", "type": "bye", "bye": {} } Message format (Server -> Client): { "id": "unique-request-id-from-request", "type": "bye", "bye": {} } After the `bye` has been confirmed, the session can no longer be used. ## Join room After joining the room through the PHP backend, the room must be changed on the signaling server, too. Message format (Client -> Server): { "id": "unique-request-id", "type": "room", "room": { "roomid": "the-room-id", "sessionid": "the-nextcloud-session-id" } } - The client can ask about joining a room using this request. - The session id received from the PHP backend must be passed as `sessionid`. - The `roomid` can be empty to leave the room. - A session can only be connected to one room, i.e. joining a room will leave the room currently in. Message format (Server -> Client): { "id": "unique-request-id-from-request", "type": "room", "room": { "roomid": "the-room-id", "properties": { ...additional room properties... } } } - Sent to confirm a request from the client. - The `roomid` will be empty if the client is no longer in a room. - Can be sent without a request if the server moves a client to a room / out of the current room or the properties of a room change. Message format (Server -> Client if already joined before): { "id": "unique-request-id-from-request", "type": "error", "error": { "code": "already_joined", "message": "Human readable error message", "details": { "roomid": "the-room-id", "properties": { ...additional room properties... } } } } - Sent if a client tried to join a room it is already in. ### Backend validation Rooms are managed by the Nextcloud backend, so the signaling server has to verify that a room exists and a user is allowed to join it. 
Message format (Server -> Room backend): { "type": "room", "room": { "version": "the-protocol-version-must-be-1.0", "roomid": "the-room-id", "userid": "the-user-id-for-known-users", "sessionid": "the-nextcloud-session-id", "action": "join-or-leave" } } The `userid` is empty or omitted for anonymous sessions that don't belong to a user in Nextcloud. Message format (Room backend -> Server): { "type": "room", "room": { "version": "the-protocol-version-must-be-1.0", "roomid": "the-room-id", "properties": { ...additional room properties... } } } If the room does not exist or can not be joined by the given (or anonymous) user, the backend returns an error and the room request will be rejected. ### Error codes - `no_such_room`: The requested room does not exist or the user is not invited to the room. ## Leave room To leave a room, a [join room](#join-room) message must be sent with an empty `roomid` parameter. ## Room events When users join or leave a room, the server generates events that are sent to all sessions in that room. Such events are also sent to users joining a room as initial list of users in the room. Multiple user joins/leaves can be batched into one event to reduce the message overhead. Message format (Server -> Client, user(s) joined): { "type": "event" "event": { "target": "room", "type": "join", "join": [ ...list of session objects that joined the room... ] } } Room event session object: { "sessionid": "the-unique-session-id", "userid": "the-user-id-for-known-users", "user": { ...additional data of the user as received from the auth backend... } } Message format (Server -> Client, user(s) left): { "type": "event" "event": { "target": "room", "type": "leave", "leave": [ ...list of session ids that left the room... ] } } Message format (Server -> Client, user(s) changed): { "type": "event" "event": { "target": "room", "type": "change", "change": [ ...list of sessions that have changed... 
] } } ## Room list events When users are invited to rooms or are disinvited from them, they get notified so they can update the list of available rooms. Message format (Server -> Client, invited to room): { "type": "event" "event": { "target": "roomlist", "type": "invite", "invite": [ "roomid": "the-room-id", "properties": [ ...additional room properties... ] ] } } Message format (Server -> Client, disinvited from room): { "type": "event" "event": { "target": "roomlist", "type": "disinvite", "disinvite": [ "roomid": "the-room-id" ] } } Message format (Server -> Client, room updated): { "type": "event" "event": { "target": "roomlist", "type": "update", "update": [ "roomid": "the-room-id", "properties": [ ...additional room properties... ] ] } } ## Participants list events When the list of participants or flags of a participant in a room changes, an event is triggered by the server so clients can update their UI accordingly or trigger actions like starting calls with other peers. Message format (Server -> Client, participants change): { "type": "event" "event": { "target": "participants", "type": "update", "update": [ "roomid": "the-room-id", "users": [ ...list of changed participant objects... ] ] } } If a participant has the `inCall` flag set, he has joined the call of the room and a WebRTC peerconnection should be established if the local client is also in the call. In that case the participant information will contain properties for both the signaling session id (`sessionId`) and the Nextcloud session id (`nextcloudSessionId`). ### All participants "incall" changed events When the `inCall` flag of all participants is changed from the backend (see [backend request](#in-call-state-of-all-participants-changed) below), a dedicated event is sent that doesn't include information on all participants, but an `all` flag. 
Message format (Server -> Client, incall change): { "type": "event" "event": { "target": "participants", "type": "update", "update": [ "roomid": "the-room-id", "incall": new-incall-state, "all": true ] } } ## Room messages The server can notify clients about events that happened in a room. Currently such messages are only sent out when chat messages are posted to notify clients they should load the new messages. Message format (Server -> Client, chat messages available): { "type": "event" "event": { "target": "room", "type": "message", "message": { "roomid": "the-room-id", "data": { "type": "chat", "chat": { "refresh": true } } } } } ## Sending messages between clients Messages between clients are sent realtime and not stored by the server, i.e. they are only delivered if the recipient is currently connected. This also applies to rooms, where only sessions currently in the room will receive the messages, but not if they join at a later time. Use this for establishing WebRTC connections between peers, i.e. sending offers, answers and candidates. Message format (Client -> Server, to other sessions): { "id": "unique-request-id", "type": "message", "message": { "recipient": { "type": "session", "sessionid": "the-session-id-to-send-to" }, "data": { ...object containing the data to send... } } } Message format (Client -> Server, to all sessions of a user): { "id": "unique-request-id", "type": "message", "message": { "recipient": { "type": "user", "userid": "the-user-id-to-send-to" }, "data": { ...object containing the data to send... } } } Message format (Client -> Server, to all sessions in the same room): { "id": "unique-request-id", "type": "message", "message": { "recipient": { "type": "room" }, "data": { ...object containing the data to send... 
} } } Message format (Server -> Client, receive message) { "type": "message", "message": { "sender": { "type": "the-type-when-sending", "sessionid": "the-session-id-of-the-sender", "userid": "the-user-id-of-the-sender" }, "data": { ...object containing the data of the message... } } } - The `userid` is omitted if a message was sent by an anonymous user. ## Control messages Similar to regular messages between clients which can be sent by any session, messages with type `control` can only be sent if the permission flag `control` is available. These messages can be used to perform actions on clients that should only be possible by some users (e.g. moderators). Message format (Client -> Server, mute phone): { "id": "unique-request-id", "type": "control", "control": { "recipient": { "type": "session", "sessionid": "the-session-id-to-send-to" }, "data": { "type": "mute", "audio": "audio-flags" } } } The bit-field `audio-flags` supports the following bits: - `1`: mute speaking (i.e. phone can no longer talk) - `2`: mute listening (i.e. phone is on hold and can no longer hear) To unmute, a value of `0` must be sent. Message format (Client -> Server, hangup phone): { "id": "unique-request-id", "type": "control", "control": { "recipient": { "type": "session", "sessionid": "the-session-id-to-send-to" }, "data": { "type": "hangup" } } } Message format (Client -> Server, send DTMF): { "id": "unique-request-id", "type": "control", "control": { "recipient": { "type": "session", "sessionid": "the-session-id-to-send-to" }, "data": { "type": "dtmf", "digit": "the-digit" } } } Supported digits are `0`-`9`, `*` and `#`. ## Transient data Transient data can be used to share data in a room that is valid while sessions are still connected to the room. This can be used for example to have a shared state in a meeting without having each client to request data from the Nextcloud server. The data is automatically cleared when the last session disconnects. 
Sessions must be in a room and need the permission flag `transient-data` in order to set or remove values. All sessions in a room automatically receive all transient data update events. Transient data is supported if the server returns the `transient-data` feature id in the [hello response](#establish-connection). ### Set value Message format (Client -> Server): { "type": "transient", "transient": { "type": "set", "key": "sample-key", "value": "any-json-object", "ttl": "optional-ttl" } } - The `key` must be a string. - The `value` can be of any type (i.e. string, number, array, object, etc.). - The `ttl` is the time to live in nanoseconds. The value will be removed after that time (if it is still present). - Requests to set a value that is already present for the key are silently ignored. Any TTL value will be updated / removed. Message format (Server -> Client): { "type": "transient", "transient": { "type": "set", "key": "sample-key", "value": "any-json-object", "oldvalue": "the-previous-value-if-any" } } - The `oldvalue` is only present if a previous value was stored for the key. ### Remove value Message format (Client -> Server): { "type": "transient", "transient": { "type": "remove", "key": "sample-key" } } - The `key` must be a string. - Requests to remove a key that doesn't exist are silently ignored. Message format (Server -> Client): { "type": "transient", "transient": { "type": "remove", "key": "sample-key", "oldvalue": "the-previous-value-if-any" } } - The `oldvalue` is only present if a previous value was stored for the key. ### Initial data When sessions initially join a room, they receive the current state of the transient data. Message format (Server -> Client): { "type": "transient", "transient": { "type": "initial", "data": { "sample-key": "sample-value", ... } } } ## Internal clients Internal clients can be used by third-party applications to perform tasks that a regular client can not be used. 
Examples are adding virtual sessions or sending media without a regular client connected. This is used for example by the SIP bridge to publish mixed phone audio and show "virtual" sessions for the individial phone calls. See above for details on how to connect as internal client. By default, internal clients have their "inCall" and the "publishing audio" flags set. Virtual sessions have their "inCall" and the "publishing phone" flags set. This can be changed by including the client feature flag `internal-incall` which will require the client to set the flags as necessary. ### Add virtual session Message format (Client -> Server): { "type": "internal", "internal": { "type": "addsession", "addsession": { "sessionid": "the-virtual-sessionid", "roomid": "the-room-id-to-add-the-session", "userid": "optional-user-id", "user": { ...additional data of the user... }, "flags": "optional-initial-flags", "incall": "optional-initial-incall", "options": { "actorId": "optional-actor-id", "actorType": "optional-actor-type", } } } } Phone sessions will have `type` set to `phone` in the additional user data (which will be included in the `joined` [room event](#room-events)), `callid` will be the id of the phone call and `number` the target of the call. The call id will match the one returned for accepted outgoing calls and the associated session id can be used to hangup a call or send DTMF tones to it. 
### Update virtual session Message format (Client -> Server): { "type": "internal", "internal": { "type": "updatesession", "updatesession": { "sessionid": "the-virtual-sessionid", "roomid": "the-room-id-to-update-the-session", "flags": "optional-updated-flags", "incall": "optional-updated-incall" } } } ### Remove virtual session Message format (Client -> Server): { "type": "internal", "internal": { "type": "removesession", "removesession": { "sessionid": "the-virtual-sessionid", "roomid": "the-room-id-to-add-the-session", "userid": "optional-user-id" } } } ### Change inCall flags of internal client Message format (Client -> Server): { "type": "internal", "internal": { "type": "incall", "incall": { "incall": "the-incall-flags" } } } # Internal signaling server API The signaling server provides an internal API that can be called from Nextcloud to trigger events from the server side. ## Rooms API The base URL for the rooms API is `/api/vi/room/`, all requests must be sent as `POST` request with proper checksum headers as described above. ### New users invited to room This can be used to notify users that they are now invited to a room. Message format (Backend -> Server) { "type": "invite" "invite" { "userids": [ ...list of user ids that are now invited to the room... ], "alluserids": [ ...list of all user ids that invited to the room... ], "properties": [ ...additional room properties... ] } } ### Users no longer invited to room This can be used to notify users that they are no longer invited to a room. Message format (Backend -> Server) { "type": "disinvite" "disinvite" { "userids": [ ...list of user ids that are no longer invited to the room... ], "alluserids": [ ...list of all user ids that still invited to the room... ] } } ### Room updated This can be used to notify about changes to a room. The room properties are the same as described in section "Join room" above. 
Message format (Backend -> Server) { "type": "update" "update" { "userids": [ ...list of user ids that are invited to the room... ], "properties": [ ...additional room properties... ] } } ### Room deleted This can be used to notify about a deleted room. All sessions currently connected to the room will leave the room. Message format (Backend -> Server) { "type": "delete" "delete" { "userids": [ ...list of user ids that were invited to the room... ] } } ### Participants changed This can be used to notify about changed participants. Message format (Backend -> Server) { "type": "participants" "participants" { "changed": [ ...list of users that were changed... ], "users": [ ...list of users in the room... ] } } ### In call state of participants changed This can be used to notify about participants that changed their `inCall` flag. Message format (Backend -> Server) { "type": "incall" "incall" { "incall": new-incall-state, "changed": [ ...list of users that were changed... ], "users": [ ...list of users in the room... ] } } ### In call state of all participants changed This can be used to notify when all participants changed their `inCall` flag to the same new value (available if the server returns the `incall-all` feature id in the [hello response](#establish-connection)). Message format (Backend -> Server) { "type": "incall" "incall" { "incall": new-incall-state, "all": true } } ### Send an arbitrary room message This can be used to send arbitrary messages to participants in a room. It is currently used to notify about new chat messages. Message format (Backend -> Server) { "type": "message" "message" { "data": { ...arbitrary object to sent to clients... } } } ### Notify sessions to switch to a different room This can be used to let sessions in a room know that they switch to a different room (available if the server returns the `switchto` feature). The session ids sent should be the Talk room session ids. 
Message format (Backend -> Server, no additional details) { "type": "switchto" "switchto" { "roomid": "target-room-id", "sessions": [ "the-nextcloud-session-id-1", "the-nextcloud-session-id-2", ] } } Message format (Backend -> Server, with additional details) { "type": "switchto" "switchto" { "roomid": "target-room-id", "sessions": { "the-nextcloud-session-id-1": { ...arbitrary object to sent to clients... }, "the-nextcloud-session-id-2": null } } } The signaling server will sent messages to the sessions mentioned in the received `switchto` event. If a details object was included for a session, it will be forwarded in the client message, otherwise the `details` will be omitted. Message format (Server -> Client): { "type": "event" "event": { "target": "room", "type": "switchto", "switchto": { "roomid": "target-room-id", "details": { ...arbitrary object to sent to clients... } } } } Clients are expected to follow the `switchto` message. If clients don't switch to the target room after some time, they might get disconnected. ### Start dialout from a room Use this to start a phone dialout to a new user in a given room. Message format (Backend -> Server) { "type": "dialout" "dialout" { "number": "e164-target-number", "options": { ...arbitrary options that will be sent back to validate... } } } Please note that this requires a connected internal client that supports dialout (e.g. the SIP bridge). Message format (Server -> Backend, request was accepted) { "type": "dialout" "dialout" { "callid": "the-unique-call-id" } } Message format (Server -> Backend, request could not be processed) { "type": "dialout" "dialout" { "error": { "code": "the-internal-message-id", "message": "human-readable-error-message", "details": { ...optional additional details... } } } } A HTTP error status code will be set in this case. 
nextcloud-spreed-signaling-1.2.4/etcd_client.go000066400000000000000000000157561460321600400215720ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "fmt" "log" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "go.etcd.io/etcd/client/pkg/v3/srv" "go.etcd.io/etcd/client/pkg/v3/transport" clientv3 "go.etcd.io/etcd/client/v3" ) type EtcdClientListener interface { EtcdClientCreated(client *EtcdClient) } type EtcdClientWatcher interface { EtcdWatchCreated(client *EtcdClient, key string) EtcdKeyUpdated(client *EtcdClient, key string, value []byte) EtcdKeyDeleted(client *EtcdClient, key string) } type EtcdClient struct { compatSection string mu sync.Mutex client atomic.Value listeners map[EtcdClientListener]bool } func NewEtcdClient(config *goconf.ConfigFile, compatSection string) (*EtcdClient, error) { result := &EtcdClient{ compatSection: compatSection, } if err := result.load(config, false); err != nil { return nil, err } return result, nil } func (c *EtcdClient) getConfigStringWithFallback(config *goconf.ConfigFile, option string) string { value, _ := config.GetString("etcd", option) if value == "" && c.compatSection != "" { value, _ = 
config.GetString(c.compatSection, option) if value != "" { log.Printf("WARNING: Configuring etcd option \"%s\" in section \"%s\" is deprecated, use section \"etcd\" instead", option, c.compatSection) } } return value } func (c *EtcdClient) load(config *goconf.ConfigFile, ignoreErrors bool) error { var endpoints []string if endpointsString := c.getConfigStringWithFallback(config, "endpoints"); endpointsString != "" { for _, ep := range strings.Split(endpointsString, ",") { ep := strings.TrimSpace(ep) if ep != "" { endpoints = append(endpoints, ep) } } } else if discoverySrv := c.getConfigStringWithFallback(config, "discoverysrv"); discoverySrv != "" { discoveryService := c.getConfigStringWithFallback(config, "discoveryservice") clients, err := srv.GetClient("etcd-client", discoverySrv, discoveryService) if err != nil { if !ignoreErrors { return fmt.Errorf("Could not discover etcd endpoints for %s: %w", discoverySrv, err) } } else { endpoints = clients.Endpoints } } if len(endpoints) == 0 { if !ignoreErrors { return nil } log.Printf("No etcd endpoints configured, not changing client") } else { cfg := clientv3.Config{ Endpoints: endpoints, // set timeout per request to fail fast when the target endpoint is unavailable DialTimeout: time.Second, } clientKey := c.getConfigStringWithFallback(config, "clientkey") clientCert := c.getConfigStringWithFallback(config, "clientcert") caCert := c.getConfigStringWithFallback(config, "cacert") if clientKey != "" && clientCert != "" && caCert != "" { tlsInfo := transport.TLSInfo{ CertFile: clientCert, KeyFile: clientKey, TrustedCAFile: caCert, } tlsConfig, err := tlsInfo.ClientConfig() if err != nil { if !ignoreErrors { return fmt.Errorf("Could not setup etcd TLS configuration: %w", err) } log.Printf("Could not setup TLS configuration, will be disabled (%s)", err) } else { cfg.TLS = tlsConfig } } client, err := clientv3.New(cfg) if err != nil { if !ignoreErrors { return err } log.Printf("Could not create new client from etd 
endpoints %+v: %s", endpoints, err) } else { prev := c.getEtcdClient() if prev != nil { prev.Close() } c.client.Store(client) log.Printf("Using etcd endpoints %+v", endpoints) c.notifyListeners() } } return nil } func (c *EtcdClient) Close() error { client := c.getEtcdClient() if client != nil { return client.Close() } return nil } func (c *EtcdClient) IsConfigured() bool { return c.getEtcdClient() != nil } func (c *EtcdClient) getEtcdClient() *clientv3.Client { client := c.client.Load() if client == nil { return nil } return client.(*clientv3.Client) } func (c *EtcdClient) syncClient() error { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() return c.getEtcdClient().Sync(ctx) } func (c *EtcdClient) notifyListeners() { c.mu.Lock() defer c.mu.Unlock() for listener := range c.listeners { listener.EtcdClientCreated(c) } } func (c *EtcdClient) AddListener(listener EtcdClientListener) { c.mu.Lock() defer c.mu.Unlock() if c.listeners == nil { c.listeners = make(map[EtcdClientListener]bool) } c.listeners[listener] = true if client := c.getEtcdClient(); client != nil { go listener.EtcdClientCreated(c) } } func (c *EtcdClient) RemoveListener(listener EtcdClientListener) { c.mu.Lock() defer c.mu.Unlock() delete(c.listeners, listener) } func (c *EtcdClient) WaitForConnection(ctx context.Context) error { backoff, err := NewExponentialBackoff(initialWaitDelay, maxWaitDelay) if err != nil { return err } for { if err := ctx.Err(); err != nil { return err } if err := c.syncClient(); err != nil { if err == context.DeadlineExceeded { log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", backoff.NextWait()) } else { log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", backoff.NextWait(), err) } backoff.Wait(ctx) continue } log.Printf("Client synced, using endpoints %+v", c.getEtcdClient().Endpoints()) return nil } } func (c *EtcdClient) Get(ctx context.Context, key string, opts 
...clientv3.OpOption) (*clientv3.GetResponse, error) { return c.getEtcdClient().Get(ctx, key, opts...) } func (c *EtcdClient) Watch(ctx context.Context, key string, watcher EtcdClientWatcher, opts ...clientv3.OpOption) error { log.Printf("Wait for leader and start watching on %s", key) ch := c.getEtcdClient().Watch(clientv3.WithRequireLeader(ctx), key, opts...) log.Printf("Watch created for %s", key) watcher.EtcdWatchCreated(c, key) for response := range ch { if err := response.Err(); err != nil { return err } for _, ev := range response.Events { switch ev.Type { case clientv3.EventTypePut: watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value) case clientv3.EventTypeDelete: watcher.EtcdKeyDeleted(c, string(ev.Kv.Key)) default: log.Printf("Unsupported watch event %s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value) } } } return nil } nextcloud-spreed-signaling-1.2.4/etcd_client_test.go000066400000000000000000000204741460321600400226220ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "errors" "net" "net/url" "os" "runtime" "strconv" "sync" "syscall" "testing" "time" "github.com/dlintw/goconf" "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/lease" ) var ( etcdListenUrl = "http://localhost:8080" ) func isErrorAddressAlreadyInUse(err error) bool { var eOsSyscall *os.SyscallError if !errors.As(err, &eOsSyscall) { return false } var errErrno syscall.Errno // doesn't need a "*" (ptr) because it's already a ptr (uintptr) if !errors.As(eOsSyscall, &errErrno) { return false } if errErrno == syscall.EADDRINUSE { return true } const WSAEADDRINUSE = 10048 if runtime.GOOS == "windows" && errErrno == WSAEADDRINUSE { return true } return false } func NewEtcdForTest(t *testing.T) *embed.Etcd { cfg := embed.NewConfig() cfg.Dir = t.TempDir() os.Chmod(cfg.Dir, 0700) // nolint cfg.LogLevel = "warn" u, err := url.Parse(etcdListenUrl) if err != nil { t.Fatal(err) } // Find a free port to bind the server to. var etcd *embed.Etcd for port := 50000; port < 50100; port++ { u.Host = net.JoinHostPort("localhost", strconv.Itoa(port)) cfg.ListenClientUrls = []url.URL{*u} cfg.AdvertiseClientUrls = []url.URL{*u} httpListener := u httpListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+1)) cfg.ListenClientHttpUrls = []url.URL{*httpListener} peerListener := u peerListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+2)) cfg.ListenPeerUrls = []url.URL{*peerListener} cfg.AdvertisePeerUrls = []url.URL{*peerListener} cfg.InitialCluster = "default=" + peerListener.String() etcd, err = embed.StartEtcd(cfg) if isErrorAddressAlreadyInUse(err) { continue } else if err != nil { t.Fatal(err) } break } if etcd == nil { t.Fatal("could not find free port") } t.Cleanup(func() { etcd.Close() }) // Wait for server to be ready. 
<-etcd.Server.ReadyNotify() return etcd } func NewEtcdClientForTest(t *testing.T) (*embed.Etcd, *EtcdClient) { etcd := NewEtcdForTest(t) config := goconf.NewConfigFile() config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String()) client, err := NewEtcdClient(config, "") if err != nil { t.Fatal(err) } t.Cleanup(func() { if err := client.Close(); err != nil { t.Error(err) } }) return etcd, client } func SetEtcdValue(etcd *embed.Etcd, key string, value []byte) { if kv := etcd.Server.KV(); kv != nil { kv.Put([]byte(key), value, lease.NoLease) kv.Commit() } } func DeleteEtcdValue(etcd *embed.Etcd, key string) { if kv := etcd.Server.KV(); kv != nil { kv.DeleteRange([]byte(key), nil) kv.Commit() } } func Test_EtcdClient_Get(t *testing.T) { etcd, client := NewEtcdClientForTest(t) if response, err := client.Get(context.Background(), "foo"); err != nil { t.Error(err) } else if response.Count != 0 { t.Errorf("expected 0 response, got %d", response.Count) } SetEtcdValue(etcd, "foo", []byte("bar")) if response, err := client.Get(context.Background(), "foo"); err != nil { t.Error(err) } else if response.Count != 1 { t.Errorf("expected 1 responses, got %d", response.Count) } else if string(response.Kvs[0].Key) != "foo" { t.Errorf("expected key \"foo\", got \"%s\"", string(response.Kvs[0].Key)) } else if string(response.Kvs[0].Value) != "bar" { t.Errorf("expected value \"bar\", got \"%s\"", string(response.Kvs[0].Value)) } } func Test_EtcdClient_GetPrefix(t *testing.T) { etcd, client := NewEtcdClientForTest(t) if response, err := client.Get(context.Background(), "foo"); err != nil { t.Error(err) } else if response.Count != 0 { t.Errorf("expected 0 response, got %d", response.Count) } SetEtcdValue(etcd, "foo", []byte("1")) SetEtcdValue(etcd, "foo/lala", []byte("2")) SetEtcdValue(etcd, "lala/foo", []byte("3")) if response, err := client.Get(context.Background(), "foo", clientv3.WithPrefix()); err != nil { t.Error(err) } else if response.Count != 2 { 
t.Errorf("expected 2 responses, got %d", response.Count) } else if string(response.Kvs[0].Key) != "foo" { t.Errorf("expected key \"foo\", got \"%s\"", string(response.Kvs[0].Key)) } else if string(response.Kvs[0].Value) != "1" { t.Errorf("expected value \"1\", got \"%s\"", string(response.Kvs[0].Value)) } else if string(response.Kvs[1].Key) != "foo/lala" { t.Errorf("expected key \"foo/lala\", got \"%s\"", string(response.Kvs[1].Key)) } else if string(response.Kvs[1].Value) != "2" { t.Errorf("expected value \"2\", got \"%s\"", string(response.Kvs[1].Value)) } } type etcdEvent struct { t mvccpb.Event_EventType key string value string } type EtcdClientTestListener struct { t *testing.T ctx context.Context cancel context.CancelFunc initial chan struct{} initialWg sync.WaitGroup events chan etcdEvent } func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTestListener { ctx, cancel := context.WithCancel(ctx) return &EtcdClientTestListener{ t: t, ctx: ctx, cancel: cancel, initial: make(chan struct{}), events: make(chan etcdEvent), } } func (l *EtcdClientTestListener) Close() { l.cancel() } func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) { l.initialWg.Add(1) go func() { if err := client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", l, clientv3.WithPrefix()); err != nil { l.t.Error(err) } }() go func() { defer close(l.initial) if err := client.WaitForConnection(l.ctx); err != nil { l.t.Errorf("error waiting for connection: %s", err) return } ctx, cancel := context.WithTimeout(l.ctx, time.Second) defer cancel() if response, err := client.Get(ctx, "foo", clientv3.WithPrefix()); err != nil { l.t.Error(err) } else if response.Count != 1 { l.t.Errorf("expected 1 responses, got %d", response.Count) } else if string(response.Kvs[0].Key) != "foo/a" { l.t.Errorf("expected key \"foo/a\", got \"%s\"", string(response.Kvs[0].Key)) } else if string(response.Kvs[0].Value) != "1" { l.t.Errorf("expected value \"1\", got \"%s\"", 
string(response.Kvs[0].Value)) } l.initialWg.Wait() }() } func (l *EtcdClientTestListener) EtcdWatchCreated(client *EtcdClient, key string) { l.initialWg.Done() } func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte) { l.events <- etcdEvent{ t: clientv3.EventTypePut, key: string(key), value: string(value), } } func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string) { l.events <- etcdEvent{ t: clientv3.EventTypeDelete, key: string(key), } } func Test_EtcdClient_Watch(t *testing.T) { etcd, client := NewEtcdClientForTest(t) SetEtcdValue(etcd, "foo/a", []byte("1")) listener := NewEtcdClientTestListener(context.Background(), t) defer listener.Close() client.AddListener(listener) defer client.RemoveListener(listener) <-listener.initial SetEtcdValue(etcd, "foo/b", []byte("2")) event := <-listener.events if event.t != clientv3.EventTypePut { t.Errorf("expected type %d, got %d", clientv3.EventTypePut, event.t) } else if event.key != "foo/b" { t.Errorf("expected key %s, got %s", "foo/b", event.key) } else if event.value != "2" { t.Errorf("expected value %s, got %s", "2", event.value) } DeleteEtcdValue(etcd, "foo/a") event = <-listener.events if event.t != clientv3.EventTypeDelete { t.Errorf("expected type %d, got %d", clientv3.EventTypeDelete, event.t) } else if event.key != "foo/a" { t.Errorf("expected key %s, got %s", "foo/a", event.key) } } nextcloud-spreed-signaling-1.2.4/file_watcher.go000066400000000000000000000070201460321600400217320ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2024 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "errors" "log" "os" "path" "path/filepath" "strings" "sync" "sync/atomic" "time" "github.com/fsnotify/fsnotify" ) const ( defaultDeduplicateWatchEvents = 100 * time.Millisecond ) var ( deduplicateWatchEvents atomic.Int64 ) func init() { deduplicateWatchEvents.Store(int64(defaultDeduplicateWatchEvents)) } type FileWatcherCallback func(filename string) type FileWatcher struct { filename string target string callback FileWatcherCallback watcher *fsnotify.Watcher } func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher, error) { realFilename, err := filepath.EvalSymlinks(filename) if err != nil { return nil, err } watcher, err := fsnotify.NewWatcher() if err != nil { return nil, err } if err := watcher.Add(realFilename); err != nil { watcher.Close() // nolint return nil, err } if filename != realFilename { if err := watcher.Add(path.Dir(filename)); err != nil { watcher.Close() // nolint return nil, err } } w := &FileWatcher{ filename: filename, target: realFilename, callback: callback, watcher: watcher, } go w.run() return w, nil } func (f *FileWatcher) Close() error { return f.watcher.Close() } func (f *FileWatcher) run() { var mu sync.Mutex timers := make(map[string]*time.Timer) triggerEvent := func(event fsnotify.Event) { deduplicate := time.Duration(deduplicateWatchEvents.Load()) if deduplicate <= 0 { f.callback(f.filename) return } // Use timer to deduplicate multiple events for the same file. 
mu.Lock() t, found := timers[event.Name] mu.Unlock() if !found { t = time.AfterFunc(deduplicate, func() { f.callback(f.filename) mu.Lock() delete(timers, event.Name) mu.Unlock() }) mu.Lock() timers[event.Name] = t mu.Unlock() } else { t.Reset(deduplicate) } } for { select { case event := <-f.watcher.Events: if !event.Has(fsnotify.Write) && !event.Has(fsnotify.Create) && !event.Has(fsnotify.Rename) { continue } if stat, err := os.Lstat(event.Name); err != nil { if !errors.Is(err, os.ErrNotExist) { log.Printf("Could not lstat %s: %s", event.Name, err) } } else if stat.Mode()&os.ModeSymlink != 0 { target, err := filepath.EvalSymlinks(event.Name) if err == nil && target != f.target && strings.HasSuffix(event.Name, f.filename) { f.target = target triggerEvent(event) } continue } if strings.HasSuffix(event.Name, f.filename) || strings.HasSuffix(event.Name, f.target) { triggerEvent(event) } case err := <-f.watcher.Errors: if err == nil { return } log.Printf("Error watching %s: %s", f.filename, err) } } } nextcloud-spreed-signaling-1.2.4/file_watcher_test.go000066400000000000000000000124661460321600400230030ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2024 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "errors" "os" "path" "testing" ) var ( testWatcherNoEventTimeout = 2 * defaultDeduplicateWatchEvents ) func TestFileWatcher_NotExist(t *testing.T) { tmpdir := t.TempDir() w, err := NewFileWatcher(path.Join(tmpdir, "test.txt"), func(filename string) {}) if err == nil { t.Error("should not be able to watch non-existing files") if err := w.Close(); err != nil { t.Error(err) } } else if !errors.Is(err, os.ErrNotExist) { t.Error(err) } } func TestFileWatcher_File(t *testing.T) { tmpdir := t.TempDir() filename := path.Join(tmpdir, "test.txt") if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil { t.Fatal(err) } modified := make(chan struct{}) w, err := NewFileWatcher(filename, func(filename string) { modified <- struct{}{} }) if err != nil { t.Fatal(err) } defer w.Close() if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil { t.Fatal(err) } <-modified ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout) defer cancel() select { case <-modified: t.Error("should not have received another event") case <-ctxTimeout.Done(): } if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil { t.Fatal(err) } <-modified ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout) defer cancel() select { case <-modified: t.Error("should not have received another event") case <-ctxTimeout.Done(): } } func TestFileWatcher_Symlink(t *testing.T) { tmpdir := t.TempDir() sourceFilename := path.Join(tmpdir, "test1.txt") if err := os.WriteFile(sourceFilename, []byte("Hello world!"), 0644); err != nil { t.Fatal(err) } filename := path.Join(tmpdir, "symlink.txt") if err := os.Symlink(sourceFilename, filename); err != nil { t.Fatal(err) } modified := make(chan struct{}) w, err := NewFileWatcher(filename, func(filename string) { modified <- struct{}{} }) if err != nil { t.Fatal(err) } defer w.Close() if err := os.WriteFile(sourceFilename, 
[]byte("Updated"), 0644); err != nil { t.Fatal(err) } <-modified ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout) defer cancel() select { case <-modified: t.Error("should not have received another event") case <-ctxTimeout.Done(): } } func TestFileWatcher_ChangeSymlinkTarget(t *testing.T) { tmpdir := t.TempDir() sourceFilename1 := path.Join(tmpdir, "test1.txt") if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil { t.Fatal(err) } sourceFilename2 := path.Join(tmpdir, "test2.txt") if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil { t.Fatal(err) } filename := path.Join(tmpdir, "symlink.txt") if err := os.Symlink(sourceFilename1, filename); err != nil { t.Fatal(err) } modified := make(chan struct{}) w, err := NewFileWatcher(filename, func(filename string) { modified <- struct{}{} }) if err != nil { t.Fatal(err) } defer w.Close() // Replace symlink by creating new one and rename it to the original target. 
if err := os.Symlink(sourceFilename2, filename+".tmp"); err != nil { t.Fatal(err) } if err := os.Rename(filename+".tmp", filename); err != nil { t.Fatal(err) } <-modified ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout) defer cancel() select { case <-modified: t.Error("should not have received another event") case <-ctxTimeout.Done(): } } func TestFileWatcher_OtherSymlink(t *testing.T) { tmpdir := t.TempDir() sourceFilename1 := path.Join(tmpdir, "test1.txt") if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil { t.Fatal(err) } sourceFilename2 := path.Join(tmpdir, "test2.txt") if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil { t.Fatal(err) } filename := path.Join(tmpdir, "symlink.txt") if err := os.Symlink(sourceFilename1, filename); err != nil { t.Fatal(err) } modified := make(chan struct{}) w, err := NewFileWatcher(filename, func(filename string) { modified <- struct{}{} }) if err != nil { t.Fatal(err) } defer w.Close() if err := os.Symlink(sourceFilename2, filename+".tmp"); err != nil { t.Fatal(err) } ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout) defer cancel() select { case <-modified: t.Error("should not have received event for other symlink") case <-ctxTimeout.Done(): } } nextcloud-spreed-signaling-1.2.4/flags.go000066400000000000000000000034261460321600400204000ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "sync/atomic" ) type Flags struct { flags atomic.Uint32 } func (f *Flags) Add(flags uint32) bool { for { old := f.flags.Load() if old&flags == flags { // Flags already set. return false } newFlags := old | flags if f.flags.CompareAndSwap(old, newFlags) { return true } // Another thread updated the flags while we were checking, retry. } } func (f *Flags) Remove(flags uint32) bool { for { old := f.flags.Load() if old&flags == 0 { // Flags not set. return false } newFlags := old & ^flags if f.flags.CompareAndSwap(old, newFlags) { return true } // Another thread updated the flags while we were checking, retry. } } func (f *Flags) Set(flags uint32) bool { for { old := f.flags.Load() if old == flags { return false } if f.flags.CompareAndSwap(old, flags) { return true } } } func (f *Flags) Get() uint32 { return f.flags.Load() } nextcloud-spreed-signaling-1.2.4/flags_test.go000066400000000000000000000057511460321600400214420ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "sync" "sync/atomic" "testing" ) func TestFlags(t *testing.T) { var f Flags if f.Get() != 0 { t.Fatalf("Expected flags 0, got %d", f.Get()) } if !f.Add(1) { t.Error("expected true") } if f.Get() != 1 { t.Fatalf("Expected flags 1, got %d", f.Get()) } if f.Add(1) { t.Error("expected false") } if f.Get() != 1 { t.Fatalf("Expected flags 1, got %d", f.Get()) } if !f.Add(2) { t.Error("expected true") } if f.Get() != 3 { t.Fatalf("Expected flags 3, got %d", f.Get()) } if !f.Remove(1) { t.Error("expected true") } if f.Get() != 2 { t.Fatalf("Expected flags 2, got %d", f.Get()) } if f.Remove(1) { t.Error("expected false") } if f.Get() != 2 { t.Fatalf("Expected flags 2, got %d", f.Get()) } if !f.Add(3) { t.Error("expected true") } if f.Get() != 3 { t.Fatalf("Expected flags 3, got %d", f.Get()) } if !f.Remove(1) { t.Error("expected true") } if f.Get() != 2 { t.Fatalf("Expected flags 2, got %d", f.Get()) } } func runConcurrentFlags(t *testing.T, count int, f func()) { var start sync.WaitGroup start.Add(1) var ready sync.WaitGroup var done sync.WaitGroup for i := 0; i < count; i++ { done.Add(1) ready.Add(1) go func() { defer done.Done() ready.Done() start.Wait() f() }() } ready.Wait() start.Done() done.Wait() } func TestFlagsConcurrentAdd(t *testing.T) { var flags Flags var added atomic.Int32 runConcurrentFlags(t, 100, func() { if flags.Add(1) { added.Add(1) } }) if added.Load() != 1 { t.Errorf("expected only one successfull attempt, got %d", added.Load()) } } func TestFlagsConcurrentRemove(t *testing.T) { var flags Flags flags.Set(1) var removed atomic.Int32 runConcurrentFlags(t, 100, func() { if flags.Remove(1) { removed.Add(1) } }) if removed.Load() != 1 { t.Errorf("expected only one successfull attempt, got %d", removed.Load()) } } func TestFlagsConcurrentSet(t 
*testing.T) { var flags Flags var set atomic.Int32 runConcurrentFlags(t, 100, func() { if flags.Set(1) { set.Add(1) } }) if set.Load() != 1 { t.Errorf("expected only one successfull attempt, got %d", set.Load()) } } nextcloud-spreed-signaling-1.2.4/geoip.go000066400000000000000000000126721460321600400204120ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "archive/tar" "compress/gzip" "fmt" "io" "log" "net" "net/http" "net/url" "os" "strings" "sync" "time" "github.com/oschwald/maxminddb-golang" ) var ( ErrDatabaseNotInitialized = fmt.Errorf("GeoIP database not initialized yet") ) func GetGeoIpDownloadUrl(license string) string { if license == "" { return "" } result := "https://download.maxmind.com/app/geoip_download" result += "?edition_id=GeoLite2-Country" result += "&license_key=" + url.QueryEscape(license) result += "&suffix=tar.gz" return result } type GeoLookup struct { url string isFile bool client http.Client mu sync.Mutex lastModifiedHeader string lastModifiedTime time.Time reader *maxminddb.Reader } func NewGeoLookupFromUrl(url string) (*GeoLookup, error) { geoip := &GeoLookup{ url: url, } return geoip, nil } func NewGeoLookupFromFile(filename string) (*GeoLookup, error) { geoip := &GeoLookup{ url: filename, isFile: true, } if err := geoip.Update(); err != nil { geoip.Close() return nil, err } return geoip, nil } func (g *GeoLookup) Close() { g.mu.Lock() if g.reader != nil { g.reader.Close() g.reader = nil } g.mu.Unlock() } func (g *GeoLookup) Update() error { if g.isFile { return g.updateFile() } return g.updateUrl() } func (g *GeoLookup) updateFile() error { info, err := os.Stat(g.url) if err != nil { return err } if info.ModTime().Equal(g.lastModifiedTime) { return nil } reader, err := maxminddb.Open(g.url) if err != nil { return err } if err := reader.Verify(); err != nil { return err } metadata := reader.Metadata log.Printf("Using %s GeoIP database from %s (built on %s)", metadata.DatabaseType, g.url, time.Unix(int64(metadata.BuildEpoch), 0).UTC()) g.mu.Lock() if g.reader != nil { g.reader.Close() } g.reader = reader g.lastModifiedTime = info.ModTime() g.mu.Unlock() return nil } func (g *GeoLookup) updateUrl() error { request, err := http.NewRequest("GET", g.url, nil) if err != nil { return err } if g.lastModifiedHeader != "" { request.Header.Add("If-Modified-Since", 
g.lastModifiedHeader) } response, err := g.client.Do(request) if err != nil { return err } defer response.Body.Close() if response.StatusCode == http.StatusNotModified { log.Printf("GeoIP database at %s has not changed", g.url) return nil } else if response.StatusCode/100 != 2 { return fmt.Errorf("downloading %s returned an error: %s", g.url, response.Status) } body := response.Body url := g.url if strings.HasSuffix(url, ".gz") { body, err = gzip.NewReader(body) if err != nil { return err } url = strings.TrimSuffix(url, ".gz") } var geoipdata []byte if strings.HasSuffix(url, ".tar") || strings.HasSuffix(url, "=tar") { tarfile := tar.NewReader(body) for { header, err := tarfile.Next() if err == io.EOF { break } else if err != nil { return err } if !strings.HasSuffix(header.Name, ".mmdb") { continue } geoipdata, err = io.ReadAll(tarfile) if err != nil { return err } break } } else { geoipdata, err = io.ReadAll(body) if err != nil { return err } } if len(geoipdata) == 0 { return fmt.Errorf("did not find GeoIP database in download from %s", g.url) } reader, err := maxminddb.FromBytes(geoipdata) if err != nil { return err } if err := reader.Verify(); err != nil { return err } metadata := reader.Metadata log.Printf("Using %s GeoIP database from %s (built on %s)", metadata.DatabaseType, g.url, time.Unix(int64(metadata.BuildEpoch), 0).UTC()) g.mu.Lock() if g.reader != nil { g.reader.Close() } g.reader = reader g.lastModifiedHeader = response.Header.Get("Last-Modified") g.mu.Unlock() return nil } func (g *GeoLookup) LookupCountry(ip net.IP) (string, error) { var record struct { Country struct { ISOCode string `maxminddb:"iso_code"` } `maxminddb:"country"` } g.mu.Lock() if g.reader == nil { g.mu.Unlock() return "", ErrDatabaseNotInitialized } err := g.reader.Lookup(ip, &record) g.mu.Unlock() if err != nil { return "", err } return record.Country.ISOCode, nil } func LookupContinents(country string) []string { continents, found := ContinentMap[country] if !found { return nil } 
return continents } func IsValidContinent(continent string) bool { switch continent { case "AF": // Africa fallthrough case "AN": // Antartica fallthrough case "AS": // Asia fallthrough case "EU": // Europe fallthrough case "NA": // North America fallthrough case "SA": // South America fallthrough case "OC": // Oceania return true default: return false } } nextcloud-spreed-signaling-1.2.4/geoip_test.go000066400000000000000000000123311460321600400214410ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "archive/tar" "compress/gzip" "fmt" "io" "net" "net/http" "os" "strings" "testing" "time" ) func testGeoLookupReader(t *testing.T, reader *GeoLookup) { tests := map[string]string{ // Example from maxminddb-golang code. "81.2.69.142": "GB", // Local addresses don't have a country assigned. 
"127.0.0.1": "", } for ip, expected := range tests { ip := ip expected := expected t.Run(ip, func(t *testing.T) { country, err := reader.LookupCountry(net.ParseIP(ip)) if err != nil { t.Errorf("Could not lookup %s: %s", ip, err) return } if country != expected { t.Errorf("Expected %s for %s, got %s", expected, ip, country) } }) } } func GetGeoIpUrlForTest(t *testing.T) string { t.Helper() var geoIpUrl string if os.Getenv("USE_DB_IP_GEOIP_DATABASE") != "" { now := time.Now().UTC() geoIpUrl = fmt.Sprintf("https://download.db-ip.com/free/dbip-country-lite-%d-%.2d.mmdb.gz", now.Year(), now.Month()) } if geoIpUrl == "" { license := os.Getenv("MAXMIND_GEOLITE2_LICENSE") if license == "" { t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.") } geoIpUrl = GetGeoIpDownloadUrl(license) } return geoIpUrl } func TestGeoLookup(t *testing.T) { reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t)) if err != nil { t.Fatal(err) } defer reader.Close() if err := reader.Update(); err != nil { t.Fatal(err) } testGeoLookupReader(t, reader) } func TestGeoLookupCaching(t *testing.T) { reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t)) if err != nil { t.Fatal(err) } defer reader.Close() if err := reader.Update(); err != nil { t.Fatal(err) } // Updating the second time will most likely return a "304 Not Modified". // Make sure this doesn't trigger an error. 
if err := reader.Update(); err != nil { t.Fatal(err) } } func TestGeoLookupContinent(t *testing.T) { tests := map[string][]string{ "AU": {"OC"}, "DE": {"EU"}, "RU": {"EU"}, "": nil, "INVALID ": nil, } for country, expected := range tests { country := country expected := expected t.Run(country, func(t *testing.T) { continents := LookupContinents(country) if len(continents) != len(expected) { t.Errorf("Continents didn't match for %s: got %s, expected %s", country, continents, expected) return } for idx, c := range expected { if continents[idx] != c { t.Errorf("Continents didn't match for %s: got %s, expected %s", country, continents, expected) break } } }) } } func TestGeoLookupCloseEmpty(t *testing.T) { reader, err := NewGeoLookupFromUrl("ignore-url") if err != nil { t.Fatal(err) } reader.Close() } func TestGeoLookupFromFile(t *testing.T) { geoIpUrl := GetGeoIpUrlForTest(t) resp, err := http.Get(geoIpUrl) if err != nil { t.Fatal(err) } defer resp.Body.Close() body := resp.Body url := geoIpUrl if strings.HasSuffix(geoIpUrl, ".gz") { body, err = gzip.NewReader(body) if err != nil { t.Fatal(err) } url = strings.TrimSuffix(url, ".gz") } tmpfile, err := os.CreateTemp("", "geoipdb") if err != nil { t.Fatal(err) } t.Cleanup(func() { os.Remove(tmpfile.Name()) }) foundDatabase := false if strings.HasSuffix(url, ".tar") || strings.HasSuffix(url, "=tar") { tarfile := tar.NewReader(body) for { header, err := tarfile.Next() if err == io.EOF { break } else if err != nil { t.Fatal(err) } if !strings.HasSuffix(header.Name, ".mmdb") { continue } if _, err := io.Copy(tmpfile, tarfile); err != nil { tmpfile.Close() t.Fatal(err) } if err := tmpfile.Close(); err != nil { t.Fatal(err) } foundDatabase = true break } } else { if _, err := io.Copy(tmpfile, body); err != nil { tmpfile.Close() t.Fatal(err) } if err := tmpfile.Close(); err != nil { t.Fatal(err) } foundDatabase = true } if !foundDatabase { t.Fatalf("Did not find GeoIP database in download from %s", geoIpUrl) } reader, err := 
NewGeoLookupFromFile(tmpfile.Name()) if err != nil { t.Fatal(err) } defer reader.Close() testGeoLookupReader(t, reader) } func TestIsValidContinent(t *testing.T) { for country, continents := range ContinentMap { for _, continent := range continents { if !IsValidContinent(continent) { t.Errorf("Continent %s of country %s is not valid", continent, country) } } } } nextcloud-spreed-signaling-1.2.4/gnatsd.conf000066400000000000000000000002531460321600400210770ustar00rootroot00000000000000cluster { port: 4244 # port for inbound route connections routes = [ # You can add other servers here to build up a cluster. #nats-route://otherserver:4244 ] } nextcloud-spreed-signaling-1.2.4/go.mod000066400000000000000000000100411460321600400200520ustar00rootroot00000000000000module github.com/strukturag/nextcloud-spreed-signaling go 1.20 require ( github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490 github.com/fsnotify/fsnotify v1.7.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/gorilla/securecookie v1.1.2 github.com/gorilla/websocket v1.5.1 github.com/mailru/easyjson v0.7.7 github.com/nats-io/nats-server/v2 v2.10.12 github.com/nats-io/nats.go v1.34.0 github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/pion/sdp/v3 v3.0.9 github.com/prometheus/client_golang v1.19.0 go.etcd.io/etcd/api/v3 v3.5.12 go.etcd.io/etcd/client/pkg/v3 v3.5.12 go.etcd.io/etcd/client/v3 v3.5.12 go.etcd.io/etcd/server/v3 v3.5.12 google.golang.org/grpc v1.62.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.33.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // 
indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.7 // indirect github.com/minio/highwayhash v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nats-io/jwt/v2 v2.5.5 // indirect github.com/nats-io/nkeys v0.4.7 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/sirupsen/logrus v1.7.0 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect go.etcd.io/bbolt v1.3.8 // indirect go.etcd.io/etcd/client/v2 v2.305.12 // indirect go.etcd.io/etcd/pkg/v3 v3.5.12 // indirect go.etcd.io/etcd/raft/v3 v3.5.12 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect go.opentelemetry.io/otel v1.20.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect go.opentelemetry.io/otel/metric v1.20.0 // indirect 
go.opentelemetry.io/otel/sdk v1.20.0 // indirect go.opentelemetry.io/otel/trace v1.20.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.17.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) nextcloud-spreed-signaling-1.2.4/go.sum000066400000000000000000000707521460321600400201160ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490 h1:I8/Qu5NTaiXi1TsEYmTeLDUlf7u9pEdbG+azjDvx8Vg= github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490/go.mod h1:jWlUIP63OLr0cV2FGN2IEzSFsMAe58if8rk/SAE0JRE= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 
h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nats-io/jwt/v2 v2.5.5 h1:ROfXb50elFq5c9+1ztaUbdlrArNFl2+fQWP6B8HGEq4= github.com/nats-io/jwt/v2 v2.5.5/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A= github.com/nats-io/nats-server/v2 v2.10.12 h1:G6u+RDrHkw4bkwn7I911O5jqys7jJVRY6MwgndyUsnE= github.com/nats-io/nats-server/v2 v2.10.12/go.mod h1:H1n6zXtYLFCgXcf/SF8QNTSIFuS8tyZQMN9NguUHdEs= github.com/nats-io/nats.go v1.34.0 h1:fnxnPCNiwIG5w08rlMcEKTUw4AV/nKyGCOJE8TdhSPk= github.com/nats-io/nats.go v1.34.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0 h1:EFU9iv8BMPyBo8iFMHvQleYlF5M3PY6zpAbxsngImjE= github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0/go.mod h1:BN/Txse3qz8tZOmCm2OfajB2wHVujWmX3o9nVdsI6gE= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= 
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= 
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 
h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI= go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E= go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= go.etcd.io/etcd/pkg/v3 v3.5.12 h1:OK2fZKI5hX/+BTK76gXSTyZMrbnARyX9S643GenNGb8= go.etcd.io/etcd/pkg/v3 v3.5.12/go.mod h1:UVwg/QIMoJncyeb/YxvJBJCE/NEwtHWashqc8A1nj/M= go.etcd.io/etcd/raft/v3 v3.5.12 h1:7r22RufdDsq2z3STjoR7Msz6fYH8tmbkdheGfwJNRmU= go.etcd.io/etcd/raft/v3 v3.5.12/go.mod h1:ERQuZVe79PI6vcC3DlKBukDCLja/L7YMu29B74Iwj4U= go.etcd.io/etcd/server/v3 v3.5.12 h1:EtMjsbfyfkwZuA2JlKOiBfuGkFCekv5H178qjXypbG8= go.etcd.io/etcd/server/v3 v3.5.12/go.mod h1:axB0oCjMy+cemo5290/CutIjoxlfA6KVYKD1w0uue10= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= go.opentelemetry.io/otel v1.20.0 
h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0 
h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= nextcloud-spreed-signaling-1.2.4/grpc_backend.proto000066400000000000000000000023051460321600400224370ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ syntax = "proto3"; option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"; package signaling; service RpcBackend { rpc GetSessionCount(GetSessionCountRequest) returns (GetSessionCountReply) {} } message GetSessionCountRequest { string url = 1; } message GetSessionCountReply { uint32 count = 1; } nextcloud-spreed-signaling-1.2.4/grpc_client.go000066400000000000000000000462551460321600400216040ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "fmt" "log" "net" "net/url" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" clientv3 "go.etcd.io/etcd/client/v3" "google.golang.org/grpc" codes "google.golang.org/grpc/codes" "google.golang.org/grpc/resolver" status "google.golang.org/grpc/status" ) const ( GrpcTargetTypeStatic = "static" GrpcTargetTypeEtcd = "etcd" DefaultGrpcTargetType = GrpcTargetTypeStatic ) var ( customResolverPrefix atomic.Uint64 ) func init() { RegisterGrpcClientStats() } type grpcClientImpl struct { RpcBackendClient RpcInternalClient RpcMcuClient RpcSessionsClient } func newGrpcClientImpl(conn grpc.ClientConnInterface) *grpcClientImpl { return &grpcClientImpl{ RpcBackendClient: NewRpcBackendClient(conn), RpcInternalClient: NewRpcInternalClient(conn), RpcMcuClient: NewRpcMcuClient(conn), RpcSessionsClient: NewRpcSessionsClient(conn), } } type GrpcClient struct { ip net.IP target string conn *grpc.ClientConn impl *grpcClientImpl isSelf atomic.Bool } type customIpResolver struct { resolver.Builder resolver.Resolver scheme string addr string hostname string } func (r *customIpResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { state 
:= resolver.State{ Addresses: []resolver.Address{ { Addr: r.addr, ServerName: r.hostname, }, }, } if err := cc.UpdateState(state); err != nil { return nil, err } return r, nil } func (r *customIpResolver) Scheme() string { return r.scheme } func (r *customIpResolver) ResolveNow(opts resolver.ResolveNowOptions) { // Noop, we use a static configuration. } func (r *customIpResolver) Close() { // Noop } func NewGrpcClient(target string, ip net.IP, opts ...grpc.DialOption) (*GrpcClient, error) { var conn *grpc.ClientConn var err error if ip != nil { prefix := customResolverPrefix.Add(1) addr := ip.String() hostname := target if host, port, err := net.SplitHostPort(target); err == nil { addr = net.JoinHostPort(addr, port) hostname = host } resolver := &customIpResolver{ scheme: fmt.Sprintf("custom%d", prefix), addr: addr, hostname: hostname, } opts = append(opts, grpc.WithResolvers(resolver)) conn, err = grpc.Dial(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...) } else { conn, err = grpc.Dial(target, opts...) 
} if err != nil { return nil, err } result := &GrpcClient{ ip: ip, target: target, conn: conn, impl: newGrpcClientImpl(conn), } if ip != nil { result.target += " (" + ip.String() + ")" } return result, nil } func (c *GrpcClient) Target() string { return c.target } func (c *GrpcClient) Close() error { return c.conn.Close() } func (c *GrpcClient) IsSelf() bool { return c.isSelf.Load() } func (c *GrpcClient) SetSelf(self bool) { c.isSelf.Store(self) } func (c *GrpcClient) GetServerId(ctx context.Context) (string, error) { statsGrpcClientCalls.WithLabelValues("GetServerId").Inc() response, err := c.impl.GetServerId(ctx, &GetServerIdRequest{}, grpc.WaitForReady(true)) if err != nil { return "", err } return response.GetServerId(), nil } func (c *GrpcClient) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) { statsGrpcClientCalls.WithLabelValues("LookupSessionId").Inc() // TODO: Remove debug logging log.Printf("Lookup room session %s on %s", roomSessionId, c.Target()) response, err := c.impl.LookupSessionId(ctx, &LookupSessionIdRequest{ RoomSessionId: roomSessionId, DisconnectReason: disconnectReason, }, grpc.WaitForReady(true)) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return "", ErrNoSuchRoomSession } else if err != nil { return "", err } sessionId := response.GetSessionId() if sessionId == "" { return "", ErrNoSuchRoomSession } return sessionId, nil } func (c *GrpcClient) IsSessionInCall(ctx context.Context, sessionId string, room *Room) (bool, error) { statsGrpcClientCalls.WithLabelValues("IsSessionInCall").Inc() // TODO: Remove debug logging log.Printf("Check if session %s is in call %s on %s", sessionId, room.Id(), c.Target()) response, err := c.impl.IsSessionInCall(ctx, &IsSessionInCallRequest{ SessionId: sessionId, RoomId: room.Id(), BackendUrl: room.Backend().url, }, grpc.WaitForReady(true)) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return false, nil } else 
if err != nil { return false, err } return response.GetInCall(), nil } func (c *GrpcClient) GetPublisherId(ctx context.Context, sessionId string, streamType StreamType) (string, string, net.IP, error) { statsGrpcClientCalls.WithLabelValues("GetPublisherId").Inc() // TODO: Remove debug logging log.Printf("Get %s publisher id %s on %s", streamType, sessionId, c.Target()) response, err := c.impl.GetPublisherId(ctx, &GetPublisherIdRequest{ SessionId: sessionId, StreamType: string(streamType), }, grpc.WaitForReady(true)) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return "", "", nil, nil } else if err != nil { return "", "", nil, err } return response.GetPublisherId(), response.GetProxyUrl(), net.ParseIP(response.GetIp()), nil } func (c *GrpcClient) GetSessionCount(ctx context.Context, u *url.URL) (uint32, error) { statsGrpcClientCalls.WithLabelValues("GetSessionCount").Inc() // TODO: Remove debug logging log.Printf("Get session count for %s on %s", u, c.Target()) response, err := c.impl.GetSessionCount(ctx, &GetSessionCountRequest{ Url: u.String(), }, grpc.WaitForReady(true)) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return 0, nil } else if err != nil { return 0, err } return response.GetCount(), nil } type grpcClientsList struct { clients []*GrpcClient entry *DnsMonitorEntry } type GrpcClients struct { mu sync.RWMutex clientsMap map[string]*grpcClientsList clients []*GrpcClient dnsMonitor *DnsMonitor dnsDiscovery bool etcdClient *EtcdClient targetPrefix string targetInformation map[string]*GrpcTargetInformationEtcd dialOptions atomic.Value // []grpc.DialOption initializedCtx context.Context initializedFunc context.CancelFunc initializedWg sync.WaitGroup wakeupChanForTesting chan struct{} selfCheckWaitGroup sync.WaitGroup } func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient, dnsMonitor *DnsMonitor) (*GrpcClients, error) { initializedCtx, initializedFunc := context.WithCancel(context.Background()) 
result := &GrpcClients{ dnsMonitor: dnsMonitor, etcdClient: etcdClient, initializedCtx: initializedCtx, initializedFunc: initializedFunc, } if err := result.load(config, false); err != nil { return nil, err } return result, nil } func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error { creds, err := NewReloadableCredentials(config, false) if err != nil { return err } opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)} c.dialOptions.Store(opts) targetType, _ := config.GetString("grpc", "targettype") if targetType == "" { targetType = DefaultGrpcTargetType } switch targetType { case GrpcTargetTypeStatic: err = c.loadTargetsStatic(config, fromReload, opts...) case GrpcTargetTypeEtcd: err = c.loadTargetsEtcd(config, fromReload, opts...) default: err = fmt.Errorf("unknown GRPC target type: %s", targetType) } return err } func (c *GrpcClients) closeClient(client *GrpcClient) { if client.IsSelf() { // Already closed. return } if err := client.Close(); err != nil { log.Printf("Error closing client to %s: %s", client.Target(), err) } } func (c *GrpcClients) isClientAvailable(target string, client *GrpcClient) bool { c.mu.RLock() defer c.mu.RUnlock() entries, found := c.clientsMap[target] if !found { return false } for _, entry := range entries.clients { if entry == client { return true } } return false } func (c *GrpcClients) getServerIdWithTimeout(ctx context.Context, client *GrpcClient) (string, error) { ctx2, cancel := context.WithTimeout(ctx, time.Second) defer cancel() id, err := client.GetServerId(ctx2) return id, err } func (c *GrpcClients) checkIsSelf(ctx context.Context, target string, client *GrpcClient) { backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay) defer c.selfCheckWaitGroup.Done() loop: for { select { case <-ctx.Done(): // Cancelled return default: if !c.isClientAvailable(target, client) { return } id, err := c.getServerIdWithTimeout(ctx, client) if err != nil { if status.Code(err) != codes.Canceled { 
log.Printf("Error checking GRPC server id of %s, retrying in %s: %s", client.Target(), backoff.NextWait(), err) } backoff.Wait(ctx) continue } if id == GrpcServerId { log.Printf("GRPC target %s is this server, removing", client.Target()) c.closeClient(client) client.SetSelf(true) } else { log.Printf("Checked GRPC server id of %s", client.Target()) } break loop } } } func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bool, opts ...grpc.DialOption) error { c.mu.Lock() defer c.mu.Unlock() dnsDiscovery, _ := config.GetBool("grpc", "dnsdiscovery") if dnsDiscovery != c.dnsDiscovery { if !dnsDiscovery { for _, entry := range c.clientsMap { if entry.entry != nil { c.dnsMonitor.Remove(entry.entry) entry.entry = nil } } } c.dnsDiscovery = dnsDiscovery } clientsMap := make(map[string]*grpcClientsList) var clients []*GrpcClient removeTargets := make(map[string]bool, len(c.clientsMap)) for target, entries := range c.clientsMap { removeTargets[target] = true clientsMap[target] = entries } targets, _ := config.GetString("grpc", "targets") for _, target := range strings.Split(targets, ",") { target = strings.TrimSpace(target) if target == "" { continue } if entries, found := clientsMap[target]; found { clients = append(clients, entries.clients...) if dnsDiscovery && entries.entry == nil { entry, err := c.dnsMonitor.Add(target, c.onLookup) if err != nil { return err } entries.entry = entry } delete(removeTargets, target) continue } host := target if h, _, err := net.SplitHostPort(target); err == nil { host = h } if dnsDiscovery && net.ParseIP(host) == nil { // Use dedicated client for each IP address. entry, err := c.dnsMonitor.Add(target, c.onLookup) if err != nil { return err } clientsMap[target] = &grpcClientsList{ entry: entry, } continue } client, err := NewGrpcClient(target, nil, opts...) 
if err != nil { for _, entry := range clientsMap { for _, client := range entry.clients { c.closeClient(client) } if entry.entry != nil { c.dnsMonitor.Remove(entry.entry) entry.entry = nil } } return err } c.selfCheckWaitGroup.Add(1) go c.checkIsSelf(context.Background(), target, client) log.Printf("Adding %s as GRPC target", client.Target()) entry, found := clientsMap[target] if !found { entry = &grpcClientsList{} } entry.clients = append(entry.clients, client) clients = append(clients, client) } for target := range removeTargets { if entry, found := clientsMap[target]; found { for _, client := range entry.clients { log.Printf("Deleting GRPC target %s", client.Target()) c.closeClient(client) } if entry.entry != nil { c.dnsMonitor.Remove(entry.entry) entry.entry = nil } delete(clientsMap, target) } } c.clients = clients c.clientsMap = clientsMap c.initializedFunc() statsGrpcClients.Set(float64(len(clients))) return nil } func (c *GrpcClients) onLookup(entry *DnsMonitorEntry, all []net.IP, added []net.IP, keep []net.IP, removed []net.IP) { c.mu.Lock() defer c.mu.Unlock() target := entry.URL() e, found := c.clientsMap[target] if !found { return } opts := c.dialOptions.Load().([]grpc.DialOption) mapModified := false var newClients []*GrpcClient for _, ip := range removed { for _, client := range e.clients { if ip.Equal(client.ip) { mapModified = true log.Printf("Removing connection to %s", client.Target()) c.closeClient(client) c.wakeupForTesting() } } } for _, ip := range keep { for _, client := range e.clients { if ip.Equal(client.ip) { newClients = append(newClients, client) } } } for _, ip := range added { client, err := NewGrpcClient(target, ip, opts...) 
if err != nil { log.Printf("Error creating client to %s with IP %s: %s", target, ip.String(), err) continue } c.selfCheckWaitGroup.Add(1) go c.checkIsSelf(context.Background(), target, client) log.Printf("Adding %s as GRPC target", client.Target()) newClients = append(newClients, client) mapModified = true c.wakeupForTesting() } if mapModified { c.clientsMap[target].clients = newClients c.clients = make([]*GrpcClient, 0, len(c.clientsMap)) for _, entry := range c.clientsMap { c.clients = append(c.clients, entry.clients...) } statsGrpcClients.Set(float64(len(c.clients))) } } func (c *GrpcClients) loadTargetsEtcd(config *goconf.ConfigFile, fromReload bool, opts ...grpc.DialOption) error { if !c.etcdClient.IsConfigured() { return fmt.Errorf("No etcd endpoints configured") } targetPrefix, _ := config.GetString("grpc", "targetprefix") if targetPrefix == "" { return fmt.Errorf("No GRPC target prefix configured") } c.targetPrefix = targetPrefix if c.targetInformation == nil { c.targetInformation = make(map[string]*GrpcTargetInformationEtcd) } c.etcdClient.AddListener(c) return nil } func (c *GrpcClients) EtcdClientCreated(client *EtcdClient) { c.initializedWg.Add(1) go func() { if err := client.Watch(context.Background(), c.targetPrefix, c, clientv3.WithPrefix()); err != nil { log.Printf("Error processing watch for %s: %s", c.targetPrefix, err) } }() go func() { if err := client.WaitForConnection(context.Background()); err != nil { panic(err) } backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay) for { response, err := c.getGrpcTargets(client, c.targetPrefix) if err != nil { if err == context.DeadlineExceeded { log.Printf("Timeout getting initial list of GRPC targets, retry in %s", backoff.NextWait()) } else { log.Printf("Could not get initial list of GRPC targets, retry in %s: %s", backoff.NextWait(), err) } backoff.Wait(context.Background()) continue } for _, ev := range response.Kvs { c.EtcdKeyUpdated(client, string(ev.Key), ev.Value) } 
c.initializedWg.Wait() c.initializedFunc() return } }() } func (c *GrpcClients) EtcdWatchCreated(client *EtcdClient, key string) { c.initializedWg.Done() } func (c *GrpcClients) getGrpcTargets(client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() return client.Get(ctx, targetPrefix, clientv3.WithPrefix()) } func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) { var info GrpcTargetInformationEtcd if err := json.Unmarshal(data, &info); err != nil { log.Printf("Could not decode GRPC target %s=%s: %s", key, string(data), err) return } if err := info.CheckValid(); err != nil { log.Printf("Received invalid GRPC target %s=%s: %s", key, string(data), err) return } c.mu.Lock() defer c.mu.Unlock() prev, found := c.targetInformation[key] if found && prev.Address != info.Address { // Address of endpoint has changed, remove old one. c.removeEtcdClientLocked(key) } if _, found := c.clientsMap[info.Address]; found { log.Printf("GRPC target %s already exists, ignoring %s", info.Address, key) return } opts := c.dialOptions.Load().([]grpc.DialOption) cl, err := NewGrpcClient(info.Address, nil, opts...) 
if err != nil { log.Printf("Could not create GRPC client for target %s: %s", info.Address, err) return } c.selfCheckWaitGroup.Add(1) go c.checkIsSelf(context.Background(), info.Address, cl) log.Printf("Adding %s as GRPC target", cl.Target()) if c.clientsMap == nil { c.clientsMap = make(map[string]*grpcClientsList) } c.clientsMap[info.Address] = &grpcClientsList{ clients: []*GrpcClient{cl}, } c.clients = append(c.clients, cl) c.targetInformation[key] = &info statsGrpcClients.Inc() c.wakeupForTesting() } func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string) { c.mu.Lock() defer c.mu.Unlock() c.removeEtcdClientLocked(key) } func (c *GrpcClients) removeEtcdClientLocked(key string) { info, found := c.targetInformation[key] if !found { log.Printf("No connection found for %s, ignoring", key) c.wakeupForTesting() return } delete(c.targetInformation, key) entry, found := c.clientsMap[info.Address] if !found { return } for _, client := range entry.clients { log.Printf("Removing connection to %s (from %s)", client.Target(), key) c.closeClient(client) } delete(c.clientsMap, info.Address) c.clients = make([]*GrpcClient, 0, len(c.clientsMap)) for _, entry := range c.clientsMap { c.clients = append(c.clients, entry.clients...) 
} statsGrpcClients.Dec() c.wakeupForTesting() } func (c *GrpcClients) WaitForInitialized(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() case <-c.initializedCtx.Done(): return nil } } func (c *GrpcClients) wakeupForTesting() { if c.wakeupChanForTesting == nil { return } select { case c.wakeupChanForTesting <- struct{}{}: default: } } func (c *GrpcClients) Reload(config *goconf.ConfigFile) { if err := c.load(config, true); err != nil { log.Printf("Could not reload RPC clients: %s", err) } } func (c *GrpcClients) Close() { c.mu.Lock() defer c.mu.Unlock() for _, entry := range c.clientsMap { for _, client := range entry.clients { if err := client.Close(); err != nil { log.Printf("Error closing client to %s: %s", client.Target(), err) } } if entry.entry != nil { c.dnsMonitor.Remove(entry.entry) entry.entry = nil } } c.clients = nil c.clientsMap = nil c.dnsDiscovery = false if c.etcdClient != nil { c.etcdClient.RemoveListener(c) } } func (c *GrpcClients) GetClients() []*GrpcClient { c.mu.RLock() defer c.mu.RUnlock() if len(c.clients) == 0 { return c.clients } result := make([]*GrpcClient, 0, len(c.clients)-1) for _, client := range c.clients { if client.IsSelf() { continue } result = append(result, client) } return result } nextcloud-spreed-signaling-1.2.4/grpc_client_test.go000066400000000000000000000277351460321600400226450ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "crypto/rand" "crypto/rsa" "fmt" "net" "os" "path" "testing" "time" "github.com/dlintw/goconf" "go.etcd.io/etcd/server/v3/embed" ) func (c *GrpcClients) getWakeupChannelForTesting() <-chan struct{} { c.mu.Lock() defer c.mu.Unlock() if c.wakeupChanForTesting != nil { return c.wakeupChanForTesting } ch := make(chan struct{}, 1) c.wakeupChanForTesting = ch return ch } func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, etcdClient *EtcdClient) (*GrpcClients, *DnsMonitor) { dnsMonitor := newDnsMonitorForTest(t, time.Hour) // will be updated manually client, err := NewGrpcClients(config, etcdClient, dnsMonitor) if err != nil { t.Fatal(err) } t.Cleanup(func() { client.Close() }) return client, dnsMonitor } func NewGrpcClientsForTest(t *testing.T, addr string) (*GrpcClients, *DnsMonitor) { config := goconf.NewConfigFile() config.AddOption("grpc", "targets", addr) config.AddOption("grpc", "dnsdiscovery", "true") return NewGrpcClientsForTestWithConfig(t, config, nil) } func NewGrpcClientsWithEtcdForTest(t *testing.T, etcd *embed.Etcd) (*GrpcClients, *DnsMonitor) { config := goconf.NewConfigFile() config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String()) config.AddOption("grpc", "targettype", "etcd") config.AddOption("grpc", "targetprefix", "/grpctargets") etcdClient, err := NewEtcdClient(config, "") if err != nil { t.Fatal(err) } t.Cleanup(func() { if err := etcdClient.Close(); err != nil { t.Error(err) } }) return NewGrpcClientsForTestWithConfig(t, config, etcdClient) } func drainWakeupChannel(ch <-chan struct{}) { for { 
select { case <-ch: default: return } } } func waitForEvent(ctx context.Context, t *testing.T, ch <-chan struct{}) { t.Helper() select { case <-ch: return case <-ctx.Done(): t.Error("timeout waiting for event") } } func Test_GrpcClients_EtcdInitial(t *testing.T) { _, addr1 := NewGrpcServerForTest(t) _, addr2 := NewGrpcServerForTest(t) etcd := NewEtcdForTest(t) SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}")) SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}")) client, _ := NewGrpcClientsWithEtcdForTest(t, etcd) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := client.WaitForInitialized(ctx); err != nil { t.Fatal(err) } if clients := client.GetClients(); len(clients) != 2 { t.Errorf("Expected two clients, got %+v", clients) } } func Test_GrpcClients_EtcdUpdate(t *testing.T) { etcd := NewEtcdForTest(t) client, _ := NewGrpcClientsWithEtcdForTest(t, etcd) ch := client.getWakeupChannelForTesting() ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if clients := client.GetClients(); len(clients) != 0 { t.Errorf("Expected no clients, got %+v", clients) } drainWakeupChannel(ch) _, addr1 := NewGrpcServerForTest(t) SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}")) waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != addr1 { t.Errorf("Expected target %s, got %s", addr1, clients[0].Target()) } drainWakeupChannel(ch) _, addr2 := NewGrpcServerForTest(t) SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}")) waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 2 { t.Errorf("Expected two clients, got %+v", clients) } else if clients[0].Target() != addr1 { t.Errorf("Expected target %s, got %s", addr1, clients[0].Target()) } else if clients[1].Target() != addr2 { 
t.Errorf("Expected target %s, got %s", addr2, clients[1].Target()) } drainWakeupChannel(ch) DeleteEtcdValue(etcd, "/grpctargets/one") waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != addr2 { t.Errorf("Expected target %s, got %s", addr2, clients[0].Target()) } drainWakeupChannel(ch) _, addr3 := NewGrpcServerForTest(t) SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr3+"\"}")) waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != addr3 { t.Errorf("Expected target %s, got %s", addr3, clients[0].Target()) } } func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) { etcd := NewEtcdForTest(t) client, _ := NewGrpcClientsWithEtcdForTest(t, etcd) ch := client.getWakeupChannelForTesting() ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if clients := client.GetClients(); len(clients) != 0 { t.Errorf("Expected no clients, got %+v", clients) } drainWakeupChannel(ch) _, addr1 := NewGrpcServerForTest(t) SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}")) waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != addr1 { t.Errorf("Expected target %s, got %s", addr1, clients[0].Target()) } drainWakeupChannel(ch) server2, addr2 := NewGrpcServerForTest(t) server2.serverId = GrpcServerId SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}")) waitForEvent(ctx, t, ch) client.selfCheckWaitGroup.Wait() if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != addr1 { t.Errorf("Expected target %s, got %s", addr1, clients[0].Target()) } drainWakeupChannel(ch) DeleteEtcdValue(etcd, 
"/grpctargets/two") waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != addr1 { t.Errorf("Expected target %s, got %s", addr1, clients[0].Target()) } } func Test_GrpcClients_DnsDiscovery(t *testing.T) { lookup := newMockDnsLookupForTest(t) target := "testgrpc:12345" ip1 := net.ParseIP("192.168.0.1") ip2 := net.ParseIP("192.168.0.2") targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1) targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2) lookup.Set("testgrpc", []net.IP{ip1}) client, dnsMonitor := NewGrpcClientsForTest(t, target) ch := client.getWakeupChannelForTesting() ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() dnsMonitor.checkHostnames() if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != targetWithIp1 { t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target()) } else if !clients[0].ip.Equal(ip1) { t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip) } lookup.Set("testgrpc", []net.IP{ip1, ip2}) drainWakeupChannel(ch) dnsMonitor.checkHostnames() waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 2 { t.Errorf("Expected two client, got %+v", clients) } else if clients[0].Target() != targetWithIp1 { t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target()) } else if !clients[0].ip.Equal(ip1) { t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip) } else if clients[1].Target() != targetWithIp2 { t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target()) } else if !clients[1].ip.Equal(ip2) { t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip) } lookup.Set("testgrpc", []net.IP{ip2}) drainWakeupChannel(ch) dnsMonitor.checkHostnames() waitForEvent(ctx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", 
clients) } else if clients[0].Target() != targetWithIp2 { t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target()) } else if !clients[0].ip.Equal(ip2) { t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip) } } func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) { lookup := newMockDnsLookupForTest(t) target := "testgrpc:12345" ip1 := net.ParseIP("192.168.0.1") targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1) client, dnsMonitor := NewGrpcClientsForTest(t, target) ch := client.getWakeupChannelForTesting() testCtx, testCtxCancel := context.WithTimeout(context.Background(), testTimeout) defer testCtxCancel() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := client.WaitForInitialized(ctx); err != nil { t.Fatal(err) } if clients := client.GetClients(); len(clients) != 0 { t.Errorf("Expected no client, got %+v", clients) } lookup.Set("testgrpc", []net.IP{ip1}) drainWakeupChannel(ch) dnsMonitor.checkHostnames() waitForEvent(testCtx, t, ch) if clients := client.GetClients(); len(clients) != 1 { t.Errorf("Expected one client, got %+v", clients) } else if clients[0].Target() != targetWithIp1 { t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target()) } else if !clients[0].ip.Equal(ip1) { t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip) } } func Test_GrpcClients_Encryption(t *testing.T) { serverKey, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } clientKey, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey) clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey) dir := t.TempDir() serverPrivkeyFile := path.Join(dir, "server-privkey.pem") serverPubkeyFile := path.Join(dir, "server-pubkey.pem") serverCertFile := path.Join(dir, "server-cert.pem") WritePrivateKey(serverKey, serverPrivkeyFile) // nolint 
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint os.WriteFile(serverCertFile, serverCert, 0755) // nolint clientPrivkeyFile := path.Join(dir, "client-privkey.pem") clientPubkeyFile := path.Join(dir, "client-pubkey.pem") clientCertFile := path.Join(dir, "client-cert.pem") WritePrivateKey(clientKey, clientPrivkeyFile) // nolint WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint os.WriteFile(clientCertFile, clientCert, 0755) // nolint serverConfig := goconf.NewConfigFile() serverConfig.AddOption("grpc", "servercertificate", serverCertFile) serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile) serverConfig.AddOption("grpc", "clientca", clientCertFile) _, addr := NewGrpcServerForTestWithConfig(t, serverConfig) clientConfig := goconf.NewConfigFile() clientConfig.AddOption("grpc", "targets", addr) clientConfig.AddOption("grpc", "clientcertificate", clientCertFile) clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile) clientConfig.AddOption("grpc", "serverca", serverCertFile) clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil) ctx, cancel1 := context.WithTimeout(context.Background(), time.Second) defer cancel1() if err := clients.WaitForInitialized(ctx); err != nil { t.Fatal(err) } for _, client := range clients.GetClients() { if _, err := client.GetServerId(ctx); err != nil { t.Fatal(err) } } } nextcloud-spreed-signaling-1.2.4/grpc_common.go000066400000000000000000000113671460321600400216120ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "crypto/tls" "fmt" "log" "net" "github.com/dlintw/goconf" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) type reloadableCredentials struct { config *tls.Config loader *CertificateReloader pool *CertPoolReloader } func (c *reloadableCredentials) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := c.config.Clone() if c.loader != nil { cfg.GetClientCertificate = c.loader.GetClientCertificate } if c.pool != nil { cfg.RootCAs = c.pool.GetCertPool() } if cfg.ServerName == "" { serverName, _, err := net.SplitHostPort(authority) if err != nil { // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
serverName = authority } cfg.ServerName = serverName } conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { errChannel <- conn.Handshake() close(errChannel) }() select { case err := <-errChannel: if err != nil { conn.Close() return nil, nil, err } case <-ctx.Done(): conn.Close() return nil, nil, ctx.Err() } tlsInfo := credentials.TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: credentials.CommonAuthInfo{ SecurityLevel: credentials.PrivacyAndIntegrity, }, } return WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *reloadableCredentials) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { cfg := c.config.Clone() if c.loader != nil { cfg.GetCertificate = c.loader.GetCertificate } if c.pool != nil { cfg.ClientCAs = c.pool.GetCertPool() } conn := tls.Server(rawConn, cfg) if err := conn.Handshake(); err != nil { conn.Close() return nil, nil, err } tlsInfo := credentials.TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: credentials.CommonAuthInfo{ SecurityLevel: credentials.PrivacyAndIntegrity, }, } return WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *reloadableCredentials) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", ServerName: c.config.ServerName, } } func (c *reloadableCredentials) Clone() credentials.TransportCredentials { return &reloadableCredentials{ config: c.config.Clone(), pool: c.pool, } } func (c *reloadableCredentials) OverrideServerName(serverName string) error { c.config.ServerName = serverName return nil } func NewReloadableCredentials(config *goconf.ConfigFile, server bool) (credentials.TransportCredentials, error) { var prefix string var caPrefix string if server { prefix = "server" caPrefix = "client" } else { prefix = "client" caPrefix = "server" } certificateFile, _ := config.GetString("grpc", prefix+"certificate") keyFile, _ := config.GetString("grpc", prefix+"key") caFile, _ := 
config.GetString("grpc", caPrefix+"ca") cfg := &tls.Config{ NextProtos: []string{"h2"}, } var loader *CertificateReloader var err error if certificateFile != "" && keyFile != "" { loader, err = NewCertificateReloader(certificateFile, keyFile) if err != nil { return nil, fmt.Errorf("invalid GRPC %s certificate / key in %s / %s: %w", prefix, certificateFile, keyFile, err) } } var pool *CertPoolReloader if caFile != "" { pool, err = NewCertPoolReloader(caFile) if err != nil { return nil, err } if server { cfg.ClientAuth = tls.RequireAndVerifyClientCert } } if loader == nil && pool == nil { if server { log.Printf("WARNING: No GRPC server certificate and/or key configured, running unencrypted") } else { log.Printf("WARNING: No GRPC CA configured, expecting unencrypted connections") } return insecure.NewCredentials(), nil } creds := &reloadableCredentials{ config: cfg, loader: loader, pool: pool, } return creds, nil } nextcloud-spreed-signaling-1.2.4/grpc_common_test.go000066400000000000000000000065131460321600400226460ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "errors" "io/fs" "math/big" "net" "os" "testing" "time" ) func (c *reloadableCredentials) WaitForCertificateReload(ctx context.Context) error { if c.loader == nil { return errors.New("no certificate loaded") } return c.loader.WaitForReload(ctx) } func (c *reloadableCredentials) WaitForCertPoolReload(ctx context.Context) error { if c.pool == nil { return errors.New("no certificate pool loaded") } return c.pool.WaitForReload(ctx) } func GenerateSelfSignedCertificateForTesting(t *testing.T, bits int, organization string, key *rsa.PrivateKey) []byte { template := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ Organization: []string{organization}, }, NotBefore: time.Now(), NotAfter: time.Now().Add(time.Hour * 24 * 180), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, }, BasicConstraintsValid: true, IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, } data, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key) if err != nil { t.Fatal(err) } data = pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: data, }) return data } func WritePrivateKey(key *rsa.PrivateKey, filename string) error { data := pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key), }) return os.WriteFile(filename, data, 0600) } func WritePublicKey(key *rsa.PublicKey, filename string) error { data, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } data = pem.EncodeToMemory(&pem.Block{ Type: "RSA PUBLIC KEY", Bytes: data, }) return os.WriteFile(filename, data, 0755) } func replaceFile(t *testing.T, filename string, data []byte, perm fs.FileMode) { t.Helper() oldStat, err := os.Stat(filename) if err != nil { t.Fatalf("can't stat old file %s: %s", filename, err) return } 
for { if err := os.WriteFile(filename, data, perm); err != nil { t.Fatalf("can't write file %s: %s", filename, err) return } newStat, err := os.Stat(filename) if err != nil { t.Fatalf("can't stat new file %s: %s", filename, err) return } // We need different modification times. if !newStat.ModTime().Equal(oldStat.ModTime()) { break } time.Sleep(time.Millisecond) } } nextcloud-spreed-signaling-1.2.4/grpc_internal.proto000066400000000000000000000022301460321600400226610ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ syntax = "proto3"; option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"; package signaling; service RpcInternal { rpc GetServerId(GetServerIdRequest) returns (GetServerIdReply) {} } message GetServerIdRequest { } message GetServerIdReply { string serverId = 1; } nextcloud-spreed-signaling-1.2.4/grpc_mcu.proto000066400000000000000000000023761460321600400216440ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ syntax = "proto3"; option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"; package signaling; service RpcMcu { rpc GetPublisherId(GetPublisherIdRequest) returns (GetPublisherIdReply) {} } message GetPublisherIdRequest { string sessionId = 1; string streamType = 2; } message GetPublisherIdReply { string publisherId = 1; string proxyUrl = 2; string ip = 3; } nextcloud-spreed-signaling-1.2.4/grpc_server.go000066400000000000000000000143421460321600400216240ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
* * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "crypto/sha256" "encoding/hex" "errors" "fmt" "log" "net" "net/url" "os" "github.com/dlintw/goconf" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" status "google.golang.org/grpc/status" ) var ( GrpcServerId string ) func init() { RegisterGrpcServerStats() hostname, err := os.Hostname() if err != nil { hostname = newRandomString(8) } md := sha256.New() md.Write([]byte(fmt.Sprintf("%s-%s-%d", newRandomString(32), hostname, os.Getpid()))) GrpcServerId = hex.EncodeToString(md.Sum(nil)) } type GrpcServer struct { UnimplementedRpcBackendServer UnimplementedRpcInternalServer UnimplementedRpcMcuServer UnimplementedRpcSessionsServer creds credentials.TransportCredentials conn *grpc.Server listener net.Listener serverId string // can be overwritten from tests hub *Hub } func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) { var listener net.Listener if addr, _ := config.GetString("grpc", "listen"); addr != "" { var err error listener, err = net.Listen("tcp", addr) if err != nil { return nil, fmt.Errorf("could not create GRPC listener %s: %w", addr, err) } } creds, err := NewReloadableCredentials(config, true) if err != nil { return nil, err } conn := grpc.NewServer(grpc.Creds(creds)) result := &GrpcServer{ creds: creds, conn: conn, listener: listener, serverId: GrpcServerId, } RegisterRpcBackendServer(conn, result) RegisterRpcInternalServer(conn, result) RegisterRpcSessionsServer(conn, result) RegisterRpcMcuServer(conn, result) return result, nil } func (s *GrpcServer) Run() error { if s.listener == nil { return nil } return s.conn.Serve(s.listener) } func (s *GrpcServer) Close() { s.conn.GracefulStop() } func (s *GrpcServer) LookupSessionId(ctx context.Context, request *LookupSessionIdRequest) (*LookupSessionIdReply, error) { 
statsGrpcServerCalls.WithLabelValues("LookupSessionId").Inc() // TODO: Remove debug logging log.Printf("Lookup session id for room session id %s", request.RoomSessionId) sid, err := s.hub.roomSessions.GetSessionId(request.RoomSessionId) if errors.Is(err, ErrNoSuchRoomSession) { return nil, status.Error(codes.NotFound, "no such room session id") } else if err != nil { return nil, err } if sid != "" && request.DisconnectReason != "" { if session := s.hub.GetSessionByPublicId(sid); session != nil { log.Printf("Closing session %s because same room session %s connected", session.PublicId(), request.RoomSessionId) session.LeaveRoom(false) switch sess := session.(type) { case *ClientSession: if client := sess.GetClient(); client != nil { client.SendByeResponseWithReason(nil, "room_session_reconnected") } } session.Close() } } return &LookupSessionIdReply{ SessionId: sid, }, nil } func (s *GrpcServer) IsSessionInCall(ctx context.Context, request *IsSessionInCallRequest) (*IsSessionInCallReply, error) { statsGrpcServerCalls.WithLabelValues("IsSessionInCall").Inc() // TODO: Remove debug logging log.Printf("Check if session %s is in call %s on %s", request.SessionId, request.RoomId, request.BackendUrl) session := s.hub.GetSessionByPublicId(request.SessionId) if session == nil { return nil, status.Error(codes.NotFound, "no such session id") } result := &IsSessionInCallReply{} room := session.GetRoom() if room == nil || room.Id() != request.GetRoomId() || room.Backend().url != request.GetBackendUrl() || (session.ClientType() != HelloClientTypeInternal && !room.IsSessionInCall(session)) { // Recipient is not in a room, a different room or not in the call. 
result.InCall = false } else { result.InCall = true } return result, nil } func (s *GrpcServer) GetPublisherId(ctx context.Context, request *GetPublisherIdRequest) (*GetPublisherIdReply, error) { statsGrpcServerCalls.WithLabelValues("GetPublisherId").Inc() // TODO: Remove debug logging log.Printf("Get %s publisher id for session %s", request.StreamType, request.SessionId) session := s.hub.GetSessionByPublicId(request.SessionId) if session == nil { return nil, status.Error(codes.NotFound, "no such session") } clientSession, ok := session.(*ClientSession) if !ok { return nil, status.Error(codes.NotFound, "no such session") } publisher := clientSession.GetOrWaitForPublisher(ctx, StreamType(request.StreamType)) if publisher, ok := publisher.(*mcuProxyPublisher); ok { reply := &GetPublisherIdReply{ PublisherId: publisher.Id(), ProxyUrl: publisher.conn.rawUrl, } if ip := publisher.conn.ip; ip != nil { reply.Ip = ip.String() } return reply, nil } return nil, status.Error(codes.NotFound, "no such publisher") } func (s *GrpcServer) GetServerId(ctx context.Context, request *GetServerIdRequest) (*GetServerIdReply, error) { statsGrpcServerCalls.WithLabelValues("GetServerId").Inc() return &GetServerIdReply{ ServerId: s.serverId, }, nil } func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCountRequest) (*GetSessionCountReply, error) { statsGrpcServerCalls.WithLabelValues("SessionCount").Inc() u, err := url.Parse(request.Url) if err != nil { return nil, status.Error(codes.InvalidArgument, "invalid url") } backend := s.hub.backend.GetBackend(u) if backend == nil { return nil, status.Error(codes.NotFound, "no such backend") } return &GetSessionCountReply{ Count: uint32(backend.Len()), }, nil } nextcloud-spreed-signaling-1.2.4/grpc_server_test.go000066400000000000000000000170301460321600400226600ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "encoding/pem" "errors" "net" "os" "path" "strconv" "testing" "time" "github.com/dlintw/goconf" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) func (s *GrpcServer) WaitForCertificateReload(ctx context.Context) error { c, ok := s.creds.(*reloadableCredentials) if !ok { return errors.New("no reloadable credentials found") } return c.WaitForCertificateReload(ctx) } func (s *GrpcServer) WaitForCertPoolReload(ctx context.Context) error { c, ok := s.creds.(*reloadableCredentials) if !ok { return errors.New("no reloadable credentials found") } return c.WaitForCertPoolReload(ctx) } func NewGrpcServerForTestWithConfig(t *testing.T, config *goconf.ConfigFile) (server *GrpcServer, addr string) { for port := 50000; port < 50100; port++ { addr = net.JoinHostPort("127.0.0.1", strconv.Itoa(port)) config.AddOption("grpc", "listen", addr) var err error server, err = NewGrpcServer(config) if isErrorAddressAlreadyInUse(err) { continue } else if err != nil { t.Fatal(err) } break } if server == nil { t.Fatal("could not find free port") } // Don't match with own server id by default. 
server.serverId = "dont-match" go func() { if err := server.Run(); err != nil { t.Errorf("could not start GRPC server: %s", err) } }() t.Cleanup(func() { server.Close() }) return server, addr } func NewGrpcServerForTest(t *testing.T) (server *GrpcServer, addr string) { config := goconf.NewConfigFile() return NewGrpcServerForTestWithConfig(t, config) } func Test_GrpcServer_ReloadCerts(t *testing.T) { key, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } org1 := "Testing certificate" cert1 := GenerateSelfSignedCertificateForTesting(t, 1024, org1, key) dir := t.TempDir() privkeyFile := path.Join(dir, "privkey.pem") pubkeyFile := path.Join(dir, "pubkey.pem") certFile := path.Join(dir, "cert.pem") WritePrivateKey(key, privkeyFile) // nolint WritePublicKey(&key.PublicKey, pubkeyFile) // nolint os.WriteFile(certFile, cert1, 0755) // nolint config := goconf.NewConfigFile() config.AddOption("grpc", "servercertificate", certFile) config.AddOption("grpc", "serverkey", privkeyFile) UpdateCertificateCheckIntervalForTest(t, 0) server, addr := NewGrpcServerForTestWithConfig(t, config) cp1 := x509.NewCertPool() if !cp1.AppendCertsFromPEM(cert1) { t.Fatalf("could not add certificate") } cfg1 := &tls.Config{ RootCAs: cp1, } conn1, err := tls.Dial("tcp", addr, cfg1) if err != nil { t.Fatal(err) } defer conn1.Close() // nolint state1 := conn1.ConnectionState() if certs := state1.PeerCertificates; len(certs) == 0 { t.Errorf("expected certificates, got %+v", state1) } else if len(certs[0].Subject.Organization) == 0 { t.Errorf("expected organization, got %s", certs[0].Subject) } else if certs[0].Subject.Organization[0] != org1 { t.Errorf("expected organization %s, got %s", org1, certs[0].Subject) } org2 := "Updated certificate" cert2 := GenerateSelfSignedCertificateForTesting(t, 1024, org2, key) replaceFile(t, certFile, cert2, 0755) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := server.WaitForCertificateReload(ctx); 
err != nil { t.Fatal(err) } cp2 := x509.NewCertPool() if !cp2.AppendCertsFromPEM(cert2) { t.Fatalf("could not add certificate") } cfg2 := &tls.Config{ RootCAs: cp2, } conn2, err := tls.Dial("tcp", addr, cfg2) if err != nil { t.Fatal(err) } defer conn2.Close() // nolint state2 := conn2.ConnectionState() if certs := state2.PeerCertificates; len(certs) == 0 { t.Errorf("expected certificates, got %+v", state2) } else if len(certs[0].Subject.Organization) == 0 { t.Errorf("expected organization, got %s", certs[0].Subject) } else if certs[0].Subject.Organization[0] != org2 { t.Errorf("expected organization %s, got %s", org2, certs[0].Subject) } } func Test_GrpcServer_ReloadCA(t *testing.T) { serverKey, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } clientKey, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey) org1 := "Testing client" clientCert1 := GenerateSelfSignedCertificateForTesting(t, 1024, org1, clientKey) dir := t.TempDir() privkeyFile := path.Join(dir, "privkey.pem") pubkeyFile := path.Join(dir, "pubkey.pem") certFile := path.Join(dir, "cert.pem") caFile := path.Join(dir, "ca.pem") WritePrivateKey(serverKey, privkeyFile) // nolint WritePublicKey(&serverKey.PublicKey, pubkeyFile) // nolint os.WriteFile(certFile, serverCert, 0755) // nolint os.WriteFile(caFile, clientCert1, 0755) // nolint config := goconf.NewConfigFile() config.AddOption("grpc", "servercertificate", certFile) config.AddOption("grpc", "serverkey", privkeyFile) config.AddOption("grpc", "clientca", caFile) UpdateCertificateCheckIntervalForTest(t, 0) server, addr := NewGrpcServerForTestWithConfig(t, config) pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(serverCert) { t.Fatalf("could not add certificate") } pair1, err := tls.X509KeyPair(clientCert1, pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey), })) if 
err != nil { t.Fatal(err) } cfg1 := &tls.Config{ RootCAs: pool, Certificates: []tls.Certificate{pair1}, } client1, err := NewGrpcClient(addr, nil, grpc.WithTransportCredentials(credentials.NewTLS(cfg1))) if err != nil { t.Fatal(err) } defer client1.Close() // nolint ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second) defer cancel1() if _, err := client1.GetServerId(ctx1); err != nil { t.Fatal(err) } org2 := "Updated client" clientCert2 := GenerateSelfSignedCertificateForTesting(t, 1024, org2, clientKey) replaceFile(t, caFile, clientCert2, 0755) if err := server.WaitForCertPoolReload(ctx1); err != nil { t.Fatal(err) } pair2, err := tls.X509KeyPair(clientCert2, pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey), })) if err != nil { t.Fatal(err) } cfg2 := &tls.Config{ RootCAs: pool, Certificates: []tls.Certificate{pair2}, } client2, err := NewGrpcClient(addr, nil, grpc.WithTransportCredentials(credentials.NewTLS(cfg2))) if err != nil { t.Fatal(err) } defer client2.Close() // nolint ctx2, cancel2 := context.WithTimeout(context.Background(), time.Second) defer cancel2() // This will fail if the CA certificate has not been reloaded by the server. if _, err := client2.GetServerId(ctx2); err != nil { t.Fatal(err) } } nextcloud-spreed-signaling-1.2.4/grpc_sessions.proto000066400000000000000000000030451460321600400227200ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ syntax = "proto3"; option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"; package signaling; service RpcSessions { rpc LookupSessionId(LookupSessionIdRequest) returns (LookupSessionIdReply) {} rpc IsSessionInCall(IsSessionInCallRequest) returns (IsSessionInCallReply) {} } message LookupSessionIdRequest { string roomSessionId = 1; // Optional: set if the session should be disconnected with a given reason. string disconnectReason = 2; } message LookupSessionIdReply { string sessionId = 1; } message IsSessionInCallRequest { string sessionId = 1; string roomId = 2; string backendUrl = 3; } message IsSessionInCallReply { bool inCall = 1; } nextcloud-spreed-signaling-1.2.4/grpc_stats_prometheus.go000066400000000000000000000036001460321600400237220ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsGrpcClients = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "grpc", Name: "clients", Help: "The current number of GRPC clients", }) statsGrpcClientCalls = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "grpc", Name: "client_calls_total", Help: "The total number of GRPC client calls", }, []string{"method"}) grpcClientStats = []prometheus.Collector{ statsGrpcClients, statsGrpcClientCalls, } statsGrpcServerCalls = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "grpc", Name: "server_calls_total", Help: "The total number of GRPC server calls", }, []string{"method"}) grpcServerStats = []prometheus.Collector{ statsGrpcServerCalls, } ) func RegisterGrpcClientStats() { registerAll(grpcClientStats...) } func RegisterGrpcServerStats() { registerAll(grpcServerStats...) } nextcloud-spreed-signaling-1.2.4/http_client_pool.go000066400000000000000000000071311460321600400226470ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "crypto/tls" "errors" "fmt" "net/http" "net/url" "sync" "github.com/prometheus/client_golang/prometheus" ) func init() { RegisterHttpClientPoolStats() } type Pool struct { pool chan *http.Client currentConnections prometheus.Gauge } func (p *Pool) get(ctx context.Context) (client *http.Client, err error) { select { case <-ctx.Done(): return nil, ctx.Err() case client := <-p.pool: p.currentConnections.Inc() return client, nil } } func (p *Pool) Put(c *http.Client) { p.currentConnections.Dec() p.pool <- c } func newPool(host string, constructor func() *http.Client, size int) (*Pool, error) { if size <= 0 { return nil, fmt.Errorf("can't create empty pool") } p := &Pool{ pool: make(chan *http.Client, size), currentConnections: connectionsPerHostCurrent.WithLabelValues(host), } for size > 0 { c := constructor() p.pool <- c size-- } return p, nil } type HttpClientPool struct { mu sync.Mutex transport *http.Transport clients map[string]*Pool maxConcurrentRequestsPerHost int } func NewHttpClientPool(maxConcurrentRequestsPerHost int, skipVerify bool) (*HttpClientPool, error) { if maxConcurrentRequestsPerHost <= 0 { return nil, fmt.Errorf("can't create empty pool") } tlsconfig := &tls.Config{ InsecureSkipVerify: skipVerify, } transport := &http.Transport{ MaxIdleConnsPerHost: maxConcurrentRequestsPerHost, TLSClientConfig: tlsconfig, Proxy: http.ProxyFromEnvironment, } result := &HttpClientPool{ transport: transport, clients: make(map[string]*Pool), maxConcurrentRequestsPerHost: maxConcurrentRequestsPerHost, } return result, nil } func (p *HttpClientPool) getPool(url *url.URL) (*Pool, error) { p.mu.Lock() defer p.mu.Unlock() if pool, found := p.clients[url.Host]; found { return pool, nil } pool, err := newPool(url.Host, func() *http.Client { return &http.Client{ Transport: p.transport, // Only send body in redirect if going to same scheme / host. 
CheckRedirect: func(req *http.Request, via []*http.Request) error { if len(via) >= 10 { return errors.New("stopped after 10 redirects") } else if len(via) > 0 { viaReq := via[len(via)-1] if req.URL.Scheme != viaReq.URL.Scheme || req.URL.Host != viaReq.URL.Host { return ErrNotRedirecting } } return nil }, } }, p.maxConcurrentRequestsPerHost) if err != nil { return nil, err } p.clients[url.Host] = pool return pool, nil } func (p *HttpClientPool) Get(ctx context.Context, url *url.URL) (*http.Client, *Pool, error) { pool, err := p.getPool(url) if err != nil { return nil, nil, err } client, err := pool.get(ctx) if err != nil { return nil, nil, err } return client, pool, err } nextcloud-spreed-signaling-1.2.4/http_client_pool_stats_prometheus.go000066400000000000000000000025401460321600400263370ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2024 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( connectionsPerHostCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "http_client_pool", Name: "connections", Help: "The current number of HTTP client connections per host", }, []string{"host"}) httpClientPoolStats = []prometheus.Collector{ connectionsPerHostCurrent, } ) func RegisterHttpClientPoolStats() { registerAll(httpClientPoolStats...) } nextcloud-spreed-signaling-1.2.4/http_client_pool_test.go000066400000000000000000000042541460321600400237110ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "net/url" "testing" "time" ) func TestHttpClientPool(t *testing.T) { if _, err := NewHttpClientPool(0, false); err == nil { t.Error("should not be possible to create empty pool") } pool, err := NewHttpClientPool(1, false) if err != nil { t.Fatal(err) } u, err := url.Parse("http://localhost/foo/bar") if err != nil { t.Fatal(err) } ctx := context.Background() if _, _, err := pool.Get(ctx, u); err != nil { t.Fatal(err) } ctx2, cancel := context.WithTimeout(ctx, 10*time.Millisecond) defer cancel() if _, _, err := pool.Get(ctx2, u); err == nil { t.Error("fetching from empty pool should have timed out") } else if err != context.DeadlineExceeded { t.Errorf("fetching from empty pool should have timed out, got %s", err) } // Pools are separated by hostname, so can get client for different host. u2, err := url.Parse("http://local.host/foo/bar") if err != nil { t.Fatal(err) } if _, _, err := pool.Get(ctx, u2); err != nil { t.Fatal(err) } ctx3, cancel2 := context.WithTimeout(ctx, 10*time.Millisecond) defer cancel2() if _, _, err := pool.Get(ctx3, u2); err == nil { t.Error("fetching from empty pool should have timed out") } else if err != context.DeadlineExceeded { t.Errorf("fetching from empty pool should have timed out, got %s", err) } } nextcloud-spreed-signaling-1.2.4/hub.go000066400000000000000000002201201460321600400200520ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "bytes" "context" "crypto/ed25519" "crypto/hmac" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/hex" "encoding/json" "encoding/pem" "errors" "fmt" "hash/fnv" "log" "net" "net/http" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" "github.com/gorilla/mux" "github.com/gorilla/securecookie" "github.com/gorilla/websocket" ) var ( DuplicateClient = NewError("duplicate_client", "Client already registered.") HelloExpected = NewError("hello_expected", "Expected Hello request.") InvalidHelloVersion = NewError("invalid_hello_version", "The hello version is not supported.") UserAuthFailed = NewError("auth_failed", "The user could not be authenticated.") RoomJoinFailed = NewError("room_join_failed", "Could not join the room.") InvalidClientType = NewError("invalid_client_type", "The client type is not supported.") InvalidBackendUrl = NewError("invalid_backend", "The backend URL is not supported.") InvalidToken = NewError("invalid_token", "The passed token is invalid.") NoSuchSession = NewError("no_such_session", "The session to resume does not exist.") TokenNotValidYet = NewError("token_not_valid_yet", "The token is not valid yet.") TokenExpired = NewError("token_expired", "The token is expired.") // Maximum number of concurrent requests to a backend. defaultMaxConcurrentRequestsPerHost = 8 // Backend requests will be cancelled if they take too long. defaultBackendTimeoutSeconds = 10 // MCU requests will be cancelled if they take too long. 
	defaultMcuTimeoutSeconds = 10

	// New connections have to send a "Hello" request after 2 seconds.
	initialHelloTimeout = 2 * time.Second

	// Anonymous clients have to join a room after 10 seconds.
	// NOTE(review): name contains a typo ("anonmyous"); renaming would touch
	// every user of the constant, so it is only flagged here.
	anonmyousJoinRoomTimeout = 10 * time.Second

	// Run housekeeping jobs once per second
	housekeepingInterval = time.Second

	// Number of decoded session ids to keep.
	decodeCacheSize = 8192

	// Minimum length of random data for tokens.
	minTokenRandomLength = 32

	// Number of caches to use for keeping decoded session ids. The cache will
	// be selected based on the cache key to avoid lock contention.
	numDecodeCaches = 32

	// Buffer sizes when reading/writing websocket connections.
	websocketReadBufferSize  = 4096
	websocketWriteBufferSize = 4096

	// Delay after which a screen publisher should be cleaned up.
	cleanupScreenPublisherDelay = time.Second

	// Delay after which a "cleared" / "rejected" dialout status should be removed.
	removeCallStatusTTL = 5 * time.Second
)

// Names of the two securecookie codecs used to encode/decode session ids.
const (
	privateSessionName = "private-session"
	publicSessionName  = "public-session"
)

func init() {
	RegisterHubStats()
}

// Hub is the central registry of the signaling server. It owns all connected
// clients, sessions and rooms, dispatches backend notifications from Nextcloud
// and performs periodic housekeeping.
type Hub struct {
	events   AsyncEvents
	upgrader websocket.Upgrader
	// cookie encodes/decodes private and public session ids.
	cookie       *securecookie.SecureCookie
	info         *WelcomeServerMessage
	infoInternal *WelcomeServerMessage
	// welcome holds the current *ServerMessage sent to new connections.
	welcome atomic.Value // *ServerMessage

	closer          *Closer
	readPumpActive  atomic.Int32
	writePumpActive atomic.Int32

	// Channels feeding backend notifications into the Run loop.
	roomUpdated      chan *BackendServerRoomRequest
	roomDeleted      chan *BackendServerRoomRequest
	roomInCall       chan *BackendServerRoomRequest
	roomParticipants chan *BackendServerRoomRequest

	// mu guards clients/sessions and the session-expiry maps below,
	// ru guards the rooms map.
	mu sync.RWMutex
	ru sync.RWMutex

	// sid is the monotonically increasing session id counter.
	sid      atomic.Uint64
	clients  map[uint64]*Client
	sessions map[uint64]Session
	rooms    map[string]*Room

	roomSessions    RoomSessions
	roomPing        *RoomPing
	virtualSessions map[string]uint64

	// Sharded caches for decoded session ids (see getDecodeCache).
	decodeCaches []*LruCache

	mcu                     Mcu
	mcuTimeout              time.Duration
	internalClientsSecret   []byte
	allowSubscribeAnyStream bool

	expiredSessions    map[Session]bool
	anonymousSessions  map[*ClientSession]time.Time
	expectHelloClients map[*Client]time.Time
	dialoutSessions    map[*ClientSession]bool

	backendTimeout time.Duration
	backend        *BackendClient

	geoip          *GeoLookup
	geoipOverrides map[*net.IPNet]string
	geoipUpdating  atomic.Bool

	rpcServer  *GrpcServer
	rpcClients *GrpcClients
}

// NewHub creates the hub from the configuration file, wires it to the backend
// client and gRPC server/clients and registers the "/spreed" websocket route
// on the given router.
func NewHub(config *goconf.ConfigFile, events AsyncEvents, rpcServer *GrpcServer, rpcClients *GrpcClients, etcdClient *EtcdClient, r *mux.Router, version string) (*Hub, error) {
	// A weak hash key is only warned about; a wrong-sized block key is fatal
	// because securecookie would reject it.
	hashKey, _ := config.GetString("sessions", "hashkey")
	switch len(hashKey) {
	case 32:
	case 64:
	default:
		log.Printf("WARNING: The sessions hash key should be 32 or 64 bytes but is %d bytes", len(hashKey))
	}

	blockKey, _ := config.GetString("sessions", "blockkey")
	blockBytes := []byte(blockKey)
	switch len(blockKey) {
	case 0:
		blockBytes = nil
	case 16:
	case 24:
	case 32:
	default:
		return nil, fmt.Errorf("the sessions block key must be 16, 24 or 32 bytes but is %d bytes", len(blockKey))
	}

	internalClientsSecret, _ := config.GetString("clients", "internalsecret")
	if internalClientsSecret == "" {
		log.Println("WARNING: No shared secret has been set for internal clients.")
	}

	maxConcurrentRequestsPerHost, _ := config.GetInt("backend", "connectionsperhost")
	if maxConcurrentRequestsPerHost <= 0 {
		maxConcurrentRequestsPerHost = defaultMaxConcurrentRequestsPerHost
	}

	backend, err := NewBackendClient(config, maxConcurrentRequestsPerHost, version, etcdClient)
	if err != nil {
		return nil, err
	}
	log.Printf("Using a maximum of %d concurrent backend connections per host", maxConcurrentRequestsPerHost)

	backendTimeoutSeconds, _ := config.GetInt("backend", "timeout")
	if backendTimeoutSeconds <= 0 {
		backendTimeoutSeconds = defaultBackendTimeoutSeconds
	}
	backendTimeout := time.Duration(backendTimeoutSeconds) * time.Second
	log.Printf("Using a timeout of %s for backend connections", backendTimeout)

	mcuTimeoutSeconds, _ := config.GetInt("mcu", "timeout")
	if mcuTimeoutSeconds <= 0 {
		mcuTimeoutSeconds = defaultMcuTimeoutSeconds
	}
	mcuTimeout := time.Duration(mcuTimeoutSeconds) * time.Second

	allowSubscribeAnyStream, _ :=
config.GetBool("app", "allowsubscribeany")
	if allowSubscribeAnyStream {
		log.Printf("WARNING: Allow subscribing any streams, this is insecure and should only be enabled for testing")
	}

	decodeCaches := make([]*LruCache, 0, numDecodeCaches)
	for i := 0; i < numDecodeCaches; i++ {
		decodeCaches = append(decodeCaches, NewLruCache(decodeCacheSize))
	}

	roomSessions, err := NewBuiltinRoomSessions(rpcClients)
	if err != nil {
		return nil, err
	}

	roomPing, err := NewRoomPing(backend, backend.capabilities)
	if err != nil {
		return nil, err
	}

	// GeoIP database: explicit URL, "file://" path, or derived from a
	// MaxMind license key. "default"/"none" disable the explicit URL.
	geoipUrl, _ := config.GetString("geoip", "url")
	if geoipUrl == "default" || geoipUrl == "none" {
		geoipUrl = ""
	}
	if geoipUrl == "" {
		if geoipLicense, _ := config.GetString("geoip", "license"); geoipLicense != "" {
			geoipUrl = GetGeoIpDownloadUrl(geoipLicense)
		}
	}

	var geoip *GeoLookup
	var geoipOverrides map[*net.IPNet]string
	if geoipUrl != "" {
		if strings.HasPrefix(geoipUrl, "file://") {
			geoipUrl = geoipUrl[7:]
			log.Printf("Using GeoIP database from %s", geoipUrl)
			geoip, err = NewGeoLookupFromFile(geoipUrl)
		} else {
			log.Printf("Downloading GeoIP database from %s", geoipUrl)
			geoip, err = NewGeoLookupFromUrl(geoipUrl)
		}
		if err != nil {
			return nil, err
		}
	} else {
		log.Printf("Not using GeoIP database")
	}

	// Optional static country overrides, keyed by single IP or CIDR range.
	if options, _ := GetStringOptions(config, "geoip-overrides", true); len(options) > 0 {
		geoipOverrides = make(map[*net.IPNet]string, len(options))
		for option, value := range options {
			var ip net.IP
			var ipNet *net.IPNet
			if strings.Contains(option, "/") {
				_, ipNet, err = net.ParseCIDR(option)
				if err != nil {
					return nil, fmt.Errorf("could not parse CIDR %s: %s", option, err)
				}
			} else {
				ip = net.ParseIP(option)
				if ip == nil {
					return nil, fmt.Errorf("could not parse IP %s", option)
				}

				// Single addresses are stored as host-sized networks.
				var mask net.IPMask
				if ipv4 := ip.To4(); ipv4 != nil {
					mask = net.CIDRMask(32, 32)
				} else {
					mask = net.CIDRMask(128, 128)
				}
				ipNet = &net.IPNet{
					IP:   ip,
					Mask: mask,
				}
			}

			value = strings.ToUpper(strings.TrimSpace(value))
			if value == "" {
				log.Printf("IP %s doesn't have a country assigned, skipping", option)
				continue
			} else if !IsValidCountry(value) {
				log.Printf("Country %s for IP %s is invalid, skipping", value, option)
				continue
			}

			log.Printf("Using country %s for %s", value, ipNet)
			geoipOverrides[ipNet] = value
		}
	}

	hub := &Hub{
		events: events,
		upgrader: websocket.Upgrader{
			ReadBufferSize:  websocketReadBufferSize,
			WriteBufferSize: websocketWriteBufferSize,
		},
		cookie:       securecookie.New([]byte(hashKey), blockBytes).MaxAge(0),
		info:         NewWelcomeServerMessage(version, DefaultFeatures...),
		infoInternal: NewWelcomeServerMessage(version, DefaultFeaturesInternal...),

		closer: NewCloser(),

		roomUpdated:      make(chan *BackendServerRoomRequest),
		roomDeleted:      make(chan *BackendServerRoomRequest),
		roomInCall:       make(chan *BackendServerRoomRequest),
		roomParticipants: make(chan *BackendServerRoomRequest),

		clients:  make(map[uint64]*Client),
		sessions: make(map[uint64]Session),
		rooms:    make(map[string]*Room),

		roomSessions:    roomSessions,
		roomPing:        roomPing,
		virtualSessions: make(map[string]uint64),

		decodeCaches: decodeCaches,

		mcuTimeout:              mcuTimeout,
		internalClientsSecret:   []byte(internalClientsSecret),
		allowSubscribeAnyStream: allowSubscribeAnyStream,

		expiredSessions:    make(map[Session]bool),
		anonymousSessions:  make(map[*ClientSession]time.Time),
		expectHelloClients: make(map[*Client]time.Time),
		dialoutSessions:    make(map[*ClientSession]bool),

		backendTimeout: backendTimeout,
		backend:        backend,

		geoip:          geoip,
		geoipOverrides: geoipOverrides,

		rpcServer:  rpcServer,
		rpcClients: rpcClients,
	}
	hub.setWelcomeMessage(&ServerMessage{
		Type:    "welcome",
		Welcome: NewWelcomeServerMessage(version, DefaultWelcomeFeatures...),
	})
	backend.hub = hub
	if rpcServer != nil {
		rpcServer.hub = hub
	}
	hub.upgrader.CheckOrigin = hub.checkOrigin
	r.HandleFunc("/spreed", func(w http.ResponseWriter, r *http.Request) {
		hub.serveWs(w, r)
	})

	return hub, nil
}

// setWelcomeMessage atomically replaces the welcome message sent to new clients.
func (h *Hub) setWelcomeMessage(msg *ServerMessage) {
	h.welcome.Store(msg)
}

// getWelcomeMessage returns the current welcome message.
func (h *Hub) getWelcomeMessage() *ServerMessage {
	return h.welcome.Load().(*ServerMessage)
}

// SetMcu attaches (or detaches, if nil) the MCU and updates the advertised
// feature lists accordingly.
func (h *Hub) SetMcu(mcu Mcu) {
	h.mcu = mcu
	// Create copy of message so it can be updated concurrently.
	welcome := *h.getWelcomeMessage()
	if mcu == nil {
		h.info.RemoveFeature(ServerFeatureMcu, ServerFeatureSimulcast, ServerFeatureUpdateSdp)
		h.infoInternal.RemoveFeature(ServerFeatureMcu, ServerFeatureSimulcast, ServerFeatureUpdateSdp)
		welcome.Welcome.RemoveFeature(ServerFeatureMcu, ServerFeatureSimulcast, ServerFeatureUpdateSdp)
	} else {
		log.Printf("Using a timeout of %s for MCU requests", h.mcuTimeout)
		h.info.AddFeature(ServerFeatureMcu, ServerFeatureSimulcast, ServerFeatureUpdateSdp)
		h.infoInternal.AddFeature(ServerFeatureMcu, ServerFeatureSimulcast, ServerFeatureUpdateSdp)
		welcome.Welcome.AddFeature(ServerFeatureMcu, ServerFeatureSimulcast, ServerFeatureUpdateSdp)
	}
	h.setWelcomeMessage(&welcome)
}

// checkOrigin is the websocket upgrader origin check.
func (h *Hub) checkOrigin(r *http.Request) bool {
	// We allow any Origin to connect to the service.
	return true
}

// GetServerInfo returns the feature/version information matching the type of
// the given session (internal clients get the internal feature set).
func (h *Hub) GetServerInfo(session Session) *WelcomeServerMessage {
	if session.ClientType() == HelloClientTypeInternal {
		return h.infoInternal
	}
	return h.info
}

// updateGeoDatabase refreshes the GeoIP database, retrying with exponential
// backoff until it succeeds or the hub is closed. Concurrent calls are
// collapsed via the geoipUpdating flag.
func (h *Hub) updateGeoDatabase() {
	if h.geoip == nil {
		return
	}

	if !h.geoipUpdating.CompareAndSwap(false, true) {
		// Already updating
		return
	}

	defer h.geoipUpdating.Store(false)
	backoff, err := NewExponentialBackoff(time.Second, 5*time.Minute)
	if err != nil {
		log.Printf("Could not create exponential backoff: %s", err)
		return
	}

	for !h.closer.IsClosed() {
		err := h.geoip.Update()
		if err == nil {
			break
		}

		log.Printf("Could not update GeoIP database, will retry in %s (%s)", backoff.NextWait(), err)
		backoff.Wait(context.Background())
	}
}

// Run is the hub main loop: it dispatches backend room notifications, runs
// housekeeping once per second and refreshes the GeoIP database daily.
// It blocks until Stop is called.
func (h *Hub) Run() {
	go h.updateGeoDatabase()
	h.roomPing.Start()
	defer h.roomPing.Stop()
	defer h.backend.Close()

	housekeeping := time.NewTicker(housekeepingInterval)
	geoipUpdater := time.NewTicker(24 * time.Hour)

loop:
	for {
		select {
		// Backend notifications from Nextcloud.
case message := <-h.roomUpdated:
			h.processRoomUpdated(message)
		case message := <-h.roomDeleted:
			h.processRoomDeleted(message)
		case message := <-h.roomInCall:
			h.processRoomInCallChanged(message)
		case message := <-h.roomParticipants:
			h.processRoomParticipants(message)
		// Periodic internal housekeeping.
		case now := <-housekeeping.C:
			h.performHousekeeping(now)
		case <-geoipUpdater.C:
			go h.updateGeoDatabase()
		case <-h.closer.C:
			break loop
		}
	}

	if h.geoip != nil {
		h.geoip.Close()
	}
}

// Stop signals the Run loop to terminate.
func (h *Hub) Stop() {
	h.closer.Close()
}

// Reload propagates a changed configuration to the MCU, the backend client
// and the gRPC clients.
func (h *Hub) Reload(config *goconf.ConfigFile) {
	if h.mcu != nil {
		h.mcu.Reload(config)
	}
	h.backend.Reload(config)
	h.rpcClients.Reload(config)
}

// reverseSessionId decodes the base64 session id, reverses its bytes and
// re-encodes it. Returns an error if "s" is not valid URL-safe base64.
func reverseSessionId(s string) (string, error) {
	// Note that we are assuming base64 encoded strings here.
	decoded, err := base64.URLEncoding.DecodeString(s)
	if err != nil {
		return "", err
	}

	for i, j := 0, len(decoded)-1; i < j; i, j = i+1, j-1 {
		decoded[i], decoded[j] = decoded[j], decoded[i]
	}
	return base64.URLEncoding.EncodeToString(decoded), nil
}

// encodeSessionId encodes the session data with the securecookie codec for
// the given session type ("private-session" / "public-session").
func (h *Hub) encodeSessionId(data *SessionIdData, sessionType string) (string, error) {
	encoded, err := h.cookie.Encode(sessionType, data)
	if err != nil {
		return "", err
	}
	if sessionType == publicSessionName {
		// We are reversing the public session ids because clients compare them
		// to decide who calls whom. The prefix of the session id is increasing
		// (a timestamp) but the suffix the (random) hash.
		// By reversing we move the hash to the front, making the comparison of
		// session ids "random".
encoded, err = reverseSessionId(encoded)
	}
	return encoded, err
}

// getDecodeCache selects one of the sharded decode caches based on a hash of
// the cache key to reduce lock contention.
func (h *Hub) getDecodeCache(cache_key string) *LruCache {
	hash := fnv.New32a()
	hash.Write([]byte(cache_key)) // nolint
	idx := hash.Sum32() % uint32(len(h.decodeCaches))
	return h.decodeCaches[idx]
}

// invalidateSessionId drops a cached decode result for the given id/type.
func (h *Hub) invalidateSessionId(id string, sessionType string) {
	if len(id) == 0 {
		return
	}

	cache_key := id + "|" + sessionType
	cache := h.getDecodeCache(cache_key)
	cache.Remove(cache_key)
}

// setDecodedSessionId stores an already-decoded session id in the cache.
func (h *Hub) setDecodedSessionId(id string, sessionType string, data *SessionIdData) {
	if len(id) == 0 {
		return
	}

	cache_key := id + "|" + sessionType
	cache := h.getDecodeCache(cache_key)
	cache.Set(cache_key, data)
}

// decodeSessionId decodes a session id of the given type, using the LRU cache
// when possible. Public ids are un-reversed first (see encodeSessionId).
// Returns nil if the id can not be decoded.
func (h *Hub) decodeSessionId(id string, sessionType string) *SessionIdData {
	if len(id) == 0 {
		return nil
	}

	cache_key := id + "|" + sessionType
	cache := h.getDecodeCache(cache_key)
	if result := cache.Get(cache_key); result != nil {
		return result.(*SessionIdData)
	}

	if sessionType == publicSessionName {
		var err error
		id, err = reverseSessionId(id)
		if err != nil {
			return nil
		}
	}

	var data SessionIdData
	if h.cookie.Decode(sessionType, id, &data) != nil {
		return nil
	}

	cache.Set(cache_key, &data)
	return &data
}

// GetSessionByPublicId returns the local session with the given public id,
// or nil if the id is invalid or the session lives on another server.
func (h *Hub) GetSessionByPublicId(sessionId string) Session {
	data := h.decodeSessionId(sessionId, publicSessionName)
	if data == nil {
		return nil
	}

	h.mu.RLock()
	defer h.mu.RUnlock()
	session := h.sessions[data.Sid]
	if session != nil && session.PublicId() != sessionId {
		// Session was created on different server.
return nil
	}

	return session
}

// GetDialoutSession returns a connected dialout-capable session registered
// for the given backend, or nil if there is none.
// NOTE(review): the roomId parameter is currently unused — any connected
// dialout session of the backend is returned regardless of room.
func (h *Hub) GetDialoutSession(roomId string, backend *Backend) *ClientSession {
	url := backend.Url()

	h.mu.RLock()
	defer h.mu.RUnlock()
	for session := range h.dialoutSessions {
		if session.backend.Url() != url {
			continue
		}

		if session.GetClient() != nil {
			return session
		}
	}

	return nil
}

// checkExpiredSessions closes sessions whose expiry has passed.
// Caller must hold h.mu; the lock is temporarily released around Close()
// because closing a session re-acquires hub locks.
func (h *Hub) checkExpiredSessions(now time.Time) {
	for s := range h.expiredSessions {
		if s.IsExpired(now) {
			h.mu.Unlock()
			log.Printf("Closing expired session %s (private=%s)", s.PublicId(), s.PrivateId())
			s.Close()
			h.mu.Lock()
			// Should already be deleted by the close code, but better be sure.
			delete(h.expiredSessions, s)
		}
	}
}

// checkAnonymousSessions disconnects anonymous sessions that did not join a
// room within their timeout. Caller must hold h.mu (released around Close).
func (h *Hub) checkAnonymousSessions(now time.Time) {
	for session, timeout := range h.anonymousSessions {
		if now.After(timeout) {
			// This will close the client connection.
			h.mu.Unlock()
			if client := session.GetClient(); client != nil {
				client.SendByeResponseWithReason(nil, "room_join_timeout")
			}
			session.Close()
			h.mu.Lock()
		}
	}
}

// checkInitialHello disconnects clients that did not send a "hello" request
// within initialHelloTimeout. Caller must hold h.mu.
func (h *Hub) checkInitialHello(now time.Time) {
	for client, timeout := range h.expectHelloClients {
		if now.After(timeout) {
			// This will close the client connection.
h.mu.Unlock()
			client.SendByeResponseWithReason(nil, "hello_timeout")
			h.mu.Lock()
		}
	}
}

// performHousekeeping runs the periodic expiry checks under the hub lock.
func (h *Hub) performHousekeeping(now time.Time) {
	h.mu.Lock()
	h.checkExpiredSessions(now)
	h.checkAnonymousSessions(now)
	h.checkInitialHello(now)
	h.mu.Unlock()
}

// removeSession detaches the session from its room, invalidates its cached
// ids and removes it from all hub maps. Reports whether the session was
// still registered.
func (h *Hub) removeSession(session Session) (removed bool) {
	session.LeaveRoom(true)
	h.invalidateSessionId(session.PrivateId(), privateSessionName)
	h.invalidateSessionId(session.PublicId(), publicSessionName)

	h.mu.Lock()
	if data := session.Data(); data != nil && data.Sid > 0 {
		delete(h.clients, data.Sid)
		if _, found := h.sessions[data.Sid]; found {
			delete(h.sessions, data.Sid)
			statsHubSessionsCurrent.WithLabelValues(session.Backend().Id(), session.ClientType()).Dec()
			removed = true
		}
	}
	delete(h.expiredSessions, session)
	if session, ok := session.(*ClientSession); ok {
		delete(h.anonymousSessions, session)
		delete(h.dialoutSessions, session)
	}
	h.mu.Unlock()
	return
}

// startWaitAnonymousSessionRoom schedules the room-join timeout for an
// anonymous session (locking wrapper).
func (h *Hub) startWaitAnonymousSessionRoom(session *ClientSession) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.startWaitAnonymousSessionRoomLocked(session)
}

// startWaitAnonymousSessionRoomLocked is the same as above; caller must hold h.mu.
func (h *Hub) startWaitAnonymousSessionRoomLocked(session *ClientSession) {
	if session.ClientType() == HelloClientTypeInternal {
		// Internal clients don't need to join a room.
		return
	}

	// Anonymous sessions must join a public room within a given time,
	// otherwise they get disconnected to avoid blocking resources forever.
	now := time.Now()
	h.anonymousSessions[session] = now.Add(anonmyousJoinRoomTimeout)
}

// startExpectHello arms the "hello" timeout for a freshly connected (or
// re-challenged) client that is not authenticated yet.
func (h *Hub) startExpectHello(client *Client) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if !client.IsConnected() {
		return
	}

	client.mu.Lock()
	defer client.mu.Unlock()
	if client.IsAuthenticated() {
		return
	}

	// Clients must send a "Hello" request to get a session within a given time.
now := time.Now()
	h.expectHelloClients[client] = now.Add(initialHelloTimeout)
}

// processNewClient starts the hello timeout and sends the welcome message to
// a newly accepted connection.
func (h *Hub) processNewClient(client *Client) {
	h.startExpectHello(client)
	h.sendWelcome(client)
}

func (h *Hub) sendWelcome(client *Client) {
	client.SendMessage(h.getWelcomeMessage())
}

// newSessionIdData allocates the next non-zero session id and wraps it with
// creation time and backend id.
func (h *Hub) newSessionIdData(backend *Backend) *SessionIdData {
	sid := h.sid.Add(1)
	for sid == 0 {
		sid = h.sid.Add(1)
	}
	sessionIdData := &SessionIdData{
		Sid:       sid,
		Created:   time.Now(),
		BackendId: backend.Id(),
	}
	return sessionIdData
}

// processRegister creates a new session for a client after the auth backend
// accepted its "hello" request, enforcing the backend's session limit.
func (h *Hub) processRegister(client *Client, message *ClientMessage, backend *Backend, auth *BackendClientResponse) {
	if !client.IsConnected() {
		// Client disconnected while waiting for "hello" response.
		return
	}

	if auth.Type == "error" {
		client.SendMessage(message.NewErrorServerMessage(auth.Error))
		return
	} else if auth.Type != "auth" {
		client.SendMessage(message.NewErrorServerMessage(UserAuthFailed))
		return
	}

	sessionIdData := h.newSessionIdData(backend)
	privateSessionId, err := h.encodeSessionId(sessionIdData, privateSessionName)
	if err != nil {
		client.SendMessage(message.NewWrappedErrorServerMessage(err))
		return
	}
	publicSessionId, err := h.encodeSessionId(sessionIdData, publicSessionName)
	if err != nil {
		client.SendMessage(message.NewWrappedErrorServerMessage(err))
		return
	}

	userId := auth.Auth.UserId
	if userId != "" {
		log.Printf("Register user %s@%s from %s in %s (%s) %s (private=%s)", userId, backend.Id(), client.RemoteAddr(), client.Country(), client.UserAgent(), publicSessionId, privateSessionId)
	} else if message.Hello.Auth.Type != HelloClientTypeClient {
		log.Printf("Register %s@%s from %s in %s (%s) %s (private=%s)", message.Hello.Auth.Type, backend.Id(), client.RemoteAddr(), client.Country(), client.UserAgent(), publicSessionId, privateSessionId)
	} else {
		log.Printf("Register anonymous@%s from %s in %s (%s) %s (private=%s)", backend.Id(), client.RemoteAddr(), client.Country(), client.UserAgent(), publicSessionId, privateSessionId)
	}

	session, err :=
NewClientSession(h, privateSessionId, publicSessionId, sessionIdData, backend, message.Hello, auth.Auth)
	if err != nil {
		client.SendMessage(message.NewWrappedErrorServerMessage(err))
		return
	}

	if err := backend.AddSession(session); err != nil {
		log.Printf("Error adding session %s to backend %s: %s", session.PublicId(), backend.Id(), err)
		session.Close()
		client.SendMessage(message.NewWrappedErrorServerMessage(err))
		return
	}

	// If the backend has a session limit, sum the local count with the counts
	// reported by all other signaling servers (queried in parallel via gRPC
	// with a one second budget) and reject the session if the limit is hit.
	if limit := uint32(backend.Limit()); limit > 0 && h.rpcClients != nil {
		var totalCount atomic.Uint32
		totalCount.Add(uint32(backend.Len()))
		var wg sync.WaitGroup
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		for _, client := range h.rpcClients.GetClients() {
			wg.Add(1)
			go func(c *GrpcClient) {
				defer wg.Done()

				count, err := c.GetSessionCount(ctx, backend.ParsedUrl())
				if err != nil {
					log.Printf("Received error while getting session count for %s from %s: %s", backend.Url(), c.Target(), err)
					return
				}

				if count > 0 {
					log.Printf("%d sessions connected for %s on %s", count, backend.Url(), c.Target())
					totalCount.Add(count)
				}
			}(client)
		}
		wg.Wait()
		if totalCount.Load() > limit {
			backend.RemoveSession(session)
			log.Printf("Error adding session %s to backend %s: %s", session.PublicId(), backend.Id(), SessionLimitExceeded)
			session.Close()
			client.SendMessage(message.NewWrappedErrorServerMessage(SessionLimitExceeded))
			return
		}
	}

	h.mu.Lock()
	if !client.IsConnected() {
		// Client disconnected while waiting for backend response.
h.mu.Unlock()

		session.Close()
		return
	}

	session.SetClient(client)
	h.sessions[sessionIdData.Sid] = session
	h.clients[sessionIdData.Sid] = client
	delete(h.expectHelloClients, client)
	if userId == "" && session.ClientType() != HelloClientTypeInternal {
		h.startWaitAnonymousSessionRoomLocked(session)
	} else if session.ClientType() == HelloClientTypeInternal && session.HasFeature(ClientFeatureStartDialout) {
		// TODO: There is a small race condition for sessions that take some time
		// between connecting and joining a room.
		h.dialoutSessions[session] = true
	}
	h.mu.Unlock()

	if country := client.Country(); IsValidCountry(country) {
		statsClientCountries.WithLabelValues(country).Inc()
	}
	statsHubSessionsCurrent.WithLabelValues(backend.Id(), session.ClientType()).Inc()
	statsHubSessionsTotal.WithLabelValues(backend.Id(), session.ClientType()).Inc()

	h.setDecodedSessionId(privateSessionId, privateSessionName, sessionIdData)
	h.setDecodedSessionId(publicSessionId, publicSessionName, sessionIdData)
	h.sendHelloResponse(session, message)
}

// processUnregister detaches a disconnected client from its session. The
// session itself is kept alive (StartExpire) so it can be resumed later.
func (h *Hub) processUnregister(client *Client) *ClientSession {
	session := client.GetSession()

	h.mu.Lock()
	delete(h.expectHelloClients, client)
	if session != nil {
		delete(h.clients, session.Data().Sid)
		session.StartExpire()
	}
	h.mu.Unlock()
	if session != nil {
		log.Printf("Unregister %s (private=%s)", session.PublicId(), session.PrivateId())
		session.ClearClient(client)
	}

	client.Close()
	return session
}

// processMessage decodes and validates an incoming websocket frame and
// dispatches it by message type. Before a session exists only "hello" is
// accepted.
func (h *Hub) processMessage(client *Client, data []byte) {
	var message ClientMessage
	if err := message.UnmarshalJSON(data); err != nil {
		if session := client.GetSession(); session != nil {
			log.Printf("Error decoding message from client %s: %v", session.PublicId(), err)
			session.SendError(InvalidFormat)
		} else {
			log.Printf("Error decoding message from %s: %v", client.RemoteAddr(), err)
			client.SendError(InvalidFormat)
		}
		return
	}

	if err := message.CheckValid(); err != nil {
		if session := client.GetSession(); session != nil {
			log.Printf("Invalid message %+v from client %s: %v", message, session.PublicId(), err)
			if err, ok := err.(*Error); ok {
				session.SendMessage(message.NewErrorServerMessage(err))
			} else {
				session.SendMessage(message.NewErrorServerMessage(InvalidFormat))
			}
		} else {
			log.Printf("Invalid message %+v from %s: %v", message, client.RemoteAddr(), err)
			if err, ok := err.(*Error); ok {
				client.SendMessage(message.NewErrorServerMessage(err))
			} else {
				client.SendMessage(message.NewErrorServerMessage(InvalidFormat))
			}
		}
		return
	}

	statsMessagesTotal.WithLabelValues(message.Type).Inc()

	session := client.GetSession()
	if session == nil {
		if message.Type != "hello" {
			client.SendMessage(message.NewErrorServerMessage(HelloExpected))
			return
		}

		h.processHello(client, &message)
		return
	}

	switch message.Type {
	case "room":
		h.processRoom(client, &message)
	case "message":
		h.processMessageMsg(client, &message)
	case "control":
		h.processControlMsg(client, &message)
	case "internal":
		h.processInternalMsg(client, &message)
	case "transient":
		h.processTransientMsg(client, &message)
	case "bye":
		h.processByeMsg(client, &message)
	case "hello":
		log.Printf("Ignore hello %+v for already authenticated connection %s", message.Hello, session.PublicId())
	default:
		log.Printf("Ignore unknown message %+v from %s", message, session.PublicId())
	}
}

// sendHelloResponse sends the "hello" reply containing the new/resumed
// session's public id, resume id, user id and the server feature info.
func (h *Hub) sendHelloResponse(session *ClientSession, message *ClientMessage) bool {
	response := &ServerMessage{
		Id:   message.Id,
		Type: "hello",
		Hello: &HelloServerMessage{
			Version:   message.Hello.Version,
			SessionId: session.PublicId(),
			ResumeId:  session.PrivateId(),
			UserId:    session.UserId(),
			Server:    h.GetServerInfo(session),
		},
	}
	return session.SendMessage(response)
}

// processHello handles a "hello" request: either resumes an existing session
// via its resume id, or authenticates a new client/internal session.
func (h *Hub) processHello(client *Client, message *ClientMessage) {
	resumeId := message.Hello.ResumeId
	if resumeId != "" {
		data := h.decodeSessionId(resumeId, privateSessionName)
		if data == nil {
			statsHubSessionResumeFailed.Inc()
			client.SendMessage(message.NewErrorServerMessage(NoSuchSession))
			return
		}

		h.mu.Lock()
		session, found :=
h.sessions[data.Sid]
		if !found || resumeId != session.PrivateId() {
			h.mu.Unlock()
			statsHubSessionResumeFailed.Inc()
			client.SendMessage(message.NewErrorServerMessage(NoSuchSession))
			return
		}

		clientSession, ok := session.(*ClientSession)
		if !ok {
			// Should never happen as clients only can resume their own sessions.
			h.mu.Unlock()
			log.Printf("Client resumed non-client session %s (private=%s)", session.PublicId(), session.PrivateId())
			statsHubSessionResumeFailed.Inc()
			client.SendMessage(message.NewErrorServerMessage(NoSuchSession))
			return
		}

		if !client.IsConnected() {
			// Client disconnected while checking message.
			h.mu.Unlock()
			return
		}

		// A session can only have one connection; kick the previous one.
		if prev := clientSession.SetClient(client); prev != nil {
			log.Printf("Closing previous client from %s for session %s", prev.RemoteAddr(), session.PublicId())
			prev.SendByeResponseWithReason(nil, "session_resumed")
		}
		clientSession.StopExpire()
		h.clients[data.Sid] = client
		delete(h.expectHelloClients, client)
		h.mu.Unlock()

		log.Printf("Resume session from %s in %s (%s) %s (private=%s)", client.RemoteAddr(), client.Country(), client.UserAgent(), session.PublicId(), session.PrivateId())

		statsHubSessionsResumedTotal.WithLabelValues(clientSession.Backend().Id(), clientSession.ClientType()).Inc()
		h.sendHelloResponse(clientSession, message)
		clientSession.NotifySessionResumed(client)
		return
	}

	// Make sure client doesn't get disconnected while calling auth backend.
	h.mu.Lock()
	delete(h.expectHelloClients, client)
	h.mu.Unlock()

	switch message.Hello.Auth.Type {
	case HelloClientTypeClient:
		h.processHelloClient(client, message)
	case HelloClientTypeInternal:
		h.processHelloInternal(client, message)
	default:
		h.startExpectHello(client)
		client.SendMessage(message.NewErrorServerMessage(InvalidClientType))
	}
}

// processHelloV1 authenticates a v1 "hello" by forwarding the auth ticket to
// the Nextcloud backend for validation.
func (h *Hub) processHelloV1(client *Client, message *ClientMessage) (*Backend, *BackendClientResponse, error) {
	url := message.Hello.Auth.parsedUrl
	backend := h.backend.GetBackend(url)
	if backend == nil {
		return nil, nil, InvalidBackendUrl
	}

	// Run in timeout context to prevent blocking too long.
	ctx, cancel := context.WithTimeout(context.Background(), h.backendTimeout)
	defer cancel()

	var auth BackendClientResponse
	request := NewBackendClientAuthRequest(message.Hello.Auth.Params)
	if err := h.backend.PerformJSONRequest(ctx, url, request, &auth); err != nil {
		return nil, nil, err
	}

	// TODO(jojo): Validate response

	return backend, &auth, nil
}

// processHelloV2 authenticates a v2 "hello" by validating a JWT signed by the
// Nextcloud instance; the public key is fetched from the backend capabilities.
func (h *Hub) processHelloV2(client *Client, message *ClientMessage) (*Backend, *BackendClientResponse, error) {
	url := message.Hello.Auth.parsedUrl
	backend := h.backend.GetBackend(url)
	if backend == nil {
		return nil, nil, InvalidBackendUrl
	}

	token, err := jwt.ParseWithClaims(message.Hello.Auth.helloV2Params.Token, &HelloV2TokenClaims{}, func(token *jwt.Token) (interface{}, error) {
		// Only public-private-key algorithms are supported.
		var loadKeyFunc func([]byte) (interface{}, error)
		switch token.Method.(type) {
		case *jwt.SigningMethodRSA:
			loadKeyFunc = func(data []byte) (interface{}, error) {
				return jwt.ParseRSAPublicKeyFromPEM(data)
			}
		case *jwt.SigningMethodECDSA:
			loadKeyFunc = func(data []byte) (interface{}, error) {
				return jwt.ParseECPublicKeyFromPEM(data)
			}
		case *jwt.SigningMethodEd25519:
			loadKeyFunc = func(data []byte) (interface{}, error) {
				if !bytes.HasPrefix(data, []byte("-----BEGIN ")) {
					// Nextcloud sends the Ed25519 key as base64-encoded public key data.
decoded, err := base64.StdEncoding.DecodeString(string(data))
					if err != nil {
						return nil, err
					}

					// Wrap the raw key in a PEM block so the jwt helper can parse it.
					key := ed25519.PublicKey(decoded)
					data, err = x509.MarshalPKIXPublicKey(key)
					if err != nil {
						return nil, err
					}

					data = pem.EncodeToMemory(&pem.Block{
						Type:  "PUBLIC KEY",
						Bytes: data,
					})
				}
				return jwt.ParseEdPublicKeyFromPEM(data)
			}
		default:
			log.Printf("Unexpected signing method: %v", token.Header["alg"])
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}

		// Run in timeout context to prevent blocking too long.
		ctx, cancel := context.WithTimeout(context.Background(), h.backendTimeout)
		defer cancel()

		keyData, cached, found := h.backend.capabilities.GetStringConfig(ctx, url, ConfigGroupSignaling, ConfigKeyHelloV2TokenKey)
		if !found {
			if cached {
				// The Nextcloud instance might just have enabled JWT but we probably use
				// the cached capabilities without the public key. Make sure to re-fetch.
				h.backend.capabilities.InvalidateCapabilities(url)
				keyData, _, found = h.backend.capabilities.GetStringConfig(ctx, url, ConfigGroupSignaling, ConfigKeyHelloV2TokenKey)
			}
			if !found {
				return nil, fmt.Errorf("No key found for issuer")
			}
		}

		key, err := loadKeyFunc([]byte(keyData))
		if err != nil {
			return nil, fmt.Errorf("Could not parse token key: %w", err)
		}

		return key, nil
	})
	if err != nil {
		// Map well-known validation failures to protocol errors.
		if err, ok := err.(*jwt.ValidationError); ok {
			if err.Errors&jwt.ValidationErrorIssuedAt == jwt.ValidationErrorIssuedAt {
				return nil, nil, TokenNotValidYet
			}
			if err.Errors&jwt.ValidationErrorExpired == jwt.ValidationErrorExpired {
				return nil, nil, TokenExpired
			}
		}

		return nil, nil, InvalidToken
	}

	claims, ok := token.Claims.(*HelloV2TokenClaims)
	if !ok || !token.Valid {
		return nil, nil, InvalidToken
	}

	// Both "iat" and "exp" are required claims here.
	now := time.Now()
	if !claims.VerifyIssuedAt(now, true) {
		return nil, nil, TokenNotValidYet
	}
	if !claims.VerifyExpiresAt(now, true) {
		return nil, nil, TokenExpired
	}

	auth := &BackendClientResponse{
		Type: "auth",
		Auth: &BackendClientAuthResponse{
			Version: message.Hello.Version,
			UserId:
claims.Subject,
			User:    claims.UserData,
		},
	}
	return backend, auth, nil
}

// processHelloClient authenticates a regular client "hello", dispatching to
// the v1 (ticket) or v2 (JWT) flow, and registers the session on success.
func (h *Hub) processHelloClient(client *Client, message *ClientMessage) {
	// Make sure the client must send another "hello" in case of errors.
	defer h.startExpectHello(client)

	var authFunc func(*Client, *ClientMessage) (*Backend, *BackendClientResponse, error)
	switch message.Hello.Version {
	case HelloVersionV1:
		// Auth information contains a ticket that must be validated against the
		// Nextcloud instance.
		authFunc = h.processHelloV1
	case HelloVersionV2:
		// Auth information contains a JWT that contains all information of the user.
		authFunc = h.processHelloV2
	default:
		client.SendMessage(message.NewErrorServerMessage(InvalidHelloVersion))
		return
	}

	backend, auth, err := authFunc(client, message)
	if err != nil {
		if e, ok := err.(*Error); ok {
			client.SendMessage(message.NewErrorServerMessage(e))
		} else {
			client.SendMessage(message.NewWrappedErrorServerMessage(err))
		}
		return
	}

	h.processRegister(client, message, backend, auth)
}

// processHelloInternal authenticates an internal client via an HMAC-SHA256
// token over random data, computed with the shared internal secret.
func (h *Hub) processHelloInternal(client *Client, message *ClientMessage) {
	defer h.startExpectHello(client)
	if len(h.internalClientsSecret) == 0 {
		client.SendMessage(message.NewErrorServerMessage(InvalidClientType))
		return
	}

	// Validate internal connection.
	rnd := message.Hello.Auth.internalParams.Random
	mac := hmac.New(sha256.New, h.internalClientsSecret)
	mac.Write([]byte(rnd)) // nolint
	check := hex.EncodeToString(mac.Sum(nil))
	if len(rnd) < minTokenRandomLength || check != message.Hello.Auth.internalParams.Token {
		client.SendMessage(message.NewErrorServerMessage(InvalidToken))
		return
	}

	backend := h.backend.GetBackend(message.Hello.Auth.internalParams.parsedBackend)
	if backend == nil {
		client.SendMessage(message.NewErrorServerMessage(InvalidBackendUrl))
		return
	}

	// Internal clients are registered without a user id.
	auth := &BackendClientResponse{
		Type: "auth",
		Auth: &BackendClientAuthResponse{},
	}
	h.processRegister(client, message, backend, auth)
}

// disconnectByRoomSessionId closes the signaling session currently bound to
// the given Nextcloud Talk room session id, because another connection took
// over that room session. Remote sessions are notified via async events.
func (h *Hub) disconnectByRoomSessionId(ctx context.Context, roomSessionId string, backend *Backend) {
	sessionId, err := h.roomSessions.LookupSessionId(ctx, roomSessionId, "room_session_reconnected")
	if err == ErrNoSuchRoomSession {
		return
	} else if err != nil {
		log.Printf("Could not get session id for room session %s: %s", roomSessionId, err)
		return
	}

	session := h.GetSessionByPublicId(sessionId)
	if session == nil {
		// Session is located on a different server. Should already have been closed
		// but send "bye" again as additional safeguard.
msg := &AsyncMessage{
			Type: "message",
			Message: &ServerMessage{
				Type: "bye",
				Bye: &ByeServerMessage{
					Reason: "room_session_reconnected",
				},
			},
		}
		if err := h.events.PublishSessionMessage(sessionId, backend, msg); err != nil {
			log.Printf("Could not send reconnect bye to session %s: %s", sessionId, err)
		}
		return
	}

	log.Printf("Closing session %s because same room session %s connected", session.PublicId(), roomSessionId)
	session.LeaveRoom(false)
	switch sess := session.(type) {
	case *ClientSession:
		if client := sess.GetClient(); client != nil {
			client.SendByeResponseWithReason(nil, "room_session_reconnected")
		}
	}
	session.Close()
}

// sendRoom sends a "room" event to the session; a nil room signals that the
// session left its room.
func (h *Hub) sendRoom(session *ClientSession, message *ClientMessage, room *Room) bool {
	response := &ServerMessage{
		Type: "room",
	}
	if message != nil {
		response.Id = message.Id
	}
	if room == nil {
		response.Room = &RoomServerMessage{
			RoomId: "",
		}
	} else {
		response.Room = &RoomServerMessage{
			RoomId:     room.id,
			Properties: room.properties,
		}
	}
	return session.SendMessage(response)
}

// processRoom handles a "room" request: leaving the current room (empty room
// id), re-joining the current room, or joining a new room after asking the
// Nextcloud backend for permission.
func (h *Hub) processRoom(client *Client, message *ClientMessage) {
	session := client.GetSession()
	roomId := message.Room.RoomId
	if roomId == "" {
		if session == nil {
			return
		}

		// We can handle leaving a room directly.
		if session.LeaveRoom(true) != nil {
			// User was in a room before, so need to notify about leaving it.
			h.sendRoom(session, message, nil)
			if session.UserId() == "" && session.ClientType() != HelloClientTypeInternal {
				h.startWaitAnonymousSessionRoom(session)
			}
		}
		return
	}

	if session != nil {
		if room := h.getRoomForBackend(roomId, session.Backend()); room != nil && room.HasSession(session) {
			// Session already is in that room, no action needed.
			roomSessionId := message.Room.SessionId
			if roomSessionId == "" {
				// TODO(jojo): Better make the session id required in the request.
log.Printf("User did not send a room session id, assuming session %s", session.PublicId())
				roomSessionId = session.PublicId()
			}
			if err := session.UpdateRoomSessionId(roomSessionId); err != nil {
				log.Printf("Error updating room session id for session %s: %s", session.PublicId(), err)
			}
			session.SendMessage(message.NewErrorServerMessage(
				NewErrorDetail("already_joined", "Already joined this room.", &RoomErrorDetails{
					Room: &RoomServerMessage{
						RoomId:     room.id,
						Properties: room.properties,
					},
				}),
			))
			return
		}
	}

	// NOTE(review): from here on "session" is dereferenced without a nil
	// check; presumably processMessage guarantees a session exists for
	// non-hello messages — confirm against the caller.
	var room BackendClientResponse
	if session.ClientType() == HelloClientTypeInternal {
		// Internal clients can join any room.
		room = BackendClientResponse{
			Type: "room",
			Room: &BackendClientRoomResponse{
				RoomId: roomId,
			},
		}
	} else {
		// Run in timeout context to prevent blocking too long.
		ctx, cancel := context.WithTimeout(context.Background(), h.backendTimeout)
		defer cancel()

		sessionId := message.Room.SessionId
		if sessionId == "" {
			// TODO(jojo): Better make the session id required in the request.
			log.Printf("User did not send a room session id, assuming session %s", session.PublicId())
			sessionId = session.PublicId()
		}
		request := NewBackendClientRoomRequest(roomId, session.UserId(), sessionId)
		if err := h.backend.PerformJSONRequest(ctx, session.ParsedBackendUrl(), request, &room); err != nil {
			session.SendMessage(message.NewWrappedErrorServerMessage(err))
			return
		}

		// TODO(jojo): Validate response

		if message.Room.SessionId != "" {
			// There can only be one connection per Nextcloud Talk session,
			// disconnect any other connections without sending a "leave" event.
ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() h.disconnectByRoomSessionId(ctx, message.Room.SessionId, session.Backend()) } } h.processJoinRoom(session, message, &room) } func (h *Hub) getRoomForBackend(id string, backend *Backend) *Room { internalRoomId := getRoomIdForBackend(id, backend) h.ru.RLock() defer h.ru.RUnlock() return h.rooms[internalRoomId] } func (h *Hub) removeRoom(room *Room) { internalRoomId := getRoomIdForBackend(room.Id(), room.Backend()) h.ru.Lock() if _, found := h.rooms[internalRoomId]; found { delete(h.rooms, internalRoomId) statsHubRoomsCurrent.WithLabelValues(room.Backend().Id()).Dec() } h.ru.Unlock() h.roomPing.DeleteRoom(room) } func (h *Hub) createRoom(id string, properties *json.RawMessage, backend *Backend) (*Room, error) { // Note the write lock must be held. room, err := NewRoom(id, properties, h, h.events, backend) if err != nil { return nil, err } internalRoomId := getRoomIdForBackend(id, backend) h.rooms[internalRoomId] = room statsHubRoomsCurrent.WithLabelValues(backend.Id()).Inc() return room, nil } func (h *Hub) processJoinRoom(session *ClientSession, message *ClientMessage, room *BackendClientResponse) { if room.Type == "error" { session.SendMessage(message.NewErrorServerMessage(room.Error)) return } else if room.Type != "room" { session.SendMessage(message.NewErrorServerMessage(RoomJoinFailed)) return } session.LeaveRoom(true) roomId := room.Room.RoomId internalRoomId := getRoomIdForBackend(roomId, session.Backend()) if err := session.SubscribeRoomEvents(roomId, message.Room.SessionId); err != nil { session.SendMessage(message.NewWrappedErrorServerMessage(err)) // The session (implicitly) left the room due to an error. 
h.sendRoom(session, nil, nil) return } h.ru.Lock() r, found := h.rooms[internalRoomId] if !found { var err error if r, err = h.createRoom(roomId, room.Room.Properties, session.Backend()); err != nil { h.ru.Unlock() session.SendMessage(message.NewWrappedErrorServerMessage(err)) // The session (implicitly) left the room due to an error. session.UnsubscribeRoomEvents() h.sendRoom(session, nil, nil) return } } h.ru.Unlock() h.mu.Lock() // The session now joined a room, don't expire if it is anonymous. delete(h.anonymousSessions, session) if session.ClientType() == HelloClientTypeInternal && session.HasFeature(ClientFeatureStartDialout) { // An internal session in a room can not be used for dialout. delete(h.dialoutSessions, session) } h.mu.Unlock() session.SetRoom(r) if room.Room.Permissions != nil { session.SetPermissions(*room.Room.Permissions) } h.sendRoom(session, message, r) r.AddSession(session, room.Room.Session) } func (h *Hub) processMessageMsg(client *Client, message *ClientMessage) { msg := message.Message session := client.GetSession() if session == nil { // Client is not connected yet. return } var recipient *ClientSession var subject string var clientData *MessageClientMessageData var serverRecipient *MessageClientMessageRecipient var recipientSessionId string var room *Room switch msg.Recipient.Type { case RecipientTypeSession: if h.mcu != nil { // Maybe this is a message to be processed by the MCU. var data MessageClientMessageData if err := json.Unmarshal(*msg.Data, &data); err == nil { if err := data.CheckValid(); err != nil { log.Printf("Invalid message %+v from client %s: %v", message, session.PublicId(), err) if err, ok := err.(*Error); ok { session.SendMessage(message.NewErrorServerMessage(err)) } else { session.SendMessage(message.NewErrorServerMessage(InvalidFormat)) } return } clientData = &data switch clientData.Type { case "requestoffer": // Process asynchronously to avoid blocking regular // message processing for this client. 
go h.processMcuMessage(session, message, msg, clientData) return case "offer": fallthrough case "answer": fallthrough case "endOfCandidates": fallthrough case "selectStream": fallthrough case "candidate": h.processMcuMessage(session, message, msg, clientData) return case "unshareScreen": if msg.Recipient.SessionId == session.PublicId() { // User is stopping to share his screen. Firefox doesn't properly clean // up the peer connections in all cases, so make sure to stop publishing // in the MCU. go func(c *Client) { time.Sleep(cleanupScreenPublisherDelay) session := c.GetSession() if session == nil { return } publisher := session.GetPublisher(StreamTypeScreen) if publisher == nil { return } log.Printf("Closing screen publisher for %s", session.PublicId()) ctx, cancel := context.WithTimeout(context.Background(), h.mcuTimeout) defer cancel() publisher.Close(ctx) }(client) } } } } sess := h.GetSessionByPublicId(msg.Recipient.SessionId) if sess != nil { // Recipient is also connected to this instance. if sess.Backend().Id() != session.Backend().Id() { // Clients are only allowed to send to sessions from the same backend. return } if msg.Recipient.SessionId == session.PublicId() { // Don't loop messages to the sender. return } subject = "session." + msg.Recipient.SessionId recipientSessionId = msg.Recipient.SessionId if sess, ok := sess.(*ClientSession); ok { recipient = sess } // Send to client connection for virtual sessions. if sess.ClientType() == HelloClientTypeVirtual { virtualSession := sess.(*VirtualSession) clientSession := virtualSession.Session() subject = "session." + clientSession.PublicId() recipientSessionId = clientSession.PublicId() recipient = clientSession // The client should see his session id as recipient. serverRecipient = &MessageClientMessageRecipient{ Type: "session", SessionId: virtualSession.SessionId(), } } } else { subject = "session." 
+ msg.Recipient.SessionId recipientSessionId = msg.Recipient.SessionId serverRecipient = &msg.Recipient } case RecipientTypeUser: if msg.Recipient.UserId != "" { if msg.Recipient.UserId == session.UserId() { // Don't loop messages to the sender. // TODO(jojo): Should we allow users to send messages to their // other sessions? return } subject = GetSubjectForUserId(msg.Recipient.UserId, session.Backend()) } case RecipientTypeRoom: if session != nil { if room = session.GetRoom(); room != nil { subject = GetSubjectForRoomId(room.Id(), room.Backend()) if h.mcu != nil { var data MessageClientMessageData if err := json.Unmarshal(*msg.Data, &data); err == nil { if err := data.CheckValid(); err != nil { log.Printf("Invalid message %+v from client %s: %v", message, session.PublicId(), err) if err, ok := err.(*Error); ok { session.SendMessage(message.NewErrorServerMessage(err)) } else { session.SendMessage(message.NewErrorServerMessage(InvalidFormat)) } return } clientData = &data } } } } } if subject == "" { log.Printf("Unknown recipient in message %+v from %s", msg, session.PublicId()) return } response := &ServerMessage{ Type: "message", Message: &MessageServerMessage{ Sender: &MessageServerMessageSender{ Type: msg.Recipient.Type, SessionId: session.PublicId(), UserId: session.UserId(), }, Recipient: serverRecipient, Data: msg.Data, }, } if recipient != nil { // The recipient is connected to this instance, no need to go through asynchronous events. if clientData != nil && clientData.Type == "sendoffer" { if err := session.IsAllowedToSend(clientData); err != nil { log.Printf("Session %s is not allowed to send offer for %s, ignoring (%s)", session.PublicId(), clientData.RoomType, err) sendNotAllowed(session, message, "Not allowed to send offer") return } // It may take some time for the publisher (which is the current // client) to start his stream, so we must not block the active // goroutine. 
go func() { ctx, cancel := context.WithTimeout(context.Background(), h.mcuTimeout) defer cancel() mc, err := recipient.GetOrCreateSubscriber(ctx, h.mcu, session.PublicId(), StreamType(clientData.RoomType)) if err != nil { log.Printf("Could not create MCU subscriber for session %s to send %+v to %s: %s", session.PublicId(), clientData, recipient.PublicId(), err) sendMcuClientNotFound(session, message) return } else if mc == nil { log.Printf("No MCU subscriber found for session %s to send %+v to %s", session.PublicId(), clientData, recipient.PublicId()) sendMcuClientNotFound(session, message) return } mc.SendMessage(context.TODO(), msg, clientData, func(err error, response map[string]interface{}) { if err != nil { log.Printf("Could not send MCU message %+v for session %s to %s: %s", clientData, session.PublicId(), recipient.PublicId(), err) sendMcuProcessingFailed(session, message) return } else if response == nil { // No response received return } // The response (i.e. the "offer") must be sent to the recipient but // should be coming from the sender. 
msg.Recipient.SessionId = session.PublicId() h.sendMcuMessageResponse(recipient, mc, msg, clientData, response) }) }() return } recipient.SendMessage(response) } else { if clientData != nil && clientData.Type == "sendoffer" { if err := session.IsAllowedToSend(clientData); err != nil { log.Printf("Session %s is not allowed to send offer for %s, ignoring (%s)", session.PublicId(), clientData.RoomType, err) sendNotAllowed(session, message, "Not allowed to send offer") return } async := &AsyncMessage{ Type: "sendoffer", SendOffer: &SendOfferMessage{ MessageId: message.Id, SessionId: session.PublicId(), Data: clientData, }, } if err := h.events.PublishSessionMessage(recipientSessionId, session.Backend(), async); err != nil { log.Printf("Error publishing message to remote session: %s", err) } return } async := &AsyncMessage{ Type: "message", Message: response, } var err error switch msg.Recipient.Type { case RecipientTypeSession: err = h.events.PublishSessionMessage(recipientSessionId, session.Backend(), async) case RecipientTypeUser: err = h.events.PublishUserMessage(msg.Recipient.UserId, session.Backend(), async) case RecipientTypeRoom: err = h.events.PublishRoomMessage(room.Id(), session.Backend(), async) default: err = fmt.Errorf("unsupported recipient type: %s", msg.Recipient.Type) } if err != nil { log.Printf("Error publishing message to remote session: %s", err) } } } func isAllowedToControl(session Session) bool { if session.ClientType() == HelloClientTypeInternal { // Internal clients are allowed to send any control message. return true } if session.HasPermission(PERMISSION_MAY_CONTROL) { // Moderator clients are allowed to send any control message. return true } return false } func (h *Hub) processControlMsg(client *Client, message *ClientMessage) { msg := message.Control session := client.GetSession() if session == nil { // Client is not connected yet. 
return } else if !isAllowedToControl(session) { log.Printf("Ignore control message %+v from %s", msg, session.PublicId()) return } var recipient *ClientSession var subject string var serverRecipient *MessageClientMessageRecipient var recipientSessionId string var room *Room switch msg.Recipient.Type { case RecipientTypeSession: data := h.decodeSessionId(msg.Recipient.SessionId, publicSessionName) if data != nil { if msg.Recipient.SessionId == session.PublicId() { // Don't loop messages to the sender. return } subject = "session." + msg.Recipient.SessionId recipientSessionId = msg.Recipient.SessionId h.mu.RLock() sess, found := h.sessions[data.Sid] if found && sess.PublicId() == msg.Recipient.SessionId { if sess, ok := sess.(*ClientSession); ok { recipient = sess } // Send to client connection for virtual sessions. if sess.ClientType() == HelloClientTypeVirtual { virtualSession := sess.(*VirtualSession) clientSession := virtualSession.Session() subject = "session." + clientSession.PublicId() recipientSessionId = clientSession.PublicId() recipient = clientSession // The client should see his session id as recipient. serverRecipient = &MessageClientMessageRecipient{ Type: "session", SessionId: virtualSession.SessionId(), } } } else { serverRecipient = &msg.Recipient } h.mu.RUnlock() } else { serverRecipient = &msg.Recipient } case RecipientTypeUser: if msg.Recipient.UserId != "" { if msg.Recipient.UserId == session.UserId() { // Don't loop messages to the sender. // TODO(jojo): Should we allow users to send messages to their // other sessions? 
return } subject = GetSubjectForUserId(msg.Recipient.UserId, session.Backend()) } case RecipientTypeRoom: if session != nil { if room = session.GetRoom(); room != nil { subject = GetSubjectForRoomId(room.Id(), room.Backend()) } } } if subject == "" { log.Printf("Unknown recipient in message %+v from %s", msg, session.PublicId()) return } response := &ServerMessage{ Type: "control", Control: &ControlServerMessage{ Sender: &MessageServerMessageSender{ Type: msg.Recipient.Type, SessionId: session.PublicId(), UserId: session.UserId(), }, Recipient: serverRecipient, Data: msg.Data, }, } if recipient != nil { recipient.SendMessage(response) } else { async := &AsyncMessage{ Type: "message", Message: response, } var err error switch msg.Recipient.Type { case RecipientTypeSession: err = h.events.PublishSessionMessage(recipientSessionId, session.Backend(), async) case RecipientTypeUser: err = h.events.PublishUserMessage(msg.Recipient.UserId, session.Backend(), async) case RecipientTypeRoom: err = h.events.PublishRoomMessage(room.Id(), room.Backend(), async) default: err = fmt.Errorf("unsupported recipient type: %s", msg.Recipient.Type) } if err != nil { log.Printf("Error publishing message to remote session: %s", err) } } } func (h *Hub) processInternalMsg(client *Client, message *ClientMessage) { msg := message.Internal session := client.GetSession() if session == nil { // Client is not connected yet. 
return } else if session.ClientType() != HelloClientTypeInternal { log.Printf("Ignore internal message %+v from %s", msg, session.PublicId()) return } if session.ProcessResponse(message) { return } switch msg.Type { case "addsession": msg := msg.AddSession room := h.getRoomForBackend(msg.RoomId, session.Backend()) if room == nil { log.Printf("Ignore add session message %+v for invalid room %s from %s", *msg, msg.RoomId, session.PublicId()) return } sessionIdData := h.newSessionIdData(session.Backend()) privateSessionId, err := h.encodeSessionId(sessionIdData, privateSessionName) if err != nil { log.Printf("Could not encode private virtual session id: %s", err) return } publicSessionId, err := h.encodeSessionId(sessionIdData, publicSessionName) if err != nil { log.Printf("Could not encode public virtual session id: %s", err) return } ctx, cancel := context.WithTimeout(context.Background(), h.backendTimeout) defer cancel() virtualSessionId := GetVirtualSessionId(session, msg.SessionId) sess, err := NewVirtualSession(session, privateSessionId, publicSessionId, sessionIdData, msg) if err != nil { log.Printf("Could not create virtual session %s: %s", virtualSessionId, err) reply := message.NewErrorServerMessage(NewError("add_failed", "Could not create virtual session.")) session.SendMessage(reply) return } if msg.Options != nil { request := NewBackendClientRoomRequest(room.Id(), msg.UserId, publicSessionId) request.Room.ActorId = msg.Options.ActorId request.Room.ActorType = msg.Options.ActorType request.Room.InCall = sess.GetInCall() var response BackendClientResponse if err := h.backend.PerformJSONRequest(ctx, session.ParsedBackendUrl(), request, &response); err != nil { sess.Close() log.Printf("Could not join virtual session %s at backend %s: %s", virtualSessionId, session.BackendUrl(), err) reply := message.NewErrorServerMessage(NewError("add_failed", "Could not join virtual session.")) session.SendMessage(reply) return } if response.Type == "error" { sess.Close() 
log.Printf("Could not join virtual session %s at backend %s: %+v", virtualSessionId, session.BackendUrl(), response.Error) reply := message.NewErrorServerMessage(NewError("add_failed", response.Error.Error())) session.SendMessage(reply) return } } else { request := NewBackendClientSessionRequest(room.Id(), "add", publicSessionId, msg) var response BackendClientSessionResponse if err := h.backend.PerformJSONRequest(ctx, session.ParsedBackendUrl(), request, &response); err != nil { sess.Close() log.Printf("Could not add virtual session %s at backend %s: %s", virtualSessionId, session.BackendUrl(), err) reply := message.NewErrorServerMessage(NewError("add_failed", "Could not add virtual session.")) session.SendMessage(reply) return } } h.mu.Lock() h.sessions[sessionIdData.Sid] = sess h.virtualSessions[virtualSessionId] = sessionIdData.Sid h.mu.Unlock() statsHubSessionsCurrent.WithLabelValues(session.Backend().Id(), sess.ClientType()).Inc() statsHubSessionsTotal.WithLabelValues(session.Backend().Id(), sess.ClientType()).Inc() log.Printf("Session %s added virtual session %s with initial flags %d", session.PublicId(), sess.PublicId(), sess.Flags()) session.AddVirtualSession(sess) sess.SetRoom(room) room.AddSession(sess, nil) case "updatesession": msg := msg.UpdateSession room := h.getRoomForBackend(msg.RoomId, session.Backend()) if room == nil { log.Printf("Ignore remove session message %+v for invalid room %s from %s", *msg, msg.RoomId, session.PublicId()) return } virtualSessionId := GetVirtualSessionId(session, msg.SessionId) h.mu.Lock() sid, found := h.virtualSessions[virtualSessionId] if !found { h.mu.Unlock() return } sess := h.sessions[sid] h.mu.Unlock() if sess != nil { var changed SessionChangeFlag if virtualSession, ok := sess.(*VirtualSession); ok { if msg.Flags != nil { if virtualSession.SetFlags(*msg.Flags) { changed |= SessionChangeFlags } } if msg.InCall != nil { if virtualSession.SetInCall(*msg.InCall) { changed |= SessionChangeInCall } } } else { 
log.Printf("Ignore update request for non-virtual session %s", sess.PublicId()) } if changed != 0 { room.NotifySessionChanged(sess, changed) } } case "removesession": msg := msg.RemoveSession room := h.getRoomForBackend(msg.RoomId, session.Backend()) if room == nil { log.Printf("Ignore remove session message %+v for invalid room %s from %s", *msg, msg.RoomId, session.PublicId()) return } virtualSessionId := GetVirtualSessionId(session, msg.SessionId) h.mu.Lock() sid, found := h.virtualSessions[virtualSessionId] if !found { h.mu.Unlock() return } delete(h.virtualSessions, virtualSessionId) sess := h.sessions[sid] h.mu.Unlock() if sess != nil { log.Printf("Session %s removed virtual session %s", session.PublicId(), sess.PublicId()) if vsess, ok := sess.(*VirtualSession); ok { // We should always have a VirtualSession here. vsess.CloseWithFeedback(session, message) } else { sess.Close() } } case "incall": msg := msg.InCall if session.SetInCall(msg.InCall) { if room := session.GetRoom(); room != nil { room.NotifySessionChanged(session, SessionChangeInCall) } } case "dialout": roomId := msg.Dialout.RoomId msg.Dialout.RoomId = "" // Don't send room id to recipients. 
if msg.Dialout.Type == "status" { asyncMessage := &AsyncMessage{ Type: "room", Room: &BackendServerRoomRequest{ Type: "transient", Transient: &BackendRoomTransientRequest{ Action: TransientActionSet, Key: "callstatus_" + msg.Dialout.Status.CallId, Value: msg.Dialout.Status, }, }, } if msg.Dialout.Status.Status == DialoutStatusCleared || msg.Dialout.Status.Status == DialoutStatusRejected { asyncMessage.Room.Transient.TTL = removeCallStatusTTL } if err := h.events.PublishBackendRoomMessage(roomId, session.Backend(), asyncMessage); err != nil { log.Printf("Error publishing dialout message %+v to room %s", msg.Dialout, roomId) } } else { if err := h.events.PublishRoomMessage(roomId, session.Backend(), &AsyncMessage{ Type: "message", Message: &ServerMessage{ Type: "dialout", Dialout: msg.Dialout, }, }); err != nil { log.Printf("Error publishing dialout message %+v to room %s", msg.Dialout, roomId) } } default: log.Printf("Ignore unsupported internal message %+v from %s", msg, session.PublicId()) return } } func isAllowedToUpdateTransientData(session Session) bool { if session.ClientType() == HelloClientTypeInternal { // Internal clients are always allowed. return true } if session.HasPermission(PERMISSION_TRANSIENT_DATA) { return true } return false } func (h *Hub) processTransientMsg(client *Client, message *ClientMessage) { msg := message.TransientData session := client.GetSession() if session == nil { // Client is not connected yet. 
		return
	}

	room := session.GetRoom()
	if room == nil {
		// Transient data is scoped to a room, so the session must have joined
		// one first.
		response := message.NewErrorServerMessage(NewError("not_in_room", "No room joined yet."))
		session.SendMessage(response)
		return
	}

	switch msg.Type {
	case "set":
		if !isAllowedToUpdateTransientData(session) {
			sendNotAllowed(session, message, "Not allowed to update transient data.")
			return
		}

		if msg.Value == nil {
			// NOTE(review): a nil value is passed through with the TTL; the
			// exact semantics depend on Room.SetTransientDataTTL.
			room.SetTransientDataTTL(msg.Key, nil, msg.TTL)
		} else {
			room.SetTransientDataTTL(msg.Key, *msg.Value, msg.TTL)
		}
	case "remove":
		if !isAllowedToUpdateTransientData(session) {
			sendNotAllowed(session, message, "Not allowed to update transient data.")
			return
		}

		room.RemoveTransientData(msg.Key)
	default:
		response := message.NewErrorServerMessage(NewError("ignored", "Unsupported message type."))
		session.SendMessage(response)
	}
}

// sendNotAllowed replies to "message" with a "not_allowed" error carrying
// the given human readable reason.
func sendNotAllowed(session *ClientSession, message *ClientMessage, reason string) {
	response := message.NewErrorServerMessage(NewError("not_allowed", reason))
	session.SendMessage(response)
}

// sendMcuClientNotFound replies with a "client_not_found" error, used when
// no MCU publisher / subscriber exists for the requested recipient.
func sendMcuClientNotFound(session *ClientSession, message *ClientMessage) {
	response := message.NewErrorServerMessage(NewError("client_not_found", "No MCU client found to send message to."))
	session.SendMessage(response)
}

// sendMcuProcessingFailed replies with a generic "processing_failed" error.
func sendMcuProcessingFailed(session *ClientSession, message *ClientMessage) {
	response := message.NewErrorServerMessage(NewError("processing_failed", "Processing of the message failed, please check server logs."))
	session.SendMessage(response)
}

// isInSameCallRemote asks all known gRPC peers in parallel whether the
// recipient session is in the same call as the sender. It returns true if
// at least one peer confirms this; once one does, the remaining requests
// are cancelled through the shared context.
func (h *Hub) isInSameCallRemote(ctx context.Context, senderSession *ClientSession, senderRoom *Room, recipientSessionId string) bool {
	clients := h.rpcClients.GetClients()
	if len(clients) == 0 {
		return false
	}

	var result atomic.Bool
	var wg sync.WaitGroup
	rpcCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	for _, client := range clients {
		wg.Add(1)
		go func(client *GrpcClient) {
			defer wg.Done()

			inCall, err := client.IsSessionInCall(rpcCtx, recipientSessionId, senderRoom)
			if errors.Is(err, context.Canceled) {
				// Another peer already answered (or the caller gave up).
				return
			} else if err != nil {
log.Printf("Error checking session %s in call on %s: %s", recipientSessionId, client.Target(), err) return } else if !inCall { return } cancel() result.Store(true) }(client) } wg.Wait() return result.Load() } func (h *Hub) isInSameCall(ctx context.Context, senderSession *ClientSession, recipientSessionId string) bool { if senderSession.ClientType() == HelloClientTypeInternal { // Internal clients may subscribe all streams. return true } senderRoom := senderSession.GetRoom() if senderRoom == nil || !senderRoom.IsSessionInCall(senderSession) { // Sender is not in a room or not in the call. return false } recipientSession := h.GetSessionByPublicId(recipientSessionId) if recipientSession == nil { // Recipient session does not exist. return h.isInSameCallRemote(ctx, senderSession, senderRoom, recipientSessionId) } recipientRoom := recipientSession.GetRoom() if recipientRoom == nil || !senderRoom.IsEqual(recipientRoom) || (recipientSession.ClientType() != HelloClientTypeInternal && !recipientRoom.IsSessionInCall(recipientSession)) { // Recipient is not in a room, a different room or not in the call. return false } return true } func (h *Hub) processMcuMessage(session *ClientSession, client_message *ClientMessage, message *MessageClientMessage, data *MessageClientMessageData) { ctx, cancel := context.WithTimeout(context.Background(), h.mcuTimeout) defer cancel() var mc McuClient var err error var clientType string switch data.Type { case "requestoffer": if session.PublicId() == message.Recipient.SessionId { log.Printf("Not requesting offer from itself for session %s", session.PublicId()) return } // A user is only allowed to subscribe a stream if she is in the same room // as the other user and both have their "inCall" flag set. 
if !h.allowSubscribeAnyStream && !h.isInSameCall(ctx, session, message.Recipient.SessionId) { log.Printf("Session %s is not in the same call as session %s, not requesting offer", session.PublicId(), message.Recipient.SessionId) sendNotAllowed(session, client_message, "Not allowed to request offer.") return } clientType = "subscriber" mc, err = session.GetOrCreateSubscriber(ctx, h.mcu, message.Recipient.SessionId, StreamType(data.RoomType)) case "sendoffer": // Will be sent directly. return case "offer": clientType = "publisher" mc, err = session.GetOrCreatePublisher(ctx, h.mcu, StreamType(data.RoomType), data) if err, ok := err.(*PermissionError); ok { log.Printf("Session %s is not allowed to offer %s, ignoring (%s)", session.PublicId(), data.RoomType, err) sendNotAllowed(session, client_message, "Not allowed to publish.") return } if err, ok := err.(*SdpError); ok { log.Printf("Session %s sent unsupported offer %s, ignoring (%s)", session.PublicId(), data.RoomType, err) sendNotAllowed(session, client_message, "Not allowed to publish.") return } case "selectStream": if session.PublicId() == message.Recipient.SessionId { log.Printf("Not selecting substream for own %s stream in session %s", data.RoomType, session.PublicId()) return } clientType = "subscriber" mc = session.GetSubscriber(message.Recipient.SessionId, StreamType(data.RoomType)) default: if session.PublicId() == message.Recipient.SessionId { if err := session.IsAllowedToSend(data); err != nil { log.Printf("Session %s is not allowed to send candidate for %s, ignoring (%s)", session.PublicId(), data.RoomType, err) sendNotAllowed(session, client_message, "Not allowed to send candidate.") return } clientType = "publisher" mc = session.GetPublisher(StreamType(data.RoomType)) } else { clientType = "subscriber" mc = session.GetSubscriber(message.Recipient.SessionId, StreamType(data.RoomType)) } } if err != nil { log.Printf("Could not create MCU %s for session %s to send %+v to %s: %s", clientType, 
session.PublicId(), data, message.Recipient.SessionId, err) sendMcuClientNotFound(session, client_message) return } else if mc == nil { log.Printf("No MCU %s found for session %s to send %+v to %s", clientType, session.PublicId(), data, message.Recipient.SessionId) sendMcuClientNotFound(session, client_message) return } mc.SendMessage(context.TODO(), message, data, func(err error, response map[string]interface{}) { if err != nil { log.Printf("Could not send MCU message %+v for session %s to %s: %s", data, session.PublicId(), message.Recipient.SessionId, err) sendMcuProcessingFailed(session, client_message) return } else if response == nil { // No response received return } h.sendMcuMessageResponse(session, mc, message, data, response) }) } func (h *Hub) sendMcuMessageResponse(session *ClientSession, mcuClient McuClient, message *MessageClientMessage, data *MessageClientMessageData, response map[string]interface{}) { var response_message *ServerMessage switch response["type"] { case "answer": answer_message := &AnswerOfferMessage{ To: session.PublicId(), From: session.PublicId(), Type: "answer", RoomType: data.RoomType, Payload: response, Sid: mcuClient.Sid(), } answer_data, err := json.Marshal(answer_message) if err != nil { log.Printf("Could not serialize answer %+v to %s: %s", answer_message, session.PublicId(), err) return } response_message = &ServerMessage{ Type: "message", Message: &MessageServerMessage{ Sender: &MessageServerMessageSender{ Type: "session", SessionId: session.PublicId(), UserId: session.UserId(), }, Data: (*json.RawMessage)(&answer_data), }, } case "offer": offer_message := &AnswerOfferMessage{ To: session.PublicId(), From: message.Recipient.SessionId, Type: "offer", RoomType: data.RoomType, Payload: response, Sid: mcuClient.Sid(), } offer_data, err := json.Marshal(offer_message) if err != nil { log.Printf("Could not serialize offer %+v to %s: %s", offer_message, session.PublicId(), err) return } response_message = &ServerMessage{ Type: 
"message", Message: &MessageServerMessage{ Sender: &MessageServerMessageSender{ Type: "session", SessionId: message.Recipient.SessionId, // TODO(jojo): Set "UserId" field if known user. }, Data: (*json.RawMessage)(&offer_data), }, } default: log.Printf("Unsupported response %+v received to send to %s", response, session.PublicId()) return } session.SendMessage(response_message) } func (h *Hub) processByeMsg(client *Client, message *ClientMessage) { client.SendByeResponse(message) if session := h.processUnregister(client); session != nil { session.Close() } } func (h *Hub) processRoomUpdated(message *BackendServerRoomRequest) { room := message.room room.UpdateProperties(message.Update.Properties) } func (h *Hub) processRoomDeleted(message *BackendServerRoomRequest) { room := message.room sessions := room.Close() for _, session := range sessions { // The session is no longer in the room session.LeaveRoom(true) switch sess := session.(type) { case *ClientSession: if client := sess.GetClient(); client != nil { h.sendRoom(sess, nil, nil) } } } } func (h *Hub) processRoomInCallChanged(message *BackendServerRoomRequest) { room := message.room if message.InCall.All { var flags int if err := json.Unmarshal(message.InCall.InCall, &flags); err != nil { var incall bool if err := json.Unmarshal(message.InCall.InCall, &incall); err != nil { log.Printf("Unsupported InCall flags type: %+v, ignoring", string(message.InCall.InCall)) return } if incall { flags = FlagInCall } } room.PublishUsersInCallChangedAll(flags) } else { room.PublishUsersInCallChanged(message.InCall.Changed, message.InCall.Users) } } func (h *Hub) processRoomParticipants(message *BackendServerRoomRequest) { room := message.room room.PublishUsersChanged(message.Participants.Changed, message.Participants.Users) } func (h *Hub) GetStats() map[string]interface{} { result := make(map[string]interface{}) h.ru.RLock() result["rooms"] = len(h.rooms) h.ru.RUnlock() h.mu.Lock() result["sessions"] = len(h.sessions) 
	h.mu.Unlock()
	if h.mcu != nil {
		if stats := h.mcu.GetStats(); stats != nil {
			result["mcu"] = stats
		}
	}
	return result
}

// getRealUserIP returns the client address for the request, preferring the
// proxy supplied "X-Real-IP" / "X-Forwarded-For" headers over the raw
// remote address.
func getRealUserIP(r *http.Request) string {
	// Note this function assumes it is running behind a trusted proxy, so
	// the headers can be trusted.
	if ip := r.Header.Get("X-Real-IP"); ip != "" {
		return ip
	}

	if ip := r.Header.Get("X-Forwarded-For"); ip != "" {
		// Result could be a list "clientip, proxy1, proxy2", so only use first element.
		if pos := strings.Index(ip, ","); pos >= 0 {
			ip = strings.TrimSpace(ip[:pos])
		}
		return ip
	}

	return r.RemoteAddr
}

// serveWs upgrades an incoming HTTP request to a WebSocket connection,
// registers the new client and starts its read / write pumps, tracking the
// number of active pump goroutines.
func (h *Hub) serveWs(w http.ResponseWriter, r *http.Request) {
	addr := getRealUserIP(r)
	agent := r.Header.Get("User-Agent")

	conn, err := h.upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Printf("Could not upgrade request from %s: %s", addr, err)
		return
	}

	client, err := NewClient(conn, addr, agent, h)
	if err != nil {
		log.Printf("Could not create client for %s: %s", addr, err)
		return
	}

	h.processNewClient(client)
	go func(h *Hub) {
		h.writePumpActive.Add(1)
		defer h.writePumpActive.Add(-1)
		client.WritePump()
	}(h)
	go func(h *Hub) {
		h.readPumpActive.Add(1)
		defer h.readPumpActive.Add(-1)
		client.ReadPump()
	}(h)
}

// OnLookupCountry maps the client's remote address to a country code.
// Configured per-network overrides take precedence; loopback addresses map
// to the "loopback" marker and unparseable / unresolvable addresses to the
// respective "no country" / "unknown country" markers.
func (h *Hub) OnLookupCountry(client *Client) string {
	ip := net.ParseIP(client.RemoteAddr())
	if ip == nil {
		return noCountry
	}

	for overrideNet, country := range h.geoipOverrides {
		if overrideNet.Contains(ip) {
			return country
		}
	}

	if ip.IsLoopback() {
		return loopback
	}

	country := unknownCountry
	if h.geoip != nil {
		var err error
		country, err = h.geoip.LookupCountry(ip)
		if err != nil {
			log.Printf("Could not lookup country for %s: %s", ip, err)
			return unknownCountry
		}

		if country == "" {
			country = unknownCountry
		}
	}
	return country
}

// OnClosed is called when a client connection was closed and unregisters
// the client from the hub.
func (h *Hub) OnClosed(client *Client) {
	h.processUnregister(client)
}

// OnMessageReceived is called for each message received from a client and
// dispatches it to the hub's message processing.
func (h *Hub) OnMessageReceived(client *Client, data []byte) {
	h.processMessage(client, data)
}

// OnRTTReceived is called with the measured round-trip time of a client.
func (h *Hub) OnRTTReceived(client *Client, rtt time.Duration) {
	// Ignore
}
nextcloud-spreed-signaling-1.2.4/hub_stats_prometheus.go000066400000000000000000000045151460321600400235530ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsHubRoomsCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "hub", Name: "rooms", Help: "The current number of rooms per backend", }, []string{"backend"}) statsHubSessionsCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "hub", Name: "sessions", Help: "The current number of sessions per backend", }, []string{"backend", "clienttype"}) statsHubSessionsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "hub", Name: "sessions_total", Help: "The total number of sessions per backend", }, []string{"backend", "clienttype"}) statsHubSessionsResumedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "hub", Name: "sessions_resume_total", Help: "The total number of resumed sessions per backend", }, []string{"backend", "clienttype"}) statsHubSessionResumeFailed = prometheus.NewCounter(prometheus.CounterOpts{ 
Namespace: "signaling", Subsystem: "hub", Name: "sessions_resume_failed_total", Help: "The total number of failed session resume requests", }) hubStats = []prometheus.Collector{ statsHubRoomsCurrent, statsHubSessionsCurrent, statsHubSessionsTotal, statsHubSessionResumeFailed, } ) func RegisterHubStats() { registerAll(hubStats...) } nextcloud-spreed-signaling-1.2.4/hub_test.go000066400000000000000000004564371460321600400211370ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "bytes" "context" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "errors" "io" "net/http" "net/http/httptest" "net/url" "os" "reflect" "strings" "sync" "testing" "time" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" "github.com/gorilla/mux" "github.com/gorilla/websocket" ) const ( testDefaultUserId = "test-userid" authAnonymousUserId = "anonymous-userid" testTimeout = 10 * time.Second ) var ( testRoomProperties = []byte("{\"prop1\":\"value1\"}") ) var ( clusteredTests = []string{ "local", "clustered", } testHelloV2Algorithms = []string{ "RSA", "ECDSA", "Ed25519", "Ed25519_Nextcloud", } ) // Only used for testing. func (h *Hub) getRoom(id string) *Room { h.ru.RLock() defer h.ru.RUnlock() // TODO: The same room might exist on different backends. for _, room := range h.rooms { if room.Id() == id { return room } } return nil } func isLocalTest(t *testing.T) bool { return strings.HasSuffix(t.Name(), "/local") } func getTestConfig(server *httptest.Server) (*goconf.ConfigFile, error) { config := goconf.NewConfigFile() u, err := url.Parse(server.URL) if err != nil { return nil, err } config.AddOption("backend", "allowed", u.Host) if u.Scheme == "http" { config.AddOption("backend", "allowhttp", "true") } config.AddOption("backend", "secret", string(testBackendSecret)) config.AddOption("sessions", "hashkey", "12345678901234567890123456789012") config.AddOption("sessions", "blockkey", "09876543210987654321098765432109") config.AddOption("clients", "internalsecret", string(testInternalSecret)) config.AddOption("geoip", "url", "none") return config, nil } func getTestConfigWithMultipleBackends(server *httptest.Server) (*goconf.ConfigFile, error) { config, err := getTestConfig(server) if err != nil { return nil, err } config.RemoveOption("backend", "allowed") config.RemoveOption("backend", "secret") config.AddOption("backend", "backends", 
"backend1, backend2") config.AddOption("backend1", "url", server.URL+"/one") config.AddOption("backend1", "secret", string(testBackendSecret)) config.AddOption("backend2", "url", server.URL+"/two/") config.AddOption("backend2", "secret", string(testBackendSecret)) return config, nil } func CreateHubForTestWithConfig(t *testing.T, getConfigFunc func(*httptest.Server) (*goconf.ConfigFile, error)) (*Hub, AsyncEvents, *mux.Router, *httptest.Server) { r := mux.NewRouter() registerBackendHandler(t, r) server := httptest.NewServer(r) t.Cleanup(func() { server.Close() }) events := getAsyncEventsForTest(t) config, err := getConfigFunc(server) if err != nil { t.Fatal(err) } h, err := NewHub(config, events, nil, nil, nil, r, "no-version") if err != nil { t.Fatal(err) } b, err := NewBackendServer(config, h, "no-version") if err != nil { t.Fatal(err) } if err := b.Start(r); err != nil { t.Fatal(err) } go h.Run() t.Cleanup(func() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() WaitForHub(ctx, t, h) }) return h, events, r, server } func CreateHubForTest(t *testing.T) (*Hub, AsyncEvents, *mux.Router, *httptest.Server) { return CreateHubForTestWithConfig(t, getTestConfig) } func CreateHubWithMultipleBackendsForTest(t *testing.T) (*Hub, AsyncEvents, *mux.Router, *httptest.Server) { h, events, r, server := CreateHubForTestWithConfig(t, getTestConfigWithMultipleBackends) registerBackendHandlerUrl(t, r, "/one") registerBackendHandlerUrl(t, r, "/two") return h, events, r, server } func CreateClusteredHubsForTestWithConfig(t *testing.T, getConfigFunc func(*httptest.Server) (*goconf.ConfigFile, error)) (*Hub, *Hub, *mux.Router, *mux.Router, *httptest.Server, *httptest.Server) { r1 := mux.NewRouter() registerBackendHandler(t, r1) server1 := httptest.NewServer(r1) t.Cleanup(func() { server1.Close() }) r2 := mux.NewRouter() registerBackendHandler(t, r2) server2 := httptest.NewServer(r2) t.Cleanup(func() { server2.Close() }) nats := 
startLocalNatsServer(t) grpcServer1, addr1 := NewGrpcServerForTest(t) grpcServer2, addr2 := NewGrpcServerForTest(t) events1, err := NewAsyncEvents(nats) if err != nil { t.Fatal(err) } t.Cleanup(func() { events1.Close() }) config1, err := getConfigFunc(server1) if err != nil { t.Fatal(err) } client1, _ := NewGrpcClientsForTest(t, addr2) h1, err := NewHub(config1, events1, grpcServer1, client1, nil, r1, "no-version") if err != nil { t.Fatal(err) } b1, err := NewBackendServer(config1, h1, "no-version") if err != nil { t.Fatal(err) } events2, err := NewAsyncEvents(nats) if err != nil { t.Fatal(err) } t.Cleanup(func() { events2.Close() }) config2, err := getConfigFunc(server2) if err != nil { t.Fatal(err) } client2, _ := NewGrpcClientsForTest(t, addr1) h2, err := NewHub(config2, events2, grpcServer2, client2, nil, r2, "no-version") if err != nil { t.Fatal(err) } b2, err := NewBackendServer(config2, h2, "no-version") if err != nil { t.Fatal(err) } if err := b1.Start(r1); err != nil { t.Fatal(err) } if err := b2.Start(r2); err != nil { t.Fatal(err) } go h1.Run() go h2.Run() t.Cleanup(func() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() WaitForHub(ctx, t, h1) WaitForHub(ctx, t, h2) }) return h1, h2, r1, r2, server1, server2 } func CreateClusteredHubsForTest(t *testing.T) (*Hub, *Hub, *httptest.Server, *httptest.Server) { h1, h2, _, _, server1, server2 := CreateClusteredHubsForTestWithConfig(t, getTestConfig) return h1, h2, server1, server2 } func WaitForHub(ctx context.Context, t *testing.T, h *Hub) { // Wait for any channel messages to be processed. 
time.Sleep(10 * time.Millisecond) h.Stop() for { h.mu.Lock() clients := len(h.clients) sessions := len(h.sessions) h.mu.Unlock() h.ru.Lock() rooms := len(h.rooms) h.ru.Unlock() readActive := h.readPumpActive.Load() writeActive := h.writePumpActive.Load() if clients == 0 && rooms == 0 && sessions == 0 && readActive == 0 && writeActive == 0 { break } select { case <-ctx.Done(): h.mu.Lock() h.ru.Lock() dumpGoroutines("", os.Stderr) t.Errorf("Error waiting for clients %+v / rooms %+v / sessions %+v / %d read / %d write to terminate: %s", h.clients, h.rooms, h.sessions, readActive, writeActive, ctx.Err()) h.ru.Unlock() h.mu.Unlock() return default: time.Sleep(time.Millisecond) } } } func validateBackendChecksum(t *testing.T, f func(http.ResponseWriter, *http.Request, *BackendClientRequest) *BackendClientResponse) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { t.Fatal("Error reading body: ", err) } rnd := r.Header.Get(HeaderBackendSignalingRandom) checksum := r.Header.Get(HeaderBackendSignalingChecksum) if rnd == "" || checksum == "" { t.Fatalf("No checksum headers found in request to %s", r.URL) } if verify := CalculateBackendChecksum(rnd, body, testBackendSecret); verify != checksum { t.Fatalf("Backend checksum verification failed for request to %s", r.URL) } var request BackendClientRequest if err := json.Unmarshal(body, &request); err != nil { t.Fatal(err) } response := f(w, r, &request) if response == nil { // Function already returned a response. 
return } data, err := json.Marshal(response) if err != nil { t.Fatal(err) } if r.Header.Get("OCS-APIRequest") != "" { var ocs OcsResponse ocs.Ocs = &OcsBody{ Meta: OcsMeta{ Status: "ok", StatusCode: http.StatusOK, Message: http.StatusText(http.StatusOK), }, Data: (*json.RawMessage)(&data), } if data, err = json.Marshal(ocs); err != nil { t.Fatal(err) } } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(data) // nolint } } func processAuthRequest(t *testing.T, w http.ResponseWriter, r *http.Request, request *BackendClientRequest) *BackendClientResponse { if request.Type != "auth" || request.Auth == nil { t.Fatalf("Expected an auth backend request, got %+v", request) } var params TestBackendClientAuthParams if request.Auth.Params != nil && len(*request.Auth.Params) > 0 { if err := json.Unmarshal(*request.Auth.Params, ¶ms); err != nil { t.Fatal(err) } } if params.UserId == "" { params.UserId = testDefaultUserId } else if params.UserId == authAnonymousUserId { params.UserId = "" } response := &BackendClientResponse{ Type: "auth", Auth: &BackendClientAuthResponse{ Version: BackendVersion, UserId: params.UserId, }, } userdata := map[string]string{ "displayname": "Displayname " + params.UserId, } if data, err := json.Marshal(userdata); err != nil { t.Fatal(err) } else { response.Auth.User = (*json.RawMessage)(&data) } return response } func processRoomRequest(t *testing.T, w http.ResponseWriter, r *http.Request, request *BackendClientRequest) *BackendClientResponse { if request.Type != "room" || request.Room == nil { t.Fatalf("Expected an room backend request, got %+v", request) } switch request.Room.RoomId { case "test-room-slow": time.Sleep(100 * time.Millisecond) case "test-room-takeover-room-session": // Additional checks for testcase "TestClientTakeoverRoomSession" if request.Room.Action == "leave" && request.Room.UserId == "test-userid1" { t.Errorf("Should not receive \"leave\" event for first user, received %+v", request.Room) 
} } // Allow joining any room. response := &BackendClientResponse{ Type: "room", Room: &BackendClientRoomResponse{ Version: BackendVersion, RoomId: request.Room.RoomId, Properties: (*json.RawMessage)(&testRoomProperties), }, } switch request.Room.RoomId { case "test-room-with-sessiondata": data := map[string]string{ "userid": "userid-from-sessiondata", } tmp, err := json.Marshal(data) if err != nil { t.Fatalf("Could not marshal %+v: %s", data, err) } response.Room.Session = (*json.RawMessage)(&tmp) case "test-room-initial-permissions": permissions := []Permission{PERMISSION_MAY_PUBLISH_AUDIO} response.Room.Permissions = &permissions } return response } var ( sessionRequestHander struct { sync.Mutex handlers map[*testing.T]func(*BackendClientSessionRequest) } ) func setSessionRequestHandler(t *testing.T, f func(*BackendClientSessionRequest)) { sessionRequestHander.Lock() defer sessionRequestHander.Unlock() if sessionRequestHander.handlers == nil { sessionRequestHander.handlers = make(map[*testing.T]func(*BackendClientSessionRequest)) } if _, found := sessionRequestHander.handlers[t]; !found { t.Cleanup(func() { sessionRequestHander.Lock() defer sessionRequestHander.Unlock() delete(sessionRequestHander.handlers, t) }) } sessionRequestHander.handlers[t] = f } func clearSessionRequestHandler(t *testing.T) { // nolint sessionRequestHander.Lock() defer sessionRequestHander.Unlock() delete(sessionRequestHander.handlers, t) } func processSessionRequest(t *testing.T, w http.ResponseWriter, r *http.Request, request *BackendClientRequest) *BackendClientResponse { if request.Type != "session" || request.Session == nil { t.Fatalf("Expected an session backend request, got %+v", request) } sessionRequestHander.Lock() defer sessionRequestHander.Unlock() if f, found := sessionRequestHander.handlers[t]; found { f(request.Session) } response := &BackendClientResponse{ Type: "session", Session: &BackendClientSessionResponse{ Version: BackendVersion, RoomId: request.Session.RoomId, }, 
} return response } var pingRequests map[*testing.T][]*BackendClientRequest func getPingRequests(t *testing.T) []*BackendClientRequest { return pingRequests[t] } func clearPingRequests(t *testing.T) { delete(pingRequests, t) } func storePingRequest(t *testing.T, request *BackendClientRequest) { if entries, found := pingRequests[t]; !found { if pingRequests == nil { pingRequests = make(map[*testing.T][]*BackendClientRequest) } pingRequests[t] = []*BackendClientRequest{ request, } t.Cleanup(func() { clearPingRequests(t) }) } else { pingRequests[t] = append(entries, request) } } func processPingRequest(t *testing.T, w http.ResponseWriter, r *http.Request, request *BackendClientRequest) *BackendClientResponse { if request.Type != "ping" || request.Ping == nil { t.Fatalf("Expected an ping backend request, got %+v", request) } if request.Ping.RoomId == "test-room-with-sessiondata" { if entries := request.Ping.Entries; len(entries) != 1 { t.Errorf("Expected one entry, got %+v", entries) } else { if entries[0].UserId != "" { t.Errorf("Expected empty userid, got %+v", entries[0]) } } } storePingRequest(t, request) response := &BackendClientResponse{ Type: "ping", Ping: &BackendClientRingResponse{ Version: BackendVersion, RoomId: request.Ping.RoomId, }, } return response } func ensureAuthTokens(t *testing.T) (string, string) { if privateKey := os.Getenv("PRIVATE_AUTH_TOKEN_" + t.Name()); privateKey != "" { publicKey := os.Getenv("PUBLIC_AUTH_TOKEN_" + t.Name()) if publicKey == "" { // should not happen, always both keys are created t.Fatal("public key is empty") } return privateKey, publicKey } var private []byte var public []byte if strings.Contains(t.Name(), "ECDSA") { key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Fatal(err) } private, err = x509.MarshalECPrivateKey(key) if err != nil { t.Fatal(err) } private = pem.EncodeToMemory(&pem.Block{ Type: "ECDSA PRIVATE KEY", Bytes: private, }) public, err = x509.MarshalPKIXPublicKey(&key.PublicKey) 
if err != nil { t.Fatal(err) } public = pem.EncodeToMemory(&pem.Block{ Type: "ECDSA PUBLIC KEY", Bytes: public, }) } else if strings.Contains(t.Name(), "Ed25519") { publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) if err != nil { t.Fatal(err) } private, err = x509.MarshalPKCS8PrivateKey(privateKey) if err != nil { t.Fatal(err) } private = pem.EncodeToMemory(&pem.Block{ Type: "Ed25519 PRIVATE KEY", Bytes: private, }) public, err = x509.MarshalPKIXPublicKey(publicKey) if err != nil { t.Fatal(err) } public = pem.EncodeToMemory(&pem.Block{ Type: "Ed25519 PUBLIC KEY", Bytes: public, }) } else { key, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } private = pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key), }) public, err = x509.MarshalPKIXPublicKey(&key.PublicKey) if err != nil { t.Fatal(err) } public = pem.EncodeToMemory(&pem.Block{ Type: "RSA PUBLIC KEY", Bytes: public, }) } privateKey := base64.StdEncoding.EncodeToString(private) t.Setenv("PRIVATE_AUTH_TOKEN_"+t.Name(), privateKey) publicKey := base64.StdEncoding.EncodeToString(public) t.Setenv("PUBLIC_AUTH_TOKEN_"+t.Name(), publicKey) return privateKey, publicKey } func getPrivateAuthToken(t *testing.T) (key interface{}) { private, _ := ensureAuthTokens(t) data, err := base64.StdEncoding.DecodeString(private) if err != nil { t.Fatal(err) } if strings.Contains(t.Name(), "ECDSA") { key, err = jwt.ParseECPrivateKeyFromPEM(data) } else if strings.Contains(t.Name(), "Ed25519") { key, err = jwt.ParseEdPrivateKeyFromPEM(data) } else { key, err = jwt.ParseRSAPrivateKeyFromPEM(data) } if err != nil { t.Fatal(err) } return key } func getPublicAuthToken(t *testing.T) (key interface{}) { _, public := ensureAuthTokens(t) data, err := base64.StdEncoding.DecodeString(public) if err != nil { t.Fatal(err) } if strings.Contains(t.Name(), "ECDSA") { key, err = jwt.ParseECPublicKeyFromPEM(data) } else if strings.Contains(t.Name(), "Ed25519") { key, err 
= jwt.ParseEdPublicKeyFromPEM(data) } else { key, err = jwt.ParseRSAPublicKeyFromPEM(data) } if err != nil { t.Fatal(err) } return key } func registerBackendHandler(t *testing.T, router *mux.Router) { registerBackendHandlerUrl(t, router, "/") } func registerBackendHandlerUrl(t *testing.T, router *mux.Router, url string) { handleFunc := validateBackendChecksum(t, func(w http.ResponseWriter, r *http.Request, request *BackendClientRequest) *BackendClientResponse { switch request.Type { case "auth": return processAuthRequest(t, w, r, request) case "room": return processRoomRequest(t, w, r, request) case "session": return processSessionRequest(t, w, r, request) case "ping": return processPingRequest(t, w, r, request) default: t.Fatalf("Unsupported request received: %+v", request) return nil } }) router.HandleFunc(url, handleFunc) if !strings.HasSuffix(url, "/") { url += "/" } handleCapabilitiesFunc := func(w http.ResponseWriter, r *http.Request) { features := []string{ "foo", "bar", } if strings.Contains(t.Name(), "V3Api") { features = append(features, "signaling-v3") } signaling := map[string]interface{}{ "foo": "bar", "baz": 42, } config := map[string]interface{}{ "signaling": signaling, } if strings.Contains(t.Name(), "MultiRoom") { signaling[ConfigKeySessionPingLimit] = 2 } useV2 := true if os.Getenv("SKIP_V2_CAPABILITIES") != "" { useV2 = false } if strings.Contains(t.Name(), "V2") && useV2 { key := getPublicAuthToken(t) public, err := x509.MarshalPKIXPublicKey(key) if err != nil { t.Fatal(err) } var pemType string if strings.Contains(t.Name(), "ECDSA") { pemType = "ECDSA PUBLIC KEY" } else if strings.Contains(t.Name(), "Ed25519") { pemType = "Ed25519 PUBLIC KEY" } else { pemType = "RSA PUBLIC KEY" } public = pem.EncodeToMemory(&pem.Block{ Type: pemType, Bytes: public, }) if strings.Contains(t.Name(), "Ed25519_Nextcloud") { // Simulate Nextcloud which returns the Ed25519 key as base64-encoded data. 
encoded := base64.StdEncoding.EncodeToString(key.(ed25519.PublicKey)) signaling[ConfigKeyHelloV2TokenKey] = encoded } else { signaling[ConfigKeyHelloV2TokenKey] = string(public) } } spreedCapa, _ := json.Marshal(map[string]interface{}{ "features": features, "config": config, }) response := &CapabilitiesResponse{ Version: CapabilitiesVersion{ Major: 20, }, Capabilities: map[string]*json.RawMessage{ "spreed": (*json.RawMessage)(&spreedCapa), }, } data, err := json.Marshal(response) if err != nil { t.Errorf("Could not marshal %+v: %s", response, err) } var ocs OcsResponse ocs.Ocs = &OcsBody{ Meta: OcsMeta{ Status: "ok", StatusCode: http.StatusOK, Message: http.StatusText(http.StatusOK), }, Data: (*json.RawMessage)(&data), } if data, err = json.Marshal(ocs); err != nil { t.Fatal(err) } w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(data) // nolint } router.HandleFunc(url+"ocs/v2.php/cloud/capabilities", handleCapabilitiesFunc) if strings.Contains(t.Name(), "V3Api") { router.HandleFunc(url+"ocs/v2.php/apps/spreed/api/v3/signaling/backend", handleFunc) } else { router.HandleFunc(url+"ocs/v2.php/apps/spreed/api/v1/signaling/backend", handleFunc) } } func performHousekeeping(hub *Hub, now time.Time) *sync.WaitGroup { var wg sync.WaitGroup wg.Add(1) go func() { hub.performHousekeeping(now) wg.Done() }() return &wg } func TestInitialWelcome(t *testing.T) { hub, _, _, server := CreateHubForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() client := NewTestClientContext(ctx, t, server, hub) defer client.CloseWithBye() msg, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } if msg.Type != "welcome" { t.Errorf("Expected \"welcome\" message, got %+v", msg) } else if msg.Welcome.Version == "" { t.Errorf("Expected welcome version, got %+v", msg) } else if len(msg.Welcome.Features) == 0 { t.Errorf("Expected welcome features, got %+v", msg) } } func TestExpectClientHello(t *testing.T) { 
hub, _, _, server := CreateHubForTest(t) // The server will send an error and close the connection if no "Hello" // is sent. client := NewTestClient(t, server, hub) defer client.CloseWithBye() // Perform housekeeping in the future, this will cause the connection to // be terminated due to the missing "Hello" request. performHousekeeping(hub, time.Now().Add(initialHelloTimeout+time.Second)) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } message2, err := client.RunUntilMessage(ctx) if message2 != nil { t.Fatalf("Received multiple messages, already have %+v, also got %+v", message, message2) } if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(message, "bye"); err != nil { t.Error(err) } else if message.Bye.Reason != "hello_timeout" { t.Errorf("Expected \"hello_timeout\" reason, got %+v", message.Bye) } } func TestExpectClientHelloUnsupportedVersion(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() params := TestBackendClientAuthParams{ UserId: testDefaultUserId, } if err := client.SendHelloParams(server.URL, "0.0", "", nil, params); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(message, "error"); err != nil { t.Error(err) } else if message.Error.Code != "invalid_hello_version" { t.Errorf("Expected \"invalid_hello_version\" reason, got %+v", message.Error) } } func TestClientHelloV1(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := 
context.WithTimeout(context.Background(), testTimeout) defer cancel() if hello, err := client.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } } } func TestClientHelloV2(t *testing.T) { for _, algo := range testHelloV2Algorithms { t.Run(algo, func(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHelloV2(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if hello.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } data := hub.decodeSessionId(hello.Hello.SessionId, publicSessionName) if data == nil { t.Fatalf("Could not decode session id: %s", hello.Hello.SessionId) } hub.mu.RLock() session := hub.sessions[data.Sid] hub.mu.RUnlock() if session == nil { t.Fatalf("Could not get session for id %+v", data) } var userdata map[string]string if err := json.Unmarshal(*session.UserData(), &userdata); err != nil { t.Fatal(err) } if expected := "Displayname " + testDefaultUserId; userdata["displayname"] != expected { t.Errorf("Expected displayname %s, got %s", expected, userdata["displayname"]) } }) } } func TestClientHelloV2_IssuedInFuture(t *testing.T) { for _, algo := range testHelloV2Algorithms { t.Run(algo, func(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() issuedAt := time.Now().Add(time.Minute) expiresAt := issuedAt.Add(time.Second) if err := client.SendHelloV2WithTimes(testDefaultUserId, issuedAt, 
expiresAt); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(message, "error"); err != nil { t.Error(err) } else if message.Error.Code != "token_not_valid_yet" { t.Errorf("Expected \"token_not_valid_yet\" reason, got %+v", message.Error) } }) } } func TestClientHelloV2_Expired(t *testing.T) { for _, algo := range testHelloV2Algorithms { t.Run(algo, func(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() issuedAt := time.Now().Add(-time.Minute) if err := client.SendHelloV2WithTimes(testDefaultUserId, issuedAt, issuedAt.Add(time.Second)); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(message, "error"); err != nil { t.Error(err) } else if message.Error.Code != "token_expired" { t.Errorf("Expected \"token_expired\" reason, got %+v", message.Error) } }) } } func TestClientHelloV2_IssuedAtMissing(t *testing.T) { for _, algo := range testHelloV2Algorithms { t.Run(algo, func(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() var issuedAt time.Time expiresAt := time.Now().Add(time.Minute) if err := client.SendHelloV2WithTimes(testDefaultUserId, issuedAt, expiresAt); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(message, "error"); err != nil { t.Error(err) } else if message.Error.Code != "token_not_valid_yet" { t.Errorf("Expected 
\"token_not_valid_yet\" reason, got %+v", message.Error) } }) } } func TestClientHelloV2_ExpiresAtMissing(t *testing.T) { for _, algo := range testHelloV2Algorithms { t.Run(algo, func(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() issuedAt := time.Now().Add(-time.Minute) var expiresAt time.Time if err := client.SendHelloV2WithTimes(testDefaultUserId, issuedAt, expiresAt); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(message, "error"); err != nil { t.Error(err) } else if message.Error.Code != "token_expired" { t.Errorf("Expected \"token_expired\" reason, got %+v", message.Error) } }) } } func TestClientHelloV2_CachedCapabilities(t *testing.T) { for _, algo := range testHelloV2Algorithms { t.Run(algo, func(t *testing.T) { hub, _, _, server := CreateHubForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() // Simulate old-style Nextcloud without capabilities for Hello V2. t.Setenv("SKIP_V2_CAPABILITIES", "1") client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHelloV1(testDefaultUserId + "1"); err != nil { t.Fatal(err) } hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if hello1.Hello.UserId != testDefaultUserId+"1" { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId+"1", hello1.Hello) } if hello1.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello1.Hello) } // Simulate updated Nextcloud with capabilities for Hello V2. 
t.Setenv("SKIP_V2_CAPABILITIES", "") client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHelloV2(testDefaultUserId + "2"); err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if hello2.Hello.UserId != testDefaultUserId+"2" { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId+"2", hello2.Hello) } if hello2.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello2.Hello) } }) } } func TestClientHelloWithSpaces(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() userId := "test user with spaces" if err := client.SendHello(userId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if hello, err := client.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != userId { t.Errorf("Expected \"%s\", got %+v", userId, hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } } } func TestClientHelloAllowAll(t *testing.T) { hub, _, _, server := CreateHubForTestWithConfig(t, func(server *httptest.Server) (*goconf.ConfigFile, error) { config, err := getTestConfig(server) if err != nil { return nil, err } config.RemoveOption("backend", "allowed") config.AddOption("backend", "allowall", "true") return config, nil }) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if hello, err := client.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } } } func TestClientHelloSessionLimit(t *testing.T) { 
for _, subtest := range clusteredTests { t.Run(subtest, func(t *testing.T) { var hub1 *Hub var hub2 *Hub var server1 *httptest.Server var server2 *httptest.Server if isLocalTest(t) { var router1 *mux.Router hub1, _, router1, server1 = CreateHubForTestWithConfig(t, func(server *httptest.Server) (*goconf.ConfigFile, error) { config, err := getTestConfig(server) if err != nil { return nil, err } config.RemoveOption("backend", "allowed") config.RemoveOption("backend", "secret") config.AddOption("backend", "backends", "backend1, backend2") config.AddOption("backend1", "url", server.URL+"/one") config.AddOption("backend1", "secret", string(testBackendSecret)) config.AddOption("backend1", "sessionlimit", "1") config.AddOption("backend2", "url", server.URL+"/two") config.AddOption("backend2", "secret", string(testBackendSecret)) return config, nil }) registerBackendHandlerUrl(t, router1, "/one") registerBackendHandlerUrl(t, router1, "/two") hub2 = hub1 server2 = server1 } else { var router1 *mux.Router var router2 *mux.Router hub1, hub2, router1, router2, server1, server2 = CreateClusteredHubsForTestWithConfig(t, func(server *httptest.Server) (*goconf.ConfigFile, error) { // Make sure all backends use the same server if server1 == nil { server1 = server } else { server = server1 } config, err := getTestConfig(server) if err != nil { return nil, err } config.RemoveOption("backend", "allowed") config.RemoveOption("backend", "secret") config.AddOption("backend", "backends", "backend1, backend2") config.AddOption("backend1", "url", server.URL+"/one") config.AddOption("backend1", "secret", string(testBackendSecret)) config.AddOption("backend1", "sessionlimit", "1") config.AddOption("backend2", "url", server.URL+"/two") config.AddOption("backend2", "secret", string(testBackendSecret)) return config, nil }) registerBackendHandlerUrl(t, router1, "/one") registerBackendHandlerUrl(t, router1, "/two") registerBackendHandlerUrl(t, router2, "/one") registerBackendHandlerUrl(t, router2, 
"/two") } client := NewTestClient(t, server1, hub1) defer client.CloseWithBye() params1 := TestBackendClientAuthParams{ UserId: testDefaultUserId, } if err := client.SendHelloParams(server1.URL+"/one", HelloVersionV1, "client", nil, params1); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if hello, err := client.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } } // The second client can't connect as it would exceed the session limit. client2 := NewTestClient(t, server2, hub2) defer client2.CloseWithBye() params2 := TestBackendClientAuthParams{ UserId: testDefaultUserId + "2", } if err := client2.SendHelloParams(server1.URL+"/one", HelloVersionV1, "client", nil, params2); err != nil { t.Fatal(err) } msg, err := client2.RunUntilMessage(ctx) if err != nil { t.Error(err) } else { if msg.Type != "error" || msg.Error == nil { t.Errorf("Expected error message, got %+v", msg) } else if msg.Error.Code != "session_limit_exceeded" { t.Errorf("Expected error \"session_limit_exceeded\", got %+v", msg.Error.Code) } } // The client can connect to a different backend. if err := client2.SendHelloParams(server1.URL+"/two", HelloVersionV1, "client", nil, params2); err != nil { t.Fatal(err) } if hello, err := client2.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId+"2" { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId+"2", hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } } // If the first client disconnects (and releases the session), a new one can connect. 
client.CloseWithBye() if err := client.WaitForClientRemoved(ctx); err != nil { t.Error(err) } client3 := NewTestClient(t, server2, hub2) defer client3.CloseWithBye() params3 := TestBackendClientAuthParams{ UserId: testDefaultUserId + "3", } if err := client3.SendHelloParams(server1.URL+"/one", HelloVersionV1, "client", nil, params3); err != nil { t.Fatal(err) } if hello, err := client3.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId+"3" { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId+"3", hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } } }) } } func TestSessionIdsUnordered(t *testing.T) { hub, _, _, server := CreateHubForTest(t) publicSessionIds := make([]string, 0) for i := 0; i < 20; i++ { client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if hello, err := client.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello) break } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) break } data := hub.decodeSessionId(hello.Hello.SessionId, publicSessionName) if data == nil { t.Errorf("Could not decode session id: %s", hello.Hello.SessionId) break } hub.mu.RLock() session := hub.sessions[data.Sid] hub.mu.RUnlock() if session == nil { t.Errorf("Could not get session for id %+v", data) break } publicSessionIds = append(publicSessionIds, session.PublicId()) } } if len(publicSessionIds) == 0 { t.Fatal("no session ids decoded") } larger := 0 smaller := 0 prevSid := "" for i, sid := range publicSessionIds { if i > 0 { if sid > prevSid { larger++ } else if sid < prevSid { smaller-- } else { t.Error("should not have received the same session id 
twice") } } prevSid = sid } // Public session ids should not be ordered. if len(publicSessionIds) == larger { t.Error("the session ids are all larger than the previous ones") } else if len(publicSessionIds) == smaller { t.Error("the session ids are all smaller than the previous ones") } } func TestClientHelloResume(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } if hello.Hello.ResumeId == "" { t.Errorf("Expected resume id, got %+v", hello.Hello) } } client.Close() if err := client.WaitForClientRemoved(ctx); err != nil { t.Error(err) } client = NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHelloResume(hello.Hello.ResumeId); err != nil { t.Fatal(err) } hello2, err := client.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello2.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello2.Hello) } if hello2.Hello.SessionId != hello.Hello.SessionId { t.Errorf("Expected session id %s, got %+v", hello.Hello.SessionId, hello2.Hello) } if hello2.Hello.ResumeId != hello.Hello.ResumeId { t.Errorf("Expected resume id %s, got %+v", hello.Hello.ResumeId, hello2.Hello) } } } func TestClientHelloResumeExpired(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() 
	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}

	client.Close()
	if err := client.WaitForClientRemoved(ctx); err != nil {
		t.Error(err)
	}

	// Perform housekeeping in the future, this will cause the session to be
	// cleaned up after it is expired.
	performHousekeeping(hub, time.Now().Add(sessionExpireDuration+time.Second)).Wait()

	client = NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	// Resuming the now-expired session must be rejected.
	if err := client.SendHelloResume(hello.Hello.ResumeId); err != nil {
		t.Fatal(err)
	}
	msg, err := client.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if msg.Type != "error" || msg.Error == nil {
			t.Errorf("Expected error message, got %+v", msg)
		} else if msg.Error.Code != "no_such_session" {
			t.Errorf("Expected error \"no_such_session\", got %+v", msg.Error.Code)
		}
	}
}

// TestClientHelloResumeTakeover checks that resuming a session from a second
// connection takes it over and disconnects the first connection with a "bye"
// message giving the reason "session_resumed".
func TestClientHelloResumeTakeover(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()

	if err := client1.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}

	// Resume the session from a second connection while the first is still open.
	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()

	if err := client2.SendHelloResume(hello.Hello.ResumeId); err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello2.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello2.Hello)
		}
		if hello2.Hello.SessionId != hello.Hello.SessionId {
			t.Errorf("Expected session id %s, got %+v", hello.Hello.SessionId, hello2.Hello)
		}
		if hello2.Hello.ResumeId != hello.Hello.ResumeId {
			t.Errorf("Expected resume id %s, got %+v", hello.Hello.ResumeId, hello2.Hello)
		}
	}

	// The first client got disconnected with a reason in a "Bye" message.
	msg, err := client1.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if msg.Type != "bye" || msg.Bye == nil {
			t.Errorf("Expected bye message, got %+v", msg)
		} else if msg.Bye.Reason != "session_resumed" {
			t.Errorf("Expected reason \"session_resumed\", got %+v", msg.Bye.Reason)
		}
	}

	// After the "bye", the first connection must be closed by the server.
	if msg, err := client1.RunUntilMessage(ctx); err == nil {
		t.Errorf("Expected error but received %+v", msg)
	} else if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) {
		t.Errorf("Expected close error but received %+v", err)
	}
}

// TestClientHelloResumeOtherHub checks that sessions created before a
// (simulated) restart of the hub can not be resumed afterwards, even if a new
// session reuses the same internal sid.
func TestClientHelloResumeOtherHub(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}

	client.Close()
	if err := client.WaitForClientRemoved(ctx); err != nil {
		t.Error(err)
	}

	// Simulate a restart of the hub.
	// Reset the internal sid counter and close all existing sessions, like a
	// freshly started hub would have.
	hub.sid.Store(0)
	sessions := make([]Session, 0)
	hub.mu.Lock()
	for _, session := range hub.sessions {
		sessions = append(sessions, session)
	}
	hub.mu.Unlock()
	for _, session := range sessions {
		session.Close()
	}
	hub.mu.Lock()
	count := len(hub.sessions)
	hub.mu.Unlock()
	if count > 0 {
		t.Errorf("Should have removed all sessions (still has %d)", count)
	}

	// The new client will get the same (internal) sid for his session.
	newClient := NewTestClient(t, server, hub)
	defer newClient.CloseWithBye()

	if err := newClient.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	if hello, err := newClient.RunUntilHello(ctx); err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}

	// The previous session (which had the same internal sid) can't be resumed.
	client = NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHelloResume(hello.Hello.ResumeId); err != nil {
		t.Fatal(err)
	}
	msg, err := client.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if msg.Type != "error" || msg.Error == nil {
			t.Errorf("Expected error message, got %+v", msg)
		} else if msg.Error.Code != "no_such_session" {
			t.Errorf("Expected error \"no_such_session\", got %+v", msg.Error.Code)
		}
	}

	// Expire old sessions
	hub.performHousekeeping(time.Now().Add(2 * sessionExpireDuration))
}

func TestClientHelloResumePublicId(t *testing.T) {
	// Test that a client can't resume a "public" session of another user.
	hub, _, _, server := CreateHubForTest(t)

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()
	if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if hello1.Hello.SessionId == hello2.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
	}

	recipient2 := MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello2.Hello.SessionId,
	}

	// Send a message so client 2 learns the public sender id of client 1.
	data := "from-1-to-2"
	client1.SendMessage(recipient2, data) // nolint

	var payload string
	var sender *MessageServerMessageSender
	if err := checkReceiveClientMessageWithSender(ctx, client2, "session", hello1.Hello, &payload, &sender); err != nil {
		t.Error(err)
	} else if payload != data {
		t.Errorf("Expected payload %s, got %s", data, payload)
	}

	client1.Close()
	if err := client1.WaitForClientRemoved(ctx); err != nil {
		t.Error(err)
	}

	client1 = NewTestClient(t, server, hub)
	defer client1.CloseWithBye()

	// Can't resume a session with the id received from messages of a client.
	if err := client1.SendHelloResume(sender.SessionId); err != nil {
		t.Fatal(err)
	}
	msg, err := client1.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if msg.Type != "error" || msg.Error == nil {
			t.Errorf("Expected error message, got %+v", msg)
		} else if msg.Error.Code != "no_such_session" {
			t.Errorf("Expected error \"no_such_session\", got %+v", msg.Error.Code)
		}
	}

	// Expire old sessions
	hub.performHousekeeping(time.Now().Add(2 * sessionExpireDuration))
}

// TestClientHelloByeResume checks that a session that was terminated with an
// explicit "bye" message can not be resumed afterwards.
func TestClientHelloByeResume(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}

	// End the session explicitly before closing the connection.
	if err := client.SendBye(); err != nil {
		t.Fatal(err)
	}
	if message, err := client.RunUntilMessage(ctx); err != nil {
		t.Error(err)
	} else {
		if err := checkMessageType(message, "bye"); err != nil {
			t.Error(err)
		}
	}

	client.Close()
	if err := client.WaitForSessionRemoved(ctx, hello.Hello.SessionId); err != nil {
		t.Error(err)
	}
	if err := client.WaitForClientRemoved(ctx); err != nil {
		t.Error(err)
	}

	client = NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHelloResume(hello.Hello.ResumeId); err != nil {
		t.Fatal(err)
	}
	msg, err := client.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if msg.Type != "error" || msg.Error == nil {
			t.Errorf("Expected \"error\", got %+v", *msg)
		} else if msg.Error.Code != "no_such_session" {
			t.Errorf("Expected error \"no_such_session\", got %+v", *msg)
		}
	}
}

// TestClientHelloResumeAndJoin checks that a resumed session can still join
// a room.
func TestClientHelloResumeAndJoin(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}

	client.Close()
	if err := client.WaitForClientRemoved(ctx); err != nil {
		t.Error(err)
	}

	client = NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHelloResume(hello.Hello.ResumeId); err != nil {
		t.Fatal(err)
	}
	hello2, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		if hello2.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello2.Hello)
		}
		if hello2.Hello.SessionId != hello.Hello.SessionId {
			t.Errorf("Expected session id %s, got %+v", hello.Hello.SessionId, hello2.Hello)
		}
		if hello2.Hello.ResumeId != hello.Hello.ResumeId {
			t.Errorf("Expected resume id %s, got %+v", hello.Hello.ResumeId, hello2.Hello)
		}
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}
}

// TestClientHelloClient checks the "hello" handshake of a regular client.
func TestClientHelloClient(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHelloClient(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	if hello, err := client.RunUntilHello(ctx); err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}
}

// TestClientHelloClient_V3Api checks that a "v1" backend URL gets upgraded to
// the "v3" API when the backend advertises the "signaling-v3" feature.
func TestClientHelloClient_V3Api(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	params := TestBackendClientAuthParams{
		UserId: testDefaultUserId,
	}
	// The "/api/v1/signaling/" URL will be changed to use "v3" as the "signaling-v3"
	// feature is returned by the capabilities endpoint.
	if err := client.SendHelloParams(server.URL+"/ocs/v2.php/apps/spreed/api/v1/signaling/backend", HelloVersionV1, "client", nil, params); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	if hello, err := client.RunUntilHello(ctx); err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != testDefaultUserId {
			t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}
}

// TestClientHelloInternal checks the "hello" handshake of internal clients
// which have no associated user id.
func TestClientHelloInternal(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHelloInternal(); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	if hello, err := client.RunUntilHello(ctx); err != nil {
		t.Error(err)
	} else {
		if hello.Hello.UserId != "" {
			t.Errorf("Expected empty user id, got %+v", hello.Hello)
		}
		if hello.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", hello.Hello)
		}
		if hello.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", hello.Hello)
		}
	}
}

// TestClientMessageToSessionId checks that messages can be sent to a specific
// session id, both on a single hub and across clustered hubs.
func TestClientMessageToSessionId(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)

				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()
			if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
				t.Fatal(err)
			}
			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()
			if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
				t.Fatal(err)
			}

			ctx, cancel :=
				context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}
			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			if hello1.Hello.SessionId == hello2.Hello.SessionId {
				t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
			}

			recipient1 := MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}
			recipient2 := MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello2.Hello.SessionId,
			}

			// Exchange one message in each direction.
			data1 := "from-1-to-2"
			client1.SendMessage(recipient2, data1) // nolint
			data2 := "from-2-to-1"
			client2.SendMessage(recipient1, data2) // nolint

			var payload string
			if err := checkReceiveClientMessage(ctx, client1, "session", hello2.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data2 {
				t.Errorf("Expected payload %s, got %s", data2, payload)
			}
			if err := checkReceiveClientMessage(ctx, client2, "session", hello1.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data1 {
				t.Errorf("Expected payload %s, got %s", data1, payload)
			}
		})
	}
}

// TestClientControlToSessionId checks that control messages can be sent to a
// specific session id, both on a single hub and across clustered hubs.
func TestClientControlToSessionId(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)

				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()
			if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
				t.Fatal(err)
			}
			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()
			if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
				t.Fatal(err)
			}

			ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}
			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			if hello1.Hello.SessionId == hello2.Hello.SessionId {
				t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
			}

			recipient1 := MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}
			recipient2 := MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello2.Hello.SessionId,
			}

			// Exchange one control message in each direction.
			data1 := "from-1-to-2"
			client1.SendControl(recipient2, data1) // nolint
			data2 := "from-2-to-1"
			client2.SendControl(recipient1, data2) // nolint

			var payload string
			if err := checkReceiveClientControl(ctx, client1, "session", hello2.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data2 {
				t.Errorf("Expected payload %s, got %s", data2, payload)
			}
			if err := checkReceiveClientControl(ctx, client2, "session", hello1.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data1 {
				t.Errorf("Expected payload %s, got %s", data1, payload)
			}
		})
	}
}

// TestClientControlMissingPermissions checks that control messages sent from
// a session without the "control" permission are silently dropped.
func TestClientControlMissingPermissions(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()
	if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if hello1.Hello.SessionId == hello2.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
	}

	session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession)
	if session1 == nil {
		t.Fatalf("Session %s does not exist", hello1.Hello.SessionId)
	}
	session2 := hub.GetSessionByPublicId(hello2.Hello.SessionId).(*ClientSession)
	if session2 == nil {
		t.Fatalf("Session %s does not exist", hello2.Hello.SessionId)
	}

	// Client 1 may not send control messages (will be ignored).
	session1.SetPermissions([]Permission{
		PERMISSION_MAY_PUBLISH_AUDIO,
		PERMISSION_MAY_PUBLISH_VIDEO,
	})
	// Client 2 may send control messages.
	session2.SetPermissions([]Permission{
		PERMISSION_MAY_PUBLISH_AUDIO,
		PERMISSION_MAY_PUBLISH_VIDEO,
		PERMISSION_MAY_CONTROL,
	})

	recipient1 := MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}
	recipient2 := MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello2.Hello.SessionId,
	}

	data1 := "from-1-to-2"
	client1.SendControl(recipient2, data1) // nolint
	data2 := "from-2-to-1"
	client2.SendControl(recipient1, data2) // nolint

	// Only client 2's control message may arrive.
	var payload string
	if err := checkReceiveClientControl(ctx, client1, "session", hello2.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data2 {
		t.Errorf("Expected payload %s, got %s", data2, payload)
	}

	// Use a short timeout to confirm that nothing was delivered to client 2.
	ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel2()
	if err := checkReceiveClientMessage(ctx2, client2, "session", hello1.Hello, &payload); err != nil {
		if err != ErrNoMessageReceived {
			t.Error(err)
		}
	} else {
		t.Errorf("Expected no payload, got %+v", payload)
	}
}

// TestClientMessageToUserId checks that messages can be addressed to a user
// id instead of a specific session id.
func TestClientMessageToUserId(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()
	if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if hello1.Hello.SessionId == hello2.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
	} else if hello1.Hello.UserId == hello2.Hello.UserId {
		t.Fatalf("Expected different user ids, got %s twice", hello1.Hello.UserId)
	}

	recipient1 := MessageClientMessageRecipient{
		Type:   "user",
		UserId: hello1.Hello.UserId,
	}
	recipient2 := MessageClientMessageRecipient{
		Type:   "user",
		UserId: hello2.Hello.UserId,
	}

	data1 := "from-1-to-2"
	client1.SendMessage(recipient2, data1) // nolint
	data2 := "from-2-to-1"
	client2.SendMessage(recipient1, data2) // nolint

	var payload string
	if err := checkReceiveClientMessage(ctx, client1, "user", hello2.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data2 {
		t.Errorf("Expected payload %s, got %s", data2, payload)
	}
	if err := checkReceiveClientMessage(ctx, client2, "user", hello1.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data1 {
		t.Errorf("Expected payload %s, got %s", data1, payload)
	}
}

// TestClientControlToUserId checks that control messages can be addressed to
// a user id instead of a specific session id.
func TestClientControlToUserId(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()
	if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if hello1.Hello.SessionId == hello2.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
	} else if hello1.Hello.UserId == hello2.Hello.UserId {
		t.Fatalf("Expected different user ids, got %s twice", hello1.Hello.UserId)
	}

	recipient1 := MessageClientMessageRecipient{
		Type:   "user",
		UserId: hello1.Hello.UserId,
	}
	recipient2 := MessageClientMessageRecipient{
		Type:   "user",
		UserId: hello2.Hello.UserId,
	}

	data1 := "from-1-to-2"
	client1.SendControl(recipient2, data1) // nolint
	data2 := "from-2-to-1"
	client2.SendControl(recipient1, data2) // nolint

	var payload string
	if err := checkReceiveClientControl(ctx, client1, "user", hello2.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data2 {
		t.Errorf("Expected payload %s, got %s", data2, payload)
	}
	if err := checkReceiveClientControl(ctx, client2, "user", hello1.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data1 {
		t.Errorf("Expected payload %s, got %s", data1, payload)
	}
}

// TestClientMessageToUserIdMultipleSessions checks that a message addressed
// to a user id is delivered to all sessions of that user.
func TestClientMessageToUserIdMultipleSessions(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	// User "2" connects with two parallel sessions.
	client2a := NewTestClient(t, server, hub)
	defer client2a.CloseWithBye()
	if err := client2a.SendHello(testDefaultUserId + "2"); err != nil {
		t.Fatal(err)
	}
	client2b := NewTestClient(t, server, hub)
	defer client2b.CloseWithBye()
	if err := client2b.SendHello(testDefaultUserId + "2"); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}
	hello2a, err := client2a.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}
	hello2b, err := client2b.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if hello1.Hello.SessionId == hello2a.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
	} else if hello1.Hello.SessionId == hello2b.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
	} else if hello2a.Hello.SessionId == hello2b.Hello.SessionId {
		t.Fatalf("Expected different session ids, got %s twice", hello2a.Hello.SessionId)
	}
	if hello1.Hello.UserId == hello2a.Hello.UserId {
		t.Fatalf("Expected different user ids, got %s twice", hello1.Hello.UserId)
	} else if hello1.Hello.UserId == hello2b.Hello.UserId {
		t.Fatalf("Expected different user ids, got %s twice", hello1.Hello.UserId)
	} else if hello2a.Hello.UserId != hello2b.Hello.UserId {
		t.Fatalf("Expected the same user ids, got %s and %s", hello2a.Hello.UserId, hello2b.Hello.UserId)
	}

	recipient := MessageClientMessageRecipient{
		Type:   "user",
		UserId: hello2a.Hello.UserId,
	}

	data1 := "from-1-to-2"
	client1.SendMessage(recipient, data1) // nolint

	// Both clients will receive the message as it was sent to the user.
	var payload string
	if err := checkReceiveClientMessage(ctx, client2a, "user", hello1.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data1 {
		t.Errorf("Expected payload %s, got %s", data1, payload)
	}
	if err := checkReceiveClientMessage(ctx, client2b, "user", hello1.Hello, &payload); err != nil {
		t.Error(err)
	} else if payload != data1 {
		t.Errorf("Expected payload %s, got %s", data1, payload)
	}
}

// WaitForUsersJoined waits until both clients have received the "joined"
// events for both sessions, failing the test on timeout.
func WaitForUsersJoined(ctx context.Context, t *testing.T, client1 *TestClient, hello1 *ServerMessage, client2 *TestClient, hello2 *ServerMessage) {
	// We will receive "joined" events for all clients. The ordering is not
	// defined as messages are processed and sent by asynchronous event handlers.
	if err := client1.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil {
		t.Error(err)
	}
	if err := client2.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil {
		t.Error(err)
	}
}

// TestClientMessageToRoom checks that messages can be sent to all sessions in
// a room, both on a single hub and across clustered hubs.
func TestClientMessageToRoom(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)

				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()
			if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
				t.Fatal(err)
			}
			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}
			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()
			if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
				t.Fatal(err)
			}
			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			if hello1.Hello.SessionId == hello2.Hello.SessionId {
				t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
			} else if hello1.Hello.UserId == hello2.Hello.UserId {
				t.Fatalf("Expected different user ids, got %s twice", hello1.Hello.UserId)
			}

			// Join room by id.
			roomId := "test-room"
			if room, err := client1.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			// Give message processing some time.
			time.Sleep(10 * time.Millisecond)
			if room, err := client2.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2)

			recipient := MessageClientMessageRecipient{
				Type: "room",
			}

			data1 := "from-1-to-2"
			client1.SendMessage(recipient, data1) // nolint
			data2 := "from-2-to-1"
			client2.SendMessage(recipient, data2) // nolint

			var payload string
			if err := checkReceiveClientMessage(ctx, client1, "room", hello2.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data2 {
				t.Errorf("Expected payload %s, got %s", data2, payload)
			}
			if err := checkReceiveClientMessage(ctx, client2, "room", hello1.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data1 {
				t.Errorf("Expected payload %s, got %s", data1, payload)
			}
		})
	}
}

// TestClientControlToRoom checks that control messages can be sent to all
// sessions in a room, both on a single hub and across clustered hubs.
func TestClientControlToRoom(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)

				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()
			if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
				t.Fatal(err)
			}
			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}
			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()
			if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
				t.Fatal(err)
			}
			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			if hello1.Hello.SessionId == hello2.Hello.SessionId {
				t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId)
			} else if hello1.Hello.UserId == hello2.Hello.UserId {
				t.Fatalf("Expected different user ids, got %s twice", hello1.Hello.UserId)
			}

			// Join room by id.
			roomId := "test-room"
			if room, err := client1.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			// Give message processing some time.
			time.Sleep(10 * time.Millisecond)
			if room, err := client2.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2)

			recipient := MessageClientMessageRecipient{
				Type: "room",
			}

			data1 := "from-1-to-2"
			client1.SendControl(recipient, data1) // nolint
			data2 := "from-2-to-1"
			client2.SendControl(recipient, data2) // nolint

			var payload string
			if err := checkReceiveClientControl(ctx, client1, "room", hello2.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data2 {
				t.Errorf("Expected payload %s, got %s", data2, payload)
			}
			if err := checkReceiveClientControl(ctx, client2, "room", hello1.Hello, &payload); err != nil {
				t.Error(err)
			} else if payload != data1 {
				t.Errorf("Expected payload %s, got %s", data1, payload)
			}
		})
	}
}

// TestJoinRoom checks the basic join/leave flow for a room.
func TestJoinRoom(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()

	if err := client.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	// We will receive a "joined" event.
	if err := client.RunUntilJoined(ctx, hello.Hello); err != nil {
		t.Error(err)
	}

	// Leave room.
if room, err := client.JoinRoom(ctx, ""); err != nil { t.Fatal(err) } else if room.Room.RoomId != "" { t.Fatalf("Expected empty room, got %s", room.Room.RoomId) } } func TestJoinRoomTwice(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } else if !bytes.Equal(testRoomProperties, *room.Room.Properties) { t.Fatalf("Expected room properties %s, got %s", string(testRoomProperties), string(*room.Room.Properties)) } // We will receive a "joined" event. if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } msg := &ClientMessage{ Id: "ABCD", Type: "room", Room: &RoomClientMessage{ RoomId: roomId, SessionId: roomId + "-" + client.publicId + "-2", }, } if err := client.WriteJSON(msg); err != nil { t.Fatal(err) } message, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if msg.Id != message.Id { t.Errorf("expected message id %s, got %s", msg.Id, message.Id) } else if err := checkMessageType(message, "error"); err != nil { t.Fatal(err) } else if expected := "already_joined"; message.Error.Code != expected { t.Errorf("expected error %s, got %s", expected, message.Error.Code) } else if message.Error.Details == nil { t.Fatal("expected error details") } var roomMsg RoomErrorDetails if err := json.Unmarshal(message.Error.Details, &roomMsg); err != nil { t.Fatal(err) } else if roomMsg.Room == nil { t.Fatalf("expected room details, got %+v", message) } if 
roomMsg.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %+v", roomId, roomMsg.Room) } else if !bytes.Equal(testRoomProperties, *roomMsg.Room.Properties) { t.Fatalf("Expected room properties %s, got %s", string(testRoomProperties), string(*roomMsg.Room.Properties)) } } func TestExpectAnonymousJoinRoom(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(authAnonymousUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello.Hello.UserId != "" { t.Errorf("Expected an anonymous user, got %+v", hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } if hello.Hello.ResumeId == "" { t.Errorf("Expected resume id, got %+v", hello.Hello) } } // Perform housekeeping in the future, this will cause the connection to // be terminated because the anonymous client didn't join a room. performHousekeeping(hub, time.Now().Add(anonmyousJoinRoomTimeout+time.Second)) message, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) } if err := checkMessageType(message, "bye"); err != nil { t.Error(err) } else if message.Bye.Reason != "room_join_timeout" { t.Errorf("Expected \"room_join_timeout\" reason, got %+v", message.Bye) } // Both the client and the session get removed from the hub. 
if err := client.WaitForClientRemoved(ctx); err != nil { t.Error(err) } if err := client.WaitForSessionRemoved(ctx, hello.Hello.SessionId); err != nil { t.Error(err) } } func TestExpectAnonymousJoinRoomAfterLeave(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(authAnonymousUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello.Hello.UserId != "" { t.Errorf("Expected an anonymous user, got %+v", hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } if hello.Hello.ResumeId == "" { t.Errorf("Expected resume id, got %+v", hello.Hello) } } // Join room by id. roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Perform housekeeping in the future, this will keep the connection as the // session joined a room. performHousekeeping(hub, time.Now().Add(anonmyousJoinRoomTimeout+time.Second)) // No message about the closing is sent to the new connection. 
	// Use a short deadline: we only want to prove that nothing arrives.
	ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel2()
	if message, err := client.RunUntilMessage(ctx2); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded {
		t.Error(err)
	} else if message != nil {
		t.Errorf("Expected no message, got %+v", message)
	}

	// Leave room
	if room, err := client.JoinRoom(ctx, ""); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != "" {
		t.Fatalf("Expected room %s, got %s", "", room.Room.RoomId)
	}

	// Perform housekeeping in the future, this will cause the connection to
	// be terminated because the anonymous client didn't join a room.
	performHousekeeping(hub, time.Now().Add(anonmyousJoinRoomTimeout+time.Second))

	// The server announces the disconnect with a "bye" carrying the timeout reason.
	message, err := client.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	}
	if err := checkMessageType(message, "bye"); err != nil {
		t.Error(err)
	} else if message.Bye.Reason != "room_join_timeout" {
		t.Errorf("Expected \"room_join_timeout\" reason, got %+v", message.Bye)
	}

	// Both the client and the session get removed from the hub.
	if err := client.WaitForClientRemoved(ctx); err != nil {
		t.Error(err)
	}
	if err := client.WaitForSessionRemoved(ctx, hello.Hello.SessionId); err != nil {
		t.Error(err)
	}
}

// TestJoinRoomChange verifies that a session can switch from one room to
// another and finally leave, receiving a "joined" event for each room entered.
func TestJoinRoomChange(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()
	if err := client.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	// We will receive a "joined" event.
	if err := client.RunUntilJoined(ctx, hello.Hello); err != nil {
		t.Error(err)
	}

	// Change room.
roomId = "other-test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Leave room. if room, err := client.JoinRoom(ctx, ""); err != nil { t.Fatal(err) } else if room.Room.RoomId != "" { t.Fatalf("Expected empty room, got %s", room.Room.RoomId) } } func TestJoinMultiple(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if hello1.Hello.SessionId == hello2.Hello.SessionId { t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId) } // Join room by id (first client). roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil { t.Error(err) } // Join room by id (second client). if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event for the first and the second client. 
if err := client2.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } // The first client will also receive a "joined" event from the second client. if err := client1.RunUntilJoined(ctx, hello2.Hello); err != nil { t.Error(err) } // Leave room. if room, err := client1.JoinRoom(ctx, ""); err != nil { t.Fatal(err) } else if room.Room.RoomId != "" { t.Fatalf("Expected empty room, got %s", room.Room.RoomId) } // The second client will now receive a "left" event if err := client2.RunUntilLeft(ctx, hello1.Hello); err != nil { t.Error(err) } if room, err := client2.JoinRoom(ctx, ""); err != nil { t.Fatal(err) } else if room.Room.RoomId != "" { t.Fatalf("Expected empty room, got %s", room.Room.RoomId) } } func TestJoinDisplaynamesPermission(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } session2 := hub.GetSessionByPublicId(hello2.Hello.SessionId).(*ClientSession) if session2 == nil { t.Fatalf("Session %s does not exist", hello2.Hello.SessionId) } // Client 2 may not receive display names. session2.SetPermissions([]Permission{PERMISSION_HIDE_DISPLAYNAMES}) // Join room by id (first client). roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. 
if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil { t.Error(err) } // Join room by id (second client). if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event for the first and the second client. if events, unexpected, err := client2.RunUntilJoinedAndReturn(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } else { if len(unexpected) > 0 { t.Errorf("Received unexpected messages: %+v", unexpected) } else if len(events) != 2 { t.Errorf("Expected two event, got %+v", events) } else if events[0].User != nil { t.Errorf("Expected empty userdata for first event, got %+v", events[0].User) } else if events[1].User != nil { t.Errorf("Expected empty userdata for second event, got %+v", events[1].User) } } // The first client will also receive a "joined" event from the second client. if events, unexpected, err := client1.RunUntilJoinedAndReturn(ctx, hello2.Hello); err != nil { t.Error(err) } else { if len(unexpected) > 0 { t.Errorf("Received unexpected messages: %+v", unexpected) } else if len(events) != 1 { t.Errorf("Expected one event, got %+v", events) } else if events[0].User == nil { t.Errorf("Expected userdata for first event, got nothing") } } } func TestInitialRoomPermissions(t *testing.T) { hub, _, _, server := CreateHubForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. 
roomId := "test-room-initial-permissions" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } session := hub.GetSessionByPublicId(hello.Hello.SessionId).(*ClientSession) if session == nil { t.Fatalf("Session %s does not exist", hello.Hello.SessionId) } if !session.HasPermission(PERMISSION_MAY_PUBLISH_AUDIO) { t.Errorf("Session %s should have %s, got %+v", session.PublicId(), PERMISSION_MAY_PUBLISH_AUDIO, session.permissions) } if session.HasPermission(PERMISSION_MAY_PUBLISH_VIDEO) { t.Errorf("Session %s should not have %s, got %+v", session.PublicId(), PERMISSION_MAY_PUBLISH_VIDEO, session.permissions) } } func TestJoinRoomSwitchClient(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room-slow" msg := &ClientMessage{ Id: "ABCD", Type: "room", Room: &RoomClientMessage{ RoomId: roomId, SessionId: roomId + "-" + hello.Hello.SessionId, }, } if err := client.WriteJSON(msg); err != nil { t.Fatal(err) } // Wait a bit to make sure request is sent before closing client. time.Sleep(1 * time.Millisecond) client.Close() if err := client.WaitForClientRemoved(ctx); err != nil { t.Fatal(err) } // The client needs some time to reconnect. 
time.Sleep(200 * time.Millisecond) client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHelloResume(hello.Hello.ResumeId); err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello2.Hello.UserId != testDefaultUserId { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId, hello2.Hello) } if hello2.Hello.SessionId != hello.Hello.SessionId { t.Errorf("Expected session id %s, got %+v", hello.Hello.SessionId, hello2.Hello) } if hello2.Hello.ResumeId != hello.Hello.ResumeId { t.Errorf("Expected resume id %s, got %+v", hello.Hello.ResumeId, hello2.Hello) } } room, err := client2.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } if err := checkUnexpectedClose(err); err != nil { t.Fatal(err) } if err := checkMessageType(room, "room"); err != nil { t.Fatal(err) } if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client2.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Leave room. 
if room, err := client2.JoinRoom(ctx, ""); err != nil { t.Fatal(err) } else if room.Room.RoomId != "" { t.Fatalf("Expected empty room, got %s", room.Room.RoomId) } } func TestGetRealUserIP(t *testing.T) { REMOTE_ATTR := "192.168.1.2" request := &http.Request{ RemoteAddr: REMOTE_ATTR, } if ip := getRealUserIP(request); ip != REMOTE_ATTR { t.Errorf("Expected %s but got %s", REMOTE_ATTR, ip) } X_REAL_IP := "192.168.10.11" request.Header = http.Header{ http.CanonicalHeaderKey("x-real-ip"): []string{X_REAL_IP}, } if ip := getRealUserIP(request); ip != X_REAL_IP { t.Errorf("Expected %s but got %s", X_REAL_IP, ip) } // "X-Real-IP" has preference before "X-Forwarded-For" X_FORWARDED_FOR_IP := "192.168.20.21" X_FORWARDED_FOR := X_FORWARDED_FOR_IP + ", 192.168.30.32" request.Header = http.Header{ http.CanonicalHeaderKey("x-real-ip"): []string{X_REAL_IP}, http.CanonicalHeaderKey("x-forwarded-for"): []string{X_FORWARDED_FOR}, } if ip := getRealUserIP(request); ip != X_REAL_IP { t.Errorf("Expected %s but got %s", X_REAL_IP, ip) } request.Header = http.Header{ http.CanonicalHeaderKey("x-forwarded-for"): []string{X_FORWARDED_FOR}, } if ip := getRealUserIP(request); ip != X_FORWARDED_FOR_IP { t.Errorf("Expected %s but got %s", X_FORWARDED_FOR_IP, ip) } } func TestClientMessageToSessionIdWhileDisconnected(t *testing.T) { hub, _, _, server := CreateHubForTest(t) client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if hello1.Hello.SessionId == hello2.Hello.SessionId { t.Fatalf("Expected different session ids, 
got %s twice", hello1.Hello.SessionId) } client2.Close() if err := client2.WaitForClientRemoved(ctx); err != nil { t.Error(err) } recipient2 := MessageClientMessageRecipient{ Type: "session", SessionId: hello2.Hello.SessionId, } // The two chat messages should get combined into one when receiving pending messages. chat_refresh := "{\"type\":\"chat\",\"chat\":{\"refresh\":true}}" var data1 map[string]interface{} if err := json.Unmarshal([]byte(chat_refresh), &data1); err != nil { t.Fatal(err) } client1.SendMessage(recipient2, data1) // nolint client1.SendMessage(recipient2, data1) // nolint // Simulate some time until client resumes the session. time.Sleep(10 * time.Millisecond) client2 = NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHelloResume(hello2.Hello.ResumeId); err != nil { t.Fatal(err) } hello3, err := client2.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello3.Hello.UserId != testDefaultUserId+"2" { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId+"2", hello3.Hello) } if hello3.Hello.SessionId != hello2.Hello.SessionId { t.Errorf("Expected session id %s, got %+v", hello2.Hello.SessionId, hello3.Hello) } if hello3.Hello.ResumeId != hello2.Hello.ResumeId { t.Errorf("Expected resume id %s, got %+v", hello2.Hello.ResumeId, hello3.Hello) } } var payload map[string]interface{} if err := checkReceiveClientMessage(ctx, client2, "session", hello1.Hello, &payload); err != nil { t.Error(err) } else if !reflect.DeepEqual(payload, data1) { t.Errorf("Expected payload %+v, got %+v", data1, payload) } ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel2() if err := checkReceiveClientMessage(ctx2, client2, "session", hello1.Hello, &payload); err != nil { if err != ErrNoMessageReceived { t.Error(err) } } else { t.Errorf("Expected no payload, got %+v", payload) } } func TestRoomParticipantsListUpdateWhileDisconnected(t *testing.T) { hub, _, _, server := CreateHubForTest(t) 
client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if hello1.Hello.SessionId == hello2.Hello.SessionId { t.Fatalf("Expected different session ids, got %s twice", hello1.Hello.SessionId) } // Join room by id. roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Give message processing some time. time.Sleep(10 * time.Millisecond) if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2) // Simulate request from the backend that somebody joined the call. users := []map[string]interface{}{ { "sessionId": "the-session-id", "inCall": 1, }, } room := hub.getRoom(roomId) if room == nil { t.Fatalf("Could not find room %s", roomId) } room.PublishUsersInCallChanged(users, users) if err := checkReceiveClientEvent(ctx, client2, "update", nil); err != nil { t.Error(err) } client2.Close() if err := client2.WaitForClientRemoved(ctx); err != nil { t.Error(err) } room.PublishUsersInCallChanged(users, users) // Give asynchronous events some time to be processed. 
time.Sleep(100 * time.Millisecond) recipient2 := MessageClientMessageRecipient{ Type: "session", SessionId: hello2.Hello.SessionId, } chat_refresh := "{\"type\":\"chat\",\"chat\":{\"refresh\":true}}" var data1 map[string]interface{} if err := json.Unmarshal([]byte(chat_refresh), &data1); err != nil { t.Fatal(err) } client1.SendMessage(recipient2, data1) // nolint client2 = NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHelloResume(hello2.Hello.ResumeId); err != nil { t.Fatal(err) } hello3, err := client2.RunUntilHello(ctx) if err != nil { t.Error(err) } else { if hello3.Hello.UserId != testDefaultUserId+"2" { t.Errorf("Expected \"%s\", got %+v", testDefaultUserId+"2", hello3.Hello) } if hello3.Hello.SessionId != hello2.Hello.SessionId { t.Errorf("Expected session id %s, got %+v", hello2.Hello.SessionId, hello3.Hello) } if hello3.Hello.ResumeId != hello2.Hello.ResumeId { t.Errorf("Expected resume id %s, got %+v", hello2.Hello.ResumeId, hello3.Hello) } } // The participants list update event is triggered again after the session resume. // TODO(jojo): Check contents of message and try with multiple users. 
if err := checkReceiveClientEvent(ctx, client2, "update", nil); err != nil { t.Error(err) } var payload map[string]interface{} if err := checkReceiveClientMessage(ctx, client2, "session", hello1.Hello, &payload); err != nil { t.Error(err) } else if !reflect.DeepEqual(payload, data1) { t.Errorf("Expected payload %+v, got %+v", data1, payload) } ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel2() if err := checkReceiveClientMessage(ctx2, client2, "session", hello1.Hello, &payload); err != nil { if err != ErrNoMessageReceived { t.Error(err) } } else { t.Errorf("Expected no payload, got %+v", payload) } } func TestClientTakeoverRoomSession(t *testing.T) { for _, subtest := range clusteredTests { t.Run(subtest, func(t *testing.T) { RunTestClientTakeoverRoomSession(t) }) } } func RunTestClientTakeoverRoomSession(t *testing.T) { var hub1 *Hub var hub2 *Hub var server1 *httptest.Server var server2 *httptest.Server if isLocalTest(t) { hub1, _, _, server1 = CreateHubForTest(t) hub2 = hub1 server2 = server1 } else { hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t) } client1 := NewTestClient(t, server1, hub1) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. 
roomId := "test-room-takeover-room-session" roomSessionid := "room-session-id" if room, err := client1.JoinRoomWithRoomSession(ctx, roomId, roomSessionid); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } if hubRoom := hub1.getRoom(roomId); hubRoom == nil { t.Fatalf("Room %s does not exist", roomId) } if session1 := hub1.GetSessionByPublicId(hello1.Hello.SessionId); session1 == nil { t.Fatalf("There should be a session %s", hello1.Hello.SessionId) } client3 := NewTestClient(t, server2, hub2) defer client3.CloseWithBye() if err := client3.SendHello(testDefaultUserId + "3"); err != nil { t.Fatal(err) } hello3, err := client3.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if room, err := client3.JoinRoomWithRoomSession(ctx, roomId, roomSessionid+"other"); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Wait until both users have joined. WaitForUsersJoined(ctx, t, client1, hello1, client3, hello3) client2 := NewTestClient(t, server2, hub2) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if room, err := client2.JoinRoomWithRoomSession(ctx, roomId, roomSessionid); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // The first client got disconnected with a reason in a "Bye" message. 
msg, err := client1.RunUntilMessage(ctx) if err != nil { t.Error(err) } else { if msg.Type != "bye" || msg.Bye == nil { t.Errorf("Expected bye message, got %+v", msg) } else if msg.Bye.Reason != "room_session_reconnected" { t.Errorf("Expected reason \"room_session_reconnected\", got %+v", msg.Bye.Reason) } } if msg, err := client1.RunUntilMessage(ctx); err == nil { t.Errorf("Expected error but received %+v", msg) } else if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { t.Errorf("Expected close error but received %+v", err) } // The first session has been closed if session1 := hub1.GetSessionByPublicId(hello1.Hello.SessionId); session1 != nil { t.Errorf("The session %s should have been removed", hello1.Hello.SessionId) } // The new client will receive "joined" events for the existing client3 and // himself. if err := client2.RunUntilJoined(ctx, hello3.Hello, hello2.Hello); err != nil { t.Error(err) } // No message about the closing is sent to the new connection. ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel2() if message, err := client2.RunUntilMessage(ctx2); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded { t.Error(err) } else if message != nil { t.Errorf("Expected no message, got %+v", message) } // The permanently connected client will receive a "left" event from the // overridden session and a "joined" for the new session. In that order as // both were on the same server. 
if err := client3.RunUntilLeft(ctx, hello1.Hello); err != nil { t.Error(err) } if err := client3.RunUntilJoined(ctx, hello2.Hello); err != nil { t.Error(err) } } func TestClientSendOfferPermissions(t *testing.T) { hub, _, _, server := CreateHubForTest(t) mcu, err := NewTestMCU() if err != nil { t.Fatal(err) } else if err := mcu.Start(); err != nil { t.Fatal(err) } defer mcu.Stop() hub.SetMcu(mcu) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Give message processing some time. time.Sleep(10 * time.Millisecond) if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2) session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession) if session1 == nil { t.Fatalf("Session %s does not exist", hello1.Hello.SessionId) } session2 := hub.GetSessionByPublicId(hello2.Hello.SessionId).(*ClientSession) if session2 == nil { t.Fatalf("Session %s does not exist", hello2.Hello.SessionId) } // Client 1 is the moderator session1.SetPermissions([]Permission{PERMISSION_MAY_PUBLISH_MEDIA, PERMISSION_MAY_PUBLISH_SCREEN}) // Client 2 is a guest participant. 
	session2.SetPermissions([]Permission{})

	// Client 2 may not send an offer (he doesn't have the necessary permissions).
	if err := client2.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "sendoffer",
		Sid:      "12345",
		RoomType: "screen",
	}); err != nil {
		t.Fatal(err)
	}

	if msg, err := client2.RunUntilMessage(ctx); err != nil {
		t.Fatal(err)
	} else {
		if err := checkMessageError(msg, "not_allowed"); err != nil {
			t.Fatal(err)
		}
	}

	// Client 1 publishes a screen stream first, which is required before
	// "sendoffer" can deliver an offer for it to another session.
	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "offer",
		Sid:      "12345",
		RoomType: "screen",
		Payload: map[string]interface{}{
			"sdp": MockSdpOfferAudioAndVideo,
		},
	}); err != nil {
		t.Fatal(err)
	}

	if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil {
		t.Fatal(err)
	}

	// Client 1 may send an offer.
	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello2.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "sendoffer",
		Sid:      "54321",
		RoomType: "screen",
	}); err != nil {
		t.Fatal(err)
	}

	// The sender won't get a reply...
	ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel2()

	if message, err := client1.RunUntilMessage(ctx2); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded {
		t.Error(err)
	} else if message != nil {
		t.Errorf("Expected no message, got %+v", message)
	}

	// ...but the other peer will get an offer.
	if err := client2.RunUntilOffer(ctx, MockSdpOfferAudioAndVideo); err != nil {
		t.Fatal(err)
	}
}

// TestClientSendOfferPermissionsAudioOnly verifies that a session holding
// only PERMISSION_MAY_PUBLISH_AUDIO gets "not_allowed" for an audio+video
// offer but may publish an audio-only offer.
func TestClientSendOfferPermissionsAudioOnly(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	mcu, err := NewTestMCU()
	if err != nil {
		t.Fatal(err)
	} else if err := mcu.Start(); err != nil {
		t.Fatal(err)
	}
	defer mcu.Stop()

	hub.SetMcu(mcu)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client1.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
		t.Error(err)
	}

	session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession)
	if session1 == nil {
		t.Fatalf("Session %s does not exist", hello1.Hello.SessionId)
	}

	// Client is allowed to send audio only.
	session1.SetPermissions([]Permission{PERMISSION_MAY_PUBLISH_AUDIO})

	// Client may not send an offer with audio and video.
	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "offer",
		Sid:      "54321",
		RoomType: "video",
		Payload: map[string]interface{}{
			"sdp": MockSdpOfferAudioAndVideo,
		},
	}); err != nil {
		t.Fatal(err)
	}

	if msg, err := client1.RunUntilMessage(ctx); err != nil {
		t.Fatal(err)
	} else {
		if err := checkMessageError(msg, "not_allowed"); err != nil {
			t.Fatal(err)
		}
	}

	// Client may send an offer (audio only).
	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "offer",
		Sid:      "54321",
		RoomType: "video",
		Payload: map[string]interface{}{
			"sdp": MockSdpOfferAudioOnly,
		},
	}); err != nil {
		t.Fatal(err)
	}

	if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioOnly); err != nil {
		t.Fatal(err)
	}
}

// TestClientSendOfferPermissionsAudioVideo verifies that an established
// audio+video publisher is closed when a backend permissions update removes
// the video permission.
func TestClientSendOfferPermissionsAudioVideo(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	mcu, err := NewTestMCU()
	if err != nil {
		t.Fatal(err)
	} else if err := mcu.Start(); err != nil {
		t.Fatal(err)
	}
	defer mcu.Stop()

	hub.SetMcu(mcu)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client1.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
		t.Error(err)
	}

	session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession)
	if session1 == nil {
		t.Fatalf("Session %s does not exist", hello1.Hello.SessionId)
	}

	// Client is allowed to send audio and video.
	session1.SetPermissions([]Permission{PERMISSION_MAY_PUBLISH_AUDIO, PERMISSION_MAY_PUBLISH_VIDEO})

	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "offer",
		Sid:      "54321",
		RoomType: "video",
		Payload: map[string]interface{}{
			"sdp": MockSdpOfferAudioAndVideo,
		},
	}); err != nil {
		t.Fatal(err)
	}

	if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil {
		t.Fatal(err)
	}

	// Client is no longer allowed to send video, this will stop the publisher.
	msg := &BackendServerRoomRequest{
		Type: "participants",
		Participants: &BackendRoomParticipantsRequest{
			Changed: []map[string]interface{}{
				{
					"sessionId":   roomId + "-" + hello1.Hello.SessionId,
					"permissions": []Permission{PERMISSION_MAY_PUBLISH_AUDIO},
				},
			},
			Users: []map[string]interface{}{
				{
					"sessionId":   roomId + "-" + hello1.Hello.SessionId,
					"permissions": []Permission{PERMISSION_MAY_PUBLISH_AUDIO},
				},
			},
		},
	}

	data, err := json.Marshal(msg)
	if err != nil {
		t.Fatal(err)
	}
	res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		t.Error(err)
	}
	if res.StatusCode != 200 {
		t.Errorf("Expected successful request, got %s: %s", res.Status, string(body))
	}

	ctx2, cancel2 := context.WithTimeout(ctx, time.Second)
	defer cancel2()

	pubs := mcu.GetPublishers()
	if len(pubs) != 1 {
		t.Fatalf("expected one publisher, got %+v", pubs)
	}

	// Busy-wait (with a deadline) until the publisher has been closed
	// asynchronously; failing the test if the deadline expires first.
loop:
	for {
		if err := ctx2.Err(); err != nil {
			t.Errorf("publisher was not closed: %s", err)
		}

		for _, pub := range pubs {
			if pub.isClosed() {
				break loop
			}
		}

		// Give some time to async processing.
		time.Sleep(time.Millisecond)
	}
}

// TestClientSendOfferPermissionsAudioVideoMedia verifies that a publisher
// created under PERMISSION_MAY_PUBLISH_MEDIA stays open when a backend
// permissions update still includes that permission.
func TestClientSendOfferPermissionsAudioVideoMedia(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	mcu, err := NewTestMCU()
	if err != nil {
		t.Fatal(err)
	} else if err := mcu.Start(); err != nil {
		t.Fatal(err)
	}
	defer mcu.Stop()

	hub.SetMcu(mcu)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client1.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
		t.Error(err)
	}

	session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession)
	if session1 == nil {
		t.Fatalf("Session %s does not exist", hello1.Hello.SessionId)
	}

	// Client is allowed to send audio and video.
	session1.SetPermissions([]Permission{PERMISSION_MAY_PUBLISH_MEDIA})

	// Client may send an offer (audio and video).
	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "offer",
		Sid:      "54321",
		RoomType: "video",
		Payload: map[string]interface{}{
			"sdp": MockSdpOfferAudioAndVideo,
		},
	}); err != nil {
		t.Fatal(err)
	}

	if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil {
		t.Fatal(err)
	}

	// Permissions are updated below, but PERMISSION_MAY_PUBLISH_MEDIA is kept,
	// so the publisher must NOT be stopped (the loop further down fails the
	// test if it gets closed).
	msg := &BackendServerRoomRequest{
		Type: "participants",
		Participants: &BackendRoomParticipantsRequest{
			Changed: []map[string]interface{}{
				{
					"sessionId":   roomId + "-" + hello1.Hello.SessionId,
					"permissions": []Permission{PERMISSION_MAY_PUBLISH_MEDIA, PERMISSION_MAY_CONTROL},
				},
			},
			Users: []map[string]interface{}{
				{
					"sessionId":   roomId + "-" + hello1.Hello.SessionId,
					"permissions": []Permission{PERMISSION_MAY_PUBLISH_MEDIA, PERMISSION_MAY_CONTROL},
				},
			},
		},
	}

	data, err := json.Marshal(msg)
	if err != nil {
		t.Fatal(err)
	}
	res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data)
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		t.Error(err)
	}
	if res.StatusCode != 200 {
		t.Errorf("Expected successful request, got %s: %s", res.Status, string(body))
	}

	ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel2()

	pubs := mcu.GetPublishers()
	if len(pubs) != 1 {
		t.Fatalf("expected one publisher, got %+v", pubs)
	}

	// Poll until the deadline expires; the publisher being closed in the
	// meantime is a failure here (the permission to publish was retained).
loop:
	for {
		if err := ctx2.Err(); err != nil {
			if err != context.DeadlineExceeded {
				t.Errorf("error while waiting for publisher: %s", err)
			}
			break
		}

		for _, pub := range pubs {
			if pub.isClosed() {
				t.Errorf("publisher was closed")
				break loop
			}
		}

		// Give some time to async processing.
		time.Sleep(time.Millisecond)
	}
}

// TestClientRequestOfferNotInRoom verifies that "requestoffer" is rejected
// with "not_allowed" until both the requesting session and the recipient are
// in the same room AND in the call; afterwards the offer is delivered. Runs
// for both the local and the clustered setup.
func TestClientRequestOfferNotInRoom(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)
				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			mcu, err := NewTestMCU()
			if err != nil {
				t.Fatal(err)
			} else if err := mcu.Start(); err != nil {
				t.Fatal(err)
			}
			defer mcu.Stop()

			hub1.SetMcu(mcu)
			hub2.SetMcu(mcu)

			ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()
			if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
				t.Fatal(err)
			}
			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()
			if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
				t.Fatal(err)
			}
			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			// Join room by id.
			roomId := "test-room"
			if room, err := client1.JoinRoomWithRoomSession(ctx, roomId, "roomsession1"); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			// We will receive a "joined" event.
			if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
				t.Error(err)
			}

			// Client 1 publishes a screen stream that client 2 will later request.
			if err := client1.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "offer",
				Sid:      "54321",
				RoomType: "screen",
				Payload: map[string]interface{}{
					"sdp": MockSdpOfferAudioAndVideo,
				},
			}); err != nil {
				t.Fatal(err)
			}

			if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil {
				t.Fatal(err)
			}

			// Client 2 may not request an offer (he is not in the room yet).
			if err := client2.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "requestoffer",
				Sid:      "12345",
				RoomType: "screen",
			}); err != nil {
				t.Fatal(err)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Fatal(err)
			} else {
				if err := checkMessageError(msg, "not_allowed"); err != nil {
					t.Fatal(err)
				}
			}

			if room, err := client2.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			// We will receive a "joined" event.
			if err := client1.RunUntilJoined(ctx, hello2.Hello); err != nil {
				t.Error(err)
			}
			if err := client2.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil {
				t.Error(err)
			}

			// Client 2 may not request an offer (he is not in the call yet).
			if err := client2.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "requestoffer",
				Sid:      "12345",
				RoomType: "screen",
			}); err != nil {
				t.Fatal(err)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Fatal(err)
			} else {
				if err := checkMessageError(msg, "not_allowed"); err != nil {
					t.Fatal(err)
				}
			}

			// Simulate request from the backend that somebody joined the call.
			users1 := []map[string]interface{}{
				{
					"sessionId": hello2.Hello.SessionId,
					"inCall":    1,
				},
			}
			room2 := hub2.getRoom(roomId)
			if room2 == nil {
				t.Fatalf("Could not find room %s", roomId)
			}
			room2.PublishUsersInCallChanged(users1, users1)
			if err := checkReceiveClientEvent(ctx, client1, "update", nil); err != nil {
				t.Error(err)
			}
			if err := checkReceiveClientEvent(ctx, client2, "update", nil); err != nil {
				t.Error(err)
			}

			// Client 2 may not request an offer (recipient is not in the call yet).
			if err := client2.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "requestoffer",
				Sid:      "12345",
				RoomType: "screen",
			}); err != nil {
				t.Fatal(err)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Fatal(err)
			} else {
				if err := checkMessageError(msg, "not_allowed"); err != nil {
					t.Fatal(err)
				}
			}

			// Simulate request from the backend that somebody joined the call.
			users2 := []map[string]interface{}{
				{
					"sessionId": hello1.Hello.SessionId,
					"inCall":    1,
				},
			}
			room1 := hub1.getRoom(roomId)
			if room1 == nil {
				t.Fatalf("Could not find room %s", roomId)
			}
			room1.PublishUsersInCallChanged(users2, users2)
			if err := checkReceiveClientEvent(ctx, client1, "update", nil); err != nil {
				t.Error(err)
			}
			if err := checkReceiveClientEvent(ctx, client2, "update", nil); err != nil {
				t.Error(err)
			}

			// Client 2 may request an offer now (both are in the same room and call).
			if err := client2.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "requestoffer",
				Sid:      "12345",
				RoomType: "screen",
			}); err != nil {
				t.Fatal(err)
			}

			if err := client2.RunUntilOffer(ctx, MockSdpOfferAudioAndVideo); err != nil {
				t.Fatal(err)
			}
		})
	}
}

func TestNoSendBetweenSessionsOnDifferentBackends(t *testing.T) {
	// Clients can't send messages to sessions connected from other backends.
	hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	params1 := TestBackendClientAuthParams{
		UserId: "user1",
	}
	if err := client1.SendHelloParams(server.URL+"/one", HelloVersionV1, "client", nil, params1); err != nil {
		t.Fatal(err)
	}
	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()
	params2 := TestBackendClientAuthParams{
		UserId: "user2",
	}
	if err := client2.SendHelloParams(server.URL+"/two", HelloVersionV1, "client", nil, params2); err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	recipient1 := MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}
	recipient2 := MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello2.Hello.SessionId,
	}

	// Send errors are deliberately ignored here; the check is that no message
	// arrives at the other side.
	data1 := "from-1-to-2"
	client1.SendMessage(recipient2, data1) // nolint
	data2 := "from-2-to-1"
	client2.SendMessage(recipient1, data2) // nolint

	var payload string
	ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel2()
	if err := checkReceiveClientMessage(ctx2, client1, "session", hello2.Hello, &payload); err != nil {
		if err != ErrNoMessageReceived {
			t.Error(err)
		}
	} else {
		t.Errorf("Expected no payload, got %+v", payload)
	}

	ctx3, cancel3 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel3()
	if err := checkReceiveClientMessage(ctx3, client2, "session", hello1.Hello, &payload); err != nil {
		if err != ErrNoMessageReceived {
			t.Error(err)
		}
	} else {
		t.Errorf("Expected no payload, got %+v", payload)
	}
}

// TestNoSameRoomOnDifferentBackends verifies that the same room id used on
// two different backends results in two separate Room instances and that
// room messages do not cross the backend boundary.
func TestNoSameRoomOnDifferentBackends(t *testing.T) {
	hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	params1 := TestBackendClientAuthParams{
		UserId: "user1",
	}
	if err := client1.SendHelloParams(server.URL+"/one", HelloVersionV1, "client", nil, params1); err != nil {
		t.Fatal(err)
	}
	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	client2 := NewTestClient(t, server, hub)
	defer client2.CloseWithBye()
	params2 := TestBackendClientAuthParams{
		UserId: "user2",
	}
	if err := client2.SendHelloParams(server.URL+"/two", HelloVersionV1, "client", nil, params2); err != nil {
		t.Fatal(err)
	}
	hello2, err := client2.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client1.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	msg1, err := client1.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	}
	if err := client1.checkMessageJoined(msg1, hello1.Hello); err != nil {
		t.Error(err)
	}

	if room, err := client2.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	msg2, err := client2.RunUntilMessage(ctx)
	if err != nil {
		t.Error(err)
	}
	if err := client2.checkMessageJoined(msg2, hello2.Hello); err != nil {
		t.Error(err)
	}

	// Same room id on different backends must map to two distinct rooms.
	hub.ru.RLock()
	var rooms []*Room
	for _, room := range hub.rooms {
		defer room.Close()
		rooms = append(rooms, room)
	}
	hub.ru.RUnlock()

	if len(rooms) != 2 {
		t.Errorf("Expected 2 rooms, got %+v", rooms)
	}

	if rooms[0].IsEqual(rooms[1]) {
		t.Errorf("Rooms should be different: %+v", rooms)
	}

	recipient := MessageClientMessageRecipient{
		Type: "room",
	}

	data1 := "from-1-to-2"
	client1.SendMessage(recipient, data1) // nolint
	data2 := "from-2-to-1"
	client2.SendMessage(recipient, data2) // nolint

	var payload string
	ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel2()
	if err := checkReceiveClientMessage(ctx2, client1, "session", hello2.Hello, &payload); err != nil {
		if err != ErrNoMessageReceived {
			t.Error(err)
		}
	} else {
		t.Errorf("Expected no payload, got %+v", payload)
	}

	ctx3, cancel3 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel3()
	if err := checkReceiveClientMessage(ctx3, client2, "session", hello1.Hello, &payload); err != nil {
		if err != ErrNoMessageReceived {
			t.Error(err)
		}
	} else {
		t.Errorf("Expected no payload, got %+v", payload)
	}
}

// TestClientSendOffer verifies that "sendoffer" from a publishing session
// delivers an offer to the target session (and no reply to the sender), for
// both the local and the clustered setup.
func TestClientSendOffer(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)
				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			mcu, err := NewTestMCU()
			if err != nil {
				t.Fatal(err)
			} else if err := mcu.Start(); err != nil {
				t.Fatal(err)
			}
			defer mcu.Stop()

			hub1.SetMcu(mcu)
			hub2.SetMcu(mcu)

			ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()
			if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
				t.Fatal(err)
			}
			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()
			if err := client2.SendHello(testDefaultUserId + "2"); err != nil {
				t.Fatal(err)
			}
			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			// Join room by id.
			roomId := "test-room"
			if room, err := client1.JoinRoomWithRoomSession(ctx, roomId, "roomsession1"); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			// Give message processing some time.
			time.Sleep(10 * time.Millisecond)

			if room, err := client2.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			} else if room.Room.RoomId != roomId {
				t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
			}

			WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2)

			// Client 1 publishes first, then instructs the server to send the
			// offer for that stream to client 2.
			if err := client1.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello1.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "offer",
				Sid:      "12345",
				RoomType: "video",
				Payload: map[string]interface{}{
					"sdp": MockSdpOfferAudioAndVideo,
				},
			}); err != nil {
				t.Fatal(err)
			}

			if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioAndVideo); err != nil {
				t.Fatal(err)
			}

			if err := client1.SendMessage(MessageClientMessageRecipient{
				Type:      "session",
				SessionId: hello2.Hello.SessionId,
			}, MessageClientMessageData{
				Type:     "sendoffer",
				RoomType: "video",
			}); err != nil {
				t.Fatal(err)
			}

			// The sender won't get a reply...
			ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond)
			defer cancel2()

			if message, err := client1.RunUntilMessage(ctx2); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded {
				t.Error(err)
			} else if message != nil {
				t.Errorf("Expected no message, got %+v", message)
			}

			// ...but the other peer will get an offer.
			if err := client2.RunUntilOffer(ctx, MockSdpOfferAudioAndVideo); err != nil {
				t.Fatal(err)
			}
		})
	}
}

// TestClientUnshareScreen verifies that an "unshareScreen" message closes
// the screen publisher after the (shortened) cleanup delay.
func TestClientUnshareScreen(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	mcu, err := NewTestMCU()
	if err != nil {
		t.Fatal(err)
	} else if err := mcu.Start(); err != nil {
		t.Fatal(err)
	}
	defer mcu.Stop()

	hub.SetMcu(mcu)

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	client1 := NewTestClient(t, server, hub)
	defer client1.CloseWithBye()
	if err := client1.SendHello(testDefaultUserId + "1"); err != nil {
		t.Fatal(err)
	}
	hello1, err := client1.RunUntilHello(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Join room by id.
	roomId := "test-room"
	if room, err := client1.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
		t.Error(err)
	}

	session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession)
	if session1 == nil {
		t.Fatalf("Session %s does not exist", hello1.Hello.SessionId)
	}

	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "offer",
		Sid:      "54321",
		RoomType: "screen",
		Payload: map[string]interface{}{
			"sdp": MockSdpOfferAudioOnly,
		},
	}); err != nil {
		t.Fatal(err)
	}

	if err := client1.RunUntilAnswer(ctx, MockSdpAnswerAudioOnly); err != nil {
		t.Fatal(err)
	}

	publisher := mcu.GetPublisher(hello1.Hello.SessionId)
	if publisher == nil {
		t.Fatalf("No publisher for %s found", hello1.Hello.SessionId)
	} else if publisher.isClosed() {
		t.Fatalf("Publisher %s should not be closed", hello1.Hello.SessionId)
	}

	// Shorten the cleanup delay so the test does not have to wait for the
	// production value; restore it afterwards.
	old := cleanupScreenPublisherDelay
	cleanupScreenPublisherDelay = time.Millisecond
	defer func() {
		cleanupScreenPublisherDelay = old
	}()

	if err := client1.SendMessage(MessageClientMessageRecipient{
		Type:      "session",
		SessionId: hello1.Hello.SessionId,
	}, MessageClientMessageData{
		Type:     "unshareScreen",
		Sid:      "54321",
		RoomType: "screen",
	}); err != nil {
		t.Fatal(err)
	}

	time.Sleep(10 * time.Millisecond)

	if !publisher.isClosed() {
		t.Fatalf("Publisher %s should be closed", hello1.Hello.SessionId)
	}
}

// TestVirtualClientSessions verifies adding, updating and removing virtual
// sessions through an internal client: backend "add"/"remove" session
// requests are triggered, other participants receive joined/in-call/flags
// events for the virtual session, and messages addressed to the virtual
// session are delivered to the owning internal client. Runs for both the
// local and the clustered setup.
func TestVirtualClientSessions(t *testing.T) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
			var hub1 *Hub
			var hub2 *Hub
			var server1 *httptest.Server
			var server2 *httptest.Server
			if isLocalTest(t) {
				hub1, _, _, server1 = CreateHubForTest(t)
				hub2 = hub1
				server2 = server1
			} else {
				hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t)
			}

			ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
			defer cancel()

			client1 := NewTestClient(t, server1, hub1)
			defer client1.CloseWithBye()

			if err := client1.SendHello(testDefaultUserId); err != nil {
				t.Fatal(err)
			}

			hello1, err := client1.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}

			roomId := "test-room"
			if _, err := client1.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			}

			if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
				t.Error(err)
			}

			// The second client is an internal one (e.g. a SIP bridge) that
			// will own the virtual sessions.
			client2 := NewTestClient(t, server2, hub2)
			defer client2.CloseWithBye()

			if err := client2.SendHelloInternal(); err != nil {
				t.Fatal(err)
			}

			hello2, err := client2.RunUntilHello(ctx)
			if err != nil {
				t.Fatal(err)
			}
			session2 := hub2.GetSessionByPublicId(hello2.Hello.SessionId).(*ClientSession)
			if session2 == nil {
				t.Fatalf("Session %s does not exist", hello2.Hello.SessionId)
			}

			if _, err := client2.JoinRoom(ctx, roomId); err != nil {
				t.Fatal(err)
			}

			if err := client1.RunUntilJoined(ctx, hello2.Hello); err != nil {
				t.Error(err)
			}

			if msg, err := client1.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if msg, err := checkMessageParticipantsInCall(msg); err != nil {
				t.Error(err)
			} else if len(msg.Users) != 1 {
				t.Errorf("Expected one user, got %+v", msg)
			} else if v, ok := msg.Users[0]["internal"].(bool); !ok || !v {
				t.Errorf("Expected internal flag, got %+v", msg)
			} else if v, ok := msg.Users[0]["sessionId"].(string); !ok || v != hello2.Hello.SessionId {
				t.Errorf("Expected session id %s, got %+v", hello2.Hello.SessionId, msg)
			} else if v, ok := msg.Users[0]["inCall"].(float64); !ok || v != 3 {
				t.Errorf("Expected inCall flag 3, got %+v", msg)
			}

			_, unexpected, err := client2.RunUntilJoinedAndReturn(ctx, hello1.Hello, hello2.Hello)
			if err != nil {
				t.Error(err)
			}

			if len(unexpected) == 0 {
				if msg, err := client2.RunUntilMessage(ctx); err != nil {
					t.Error(err)
				} else {
					unexpected = append(unexpected, msg)
				}
			}

			if len(unexpected) != 1 {
				t.Fatalf("expected one message, got %+v", unexpected)
			}

			if msg, err := checkMessageParticipantsInCall(unexpected[0]); err != nil {
				t.Error(err)
			} else if len(msg.Users) != 1 {
				t.Errorf("Expected one user, got %+v", msg)
			} else if v, ok := msg.Users[0]["internal"].(bool); !ok || !v {
				t.Errorf("Expected internal flag, got %+v", msg)
			} else if v, ok := msg.Users[0]["sessionId"].(string); !ok || v != hello2.Hello.SessionId {
				t.Errorf("Expected session id %s, got %+v", hello2.Hello.SessionId, msg)
			} else if v, ok := msg.Users[0]["inCall"].(float64); !ok || v != FlagInCall|FlagWithAudio {
				t.Errorf("Expected inCall flag %d, got %+v", FlagInCall|FlagWithAudio, msg)
			}

			// calledCtx is cancelled by the session request handler; waiting
			// for it below confirms the backend "add" request was made.
			calledCtx, calledCancel := context.WithTimeout(ctx, time.Second)

			virtualSessionId := "virtual-session-id"
			virtualUserId := "virtual-user-id"
			generatedSessionId := GetVirtualSessionId(session2, virtualSessionId)

			setSessionRequestHandler(t, func(request *BackendClientSessionRequest) {
				defer calledCancel()
				if request.Action != "add" {
					t.Errorf("Expected action add, got %+v", request)
				} else if request.RoomId != roomId {
					t.Errorf("Expected room id %s, got %+v", roomId, request)
				} else if request.SessionId == generatedSessionId {
					t.Errorf("Expected generated session id %s, got %+v", generatedSessionId, request)
				} else if request.UserId != virtualUserId {
					t.Errorf("Expected session id %s, got %+v", virtualUserId, request)
				}
			})

			if err := client2.SendInternalAddSession(&AddSessionInternalClientMessage{
				CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
					SessionId: virtualSessionId,
					RoomId:    roomId,
				},
				UserId: virtualUserId,
				Flags:  FLAG_MUTED_SPEAKING,
			}); err != nil {
				t.Fatal(err)
			}

			<-calledCtx.Done()
			if err := calledCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
				t.Fatal(err)
			}

			// The virtual session is registered asynchronously; poll until it
			// shows up.
			virtualSessions := session2.GetVirtualSessions()
			for len(virtualSessions) == 0 {
				time.Sleep(time.Millisecond)
				virtualSessions = session2.GetVirtualSessions()
			}

			virtualSession := virtualSessions[0]
			if msg, err := client1.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if err := client1.checkMessageJoinedSession(msg, virtualSession.PublicId(), virtualUserId); err != nil {
				t.Error(err)
			}

			if msg, err := client1.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if msg, err := checkMessageParticipantsInCall(msg); err != nil {
				t.Error(err)
			} else if len(msg.Users) != 2 {
				t.Errorf("Expected two users, got %+v", msg)
			} else if v, ok := msg.Users[0]["internal"].(bool); !ok || !v {
				t.Errorf("Expected internal flag, got %+v", msg)
			} else if v, ok := msg.Users[0]["sessionId"].(string); !ok || v != hello2.Hello.SessionId {
				t.Errorf("Expected session id %s, got %+v", hello2.Hello.SessionId, msg)
			} else if v, ok := msg.Users[0]["inCall"].(float64); !ok || v != FlagInCall|FlagWithAudio {
				t.Errorf("Expected inCall flag %d, got %+v", FlagInCall|FlagWithAudio, msg)
			} else if v, ok := msg.Users[1]["virtual"].(bool); !ok || !v {
				t.Errorf("Expected virtual flag, got %+v", msg)
			} else if v, ok := msg.Users[1]["sessionId"].(string); !ok || v != virtualSession.PublicId() {
				t.Errorf("Expected session id %s, got %+v", virtualSession.PublicId(), msg)
			} else if v, ok := msg.Users[1]["inCall"].(float64); !ok || v != FlagInCall|FlagWithPhone {
				t.Errorf("Expected inCall flag %d, got %+v", FlagInCall|FlagWithPhone, msg)
			}

			if msg, err := client1.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if flags, err := checkMessageParticipantFlags(msg); err != nil {
				t.Error(err)
			} else if flags.RoomId != roomId {
				t.Errorf("Expected room id %s, got %+v", roomId, msg)
			} else if flags.SessionId != virtualSession.PublicId() {
				t.Errorf("Expected session id %s, got %+v", virtualSession.PublicId(), msg)
			} else if flags.Flags != FLAG_MUTED_SPEAKING {
				t.Errorf("Expected flags %d, got %+v", FLAG_MUTED_SPEAKING, msg)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if err := client2.checkMessageJoinedSession(msg, virtualSession.PublicId(), virtualUserId); err != nil {
				t.Error(err)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if msg, err := checkMessageParticipantsInCall(msg); err != nil {
				t.Error(err)
			} else if len(msg.Users) != 2 {
				t.Errorf("Expected two users, got %+v", msg)
			} else if v, ok := msg.Users[0]["internal"].(bool); !ok || !v {
				t.Errorf("Expected internal flag, got %+v", msg)
			} else if v, ok := msg.Users[0]["sessionId"].(string); !ok || v != hello2.Hello.SessionId {
				t.Errorf("Expected session id %s, got %+v", hello2.Hello.SessionId, msg)
			} else if v, ok := msg.Users[0]["inCall"].(float64); !ok || v != FlagInCall|FlagWithAudio {
				t.Errorf("Expected inCall flag %d, got %+v", FlagInCall|FlagWithAudio, msg)
			} else if v, ok := msg.Users[1]["virtual"].(bool); !ok || !v {
				t.Errorf("Expected virtual flag, got %+v", msg)
			} else if v, ok := msg.Users[1]["sessionId"].(string); !ok || v != virtualSession.PublicId() {
				t.Errorf("Expected session id %s, got %+v", virtualSession.PublicId(), msg)
			} else if v, ok := msg.Users[1]["inCall"].(float64); !ok || v != FlagInCall|FlagWithPhone {
				t.Errorf("Expected inCall flag %d, got %+v", FlagInCall|FlagWithPhone, msg)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if flags, err := checkMessageParticipantFlags(msg); err != nil {
				t.Error(err)
			} else if flags.RoomId != roomId {
				t.Errorf("Expected room id %s, got %+v", roomId, msg)
			} else if flags.SessionId != virtualSession.PublicId() {
				t.Errorf("Expected session id %s, got %+v", virtualSession.PublicId(), msg)
			} else if flags.Flags != FLAG_MUTED_SPEAKING {
				t.Errorf("Expected flags %d, got %+v", FLAG_MUTED_SPEAKING, msg)
			}

			// Clearing the flags must be broadcast to both clients.
			updatedFlags := uint32(0)
			if err := client2.SendInternalUpdateSession(&UpdateSessionInternalClientMessage{
				CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
					SessionId: virtualSessionId,
					RoomId:    roomId,
				},
				Flags: &updatedFlags,
			}); err != nil {
				t.Fatal(err)
			}

			if msg, err := client1.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if flags, err := checkMessageParticipantFlags(msg); err != nil {
				t.Error(err)
			} else if flags.RoomId != roomId {
				t.Errorf("Expected room id %s, got %+v", roomId, msg)
			} else if flags.SessionId != virtualSession.PublicId() {
				t.Errorf("Expected session id %s, got %+v", virtualSession.PublicId(), msg)
			} else if flags.Flags != 0 {
				t.Errorf("Expected flags %d, got %+v", 0, msg)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if flags, err := checkMessageParticipantFlags(msg); err != nil {
				t.Error(err)
			} else if flags.RoomId != roomId {
				t.Errorf("Expected room id %s, got %+v", roomId, msg)
			} else if flags.SessionId != virtualSession.PublicId() {
				t.Errorf("Expected session id %s, got %+v", virtualSession.PublicId(), msg)
			} else if flags.Flags != 0 {
				t.Errorf("Expected flags %d, got %+v", 0, msg)
			}

			// Re-arm the handler context to wait for the backend "remove"
			// request triggered further down.
			calledCtx, calledCancel = context.WithTimeout(ctx, time.Second)

			setSessionRequestHandler(t, func(request *BackendClientSessionRequest) {
				defer calledCancel()
				if request.Action != "remove" {
					t.Errorf("Expected action remove, got %+v", request)
				} else if request.RoomId != roomId {
					t.Errorf("Expected room id %s, got %+v", roomId, request)
				} else if request.SessionId == generatedSessionId {
					t.Errorf("Expected generated session id %s, got %+v", generatedSessionId, request)
				} else if request.UserId != virtualUserId {
					t.Errorf("Expected user id %s, got %+v", virtualUserId, request)
				}
			})

			// Messages to virtual sessions are sent to the associated client session.
			virtualRecipient := MessageClientMessageRecipient{
				Type:      "session",
				SessionId: virtualSession.PublicId(),
			}

			data := "message-to-virtual"
			client1.SendMessage(virtualRecipient, data) // nolint

			var payload string
			var sender *MessageServerMessageSender
			var recipient *MessageClientMessageRecipient
			if err := checkReceiveClientMessageWithSenderAndRecipient(ctx, client2, "session", hello1.Hello, &payload, &sender, &recipient); err != nil {
				t.Error(err)
			} else if recipient.SessionId != virtualSessionId {
				t.Errorf("Expected session id %s, got %+v", virtualSessionId, recipient)
			} else if payload != data {
				t.Errorf("Expected payload %s, got %s", data, payload)
			}

			data = "control-to-virtual"
			client1.SendControl(virtualRecipient, data) // nolint

			if err := checkReceiveClientControlWithSenderAndRecipient(ctx, client2, "session", hello1.Hello, &payload, &sender, &recipient); err != nil {
				t.Error(err)
			} else if recipient.SessionId != virtualSessionId {
				t.Errorf("Expected session id %s, got %+v", virtualSessionId, recipient)
			} else if payload != data {
				t.Errorf("Expected payload %s, got %s", data, payload)
			}

			if err := client2.SendInternalRemoveSession(&RemoveSessionInternalClientMessage{
				CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
					SessionId: virtualSessionId,
					RoomId:    roomId,
				},
				UserId: virtualUserId,
			}); err != nil {
				t.Fatal(err)
			}

			// Wait for the backend "remove" session request to be made.
			<-calledCtx.Done()
			if err := calledCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
				t.Fatal(err)
			}

			if msg, err := client1.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if err := client1.checkMessageRoomLeaveSession(msg, virtualSession.PublicId()); err != nil {
				t.Error(err)
			}

			if msg, err := client2.RunUntilMessage(ctx); err != nil {
				t.Error(err)
			} else if err := client2.checkMessageRoomLeaveSession(msg, virtualSession.PublicId()); err != nil {
				t.Error(err)
			}
		})
	}
}

// DoTestSwitchToOne runs the "switchto" scenario for a single session:
// the backend requests one room session to switch rooms, optionally with
// per-session details (map form) or as a plain session list (nil details).
// Only the targeted client must receive the switchto message.
func DoTestSwitchToOne(t *testing.T, details map[string]interface{}) {
	for _, subtest := range clusteredTests {
		t.Run(subtest, func(t *testing.T) {
var hub1 *Hub var hub2 *Hub var server1 *httptest.Server var server2 *httptest.Server if isLocalTest(t) { hub1, _, _, server1 = CreateHubForTest(t) hub2 = hub1 server2 = server1 } else { hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t) } client1 := NewTestClient(t, server1, hub1) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server2, hub2) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomSessionId1 := "roomsession1" roomId1 := "test-room" if room, err := client1.JoinRoomWithRoomSession(ctx, roomId1, roomSessionId1); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId1 { t.Fatalf("Expected room %s, got %s", roomId1, room.Room.RoomId) } roomSessionId2 := "roomsession2" if room, err := client2.JoinRoomWithRoomSession(ctx, roomId1, roomSessionId2); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId1 { t.Fatalf("Expected room %s, got %s", roomId1, room.Room.RoomId) } if err := client1.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } if err := client2.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } roomId2 := "test-room-2" var sessions json.RawMessage if details != nil { if sessions, err = json.Marshal(map[string]interface{}{ roomSessionId1: details, }); err != nil { t.Fatal(err) } } else { if sessions, err = json.Marshal([]string{ roomSessionId1, }); err != nil { t.Fatal(err) } } // Notify first client to switch to different room. 
msg := &BackendServerRoomRequest{ Type: "switchto", SwitchTo: &BackendRoomSwitchToMessageRequest{ RoomId: roomId2, Sessions: &sessions, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server2.URL+"/api/v1/room/"+roomId1, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } var detailsData json.RawMessage if details != nil { if detailsData, err = json.Marshal(details); err != nil { t.Fatal(err) } } if _, err := client1.RunUntilSwitchTo(ctx, roomId2, detailsData); err != nil { t.Error(err) } // The other client will not receive a message. ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel2() if message, err := client2.RunUntilMessage(ctx2); err != nil && err != ErrNoMessageReceived && err != context.DeadlineExceeded { t.Error(err) } else if message != nil { t.Errorf("Expected no message, got %+v", message) } }) } } func TestSwitchToOneMap(t *testing.T) { DoTestSwitchToOne(t, map[string]interface{}{ "foo": "bar", }) } func TestSwitchToOneList(t *testing.T) { DoTestSwitchToOne(t, nil) } func DoTestSwitchToMultiple(t *testing.T, details1 map[string]interface{}, details2 map[string]interface{}) { for _, subtest := range clusteredTests { t.Run(subtest, func(t *testing.T) { var hub1 *Hub var hub2 *Hub var server1 *httptest.Server var server2 *httptest.Server if isLocalTest(t) { hub1, _, _, server1 = CreateHubForTest(t) hub2 = hub1 server2 = server1 } else { hub1, hub2, server1, server2 = CreateClusteredHubsForTest(t) } client1 := NewTestClient(t, server1, hub1) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } client2 := NewTestClient(t, server2, hub2) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != 
nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomSessionId1 := "roomsession1" roomId1 := "test-room" if room, err := client1.JoinRoomWithRoomSession(ctx, roomId1, roomSessionId1); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId1 { t.Fatalf("Expected room %s, got %s", roomId1, room.Room.RoomId) } roomSessionId2 := "roomsession2" if room, err := client2.JoinRoomWithRoomSession(ctx, roomId1, roomSessionId2); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId1 { t.Fatalf("Expected room %s, got %s", roomId1, room.Room.RoomId) } if err := client1.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } if err := client2.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } roomId2 := "test-room-2" var sessions json.RawMessage if details1 != nil || details2 != nil { if sessions, err = json.Marshal(map[string]interface{}{ roomSessionId1: details1, roomSessionId2: details2, }); err != nil { t.Fatal(err) } } else { if sessions, err = json.Marshal([]string{ roomSessionId1, roomSessionId2, }); err != nil { t.Fatal(err) } } msg := &BackendServerRoomRequest{ Type: "switchto", SwitchTo: &BackendRoomSwitchToMessageRequest{ RoomId: roomId2, Sessions: &sessions, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server2.URL+"/api/v1/room/"+roomId1, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } var detailsData1 json.RawMessage if details1 != nil { if detailsData1, err = json.Marshal(details1); err != nil { t.Fatal(err) } } if _, err := client1.RunUntilSwitchTo(ctx, roomId2, 
detailsData1); err != nil { t.Error(err) } var detailsData2 json.RawMessage if details2 != nil { if detailsData2, err = json.Marshal(details2); err != nil { t.Fatal(err) } } if _, err := client2.RunUntilSwitchTo(ctx, roomId2, detailsData2); err != nil { t.Error(err) } }) } } func TestSwitchToMultipleMap(t *testing.T) { DoTestSwitchToMultiple(t, map[string]interface{}{ "foo": "bar", }, map[string]interface{}{ "bar": "baz", }) } func TestSwitchToMultipleList(t *testing.T) { DoTestSwitchToMultiple(t, nil, nil) } func TestSwitchToMultipleMixed(t *testing.T) { DoTestSwitchToMultiple(t, map[string]interface{}{ "foo": "bar", }, nil) } func TestGeoipOverrides(t *testing.T) { country1 := "DE" country2 := "IT" country3 := "site1" hub, _, _, _ := CreateHubForTestWithConfig(t, func(server *httptest.Server) (*goconf.ConfigFile, error) { conf, err := getTestConfig(server) if err != nil { return nil, err } conf.AddOption("geoip-overrides", "10.1.0.0/16", country1) conf.AddOption("geoip-overrides", "10.2.0.0/16", country2) conf.AddOption("geoip-overrides", "192.168.10.20", country3) return conf, err }) if country := hub.OnLookupCountry(&Client{addr: "127.0.0.1"}); country != loopback { t.Errorf("expected country %s, got %s", loopback, country) } if country := hub.OnLookupCountry(&Client{addr: "8.8.8.8"}); country != unknownCountry { t.Errorf("expected country %s, got %s", unknownCountry, country) } if country := hub.OnLookupCountry(&Client{addr: "10.1.1.2"}); country != country1 { t.Errorf("expected country %s, got %s", country1, country) } if country := hub.OnLookupCountry(&Client{addr: "10.2.1.2"}); country != country2 { t.Errorf("expected country %s, got %s", country2, country) } if country := hub.OnLookupCountry(&Client{addr: "192.168.10.20"}); country != strings.ToUpper(country3) { t.Errorf("expected country %s, got %s", strings.ToUpper(country3), country) } } func TestDialoutStatus(t *testing.T) { _, _, _, hub, _, server := CreateBackendServerForTest(t) internalClient := 
NewTestClient(t, server, hub) defer internalClient.CloseWithBye() if err := internalClient.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() _, err := internalClient.RunUntilHello(ctx) if err != nil { t.Fatal(err) } roomId := "12345" client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if _, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } callId := "call-123" stopped := make(chan struct{}) go func(client *TestClient) { defer close(stopped) msg, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) return } if msg.Type != "internal" || msg.Internal.Type != "dialout" { t.Errorf("expected internal dialout message, got %+v", msg) return } if msg.Internal.Dialout.RoomId != roomId { t.Errorf("expected room id %s, got %+v", roomId, msg) } response := &ClientMessage{ Id: msg.Id, Type: "internal", Internal: &InternalClientMessage{ Type: "dialout", Dialout: &DialoutInternalClientMessage{ Type: "status", RoomId: msg.Internal.Dialout.RoomId, Status: &DialoutStatusInternalClientMessage{ Status: "accepted", CallId: callId, }, }, }, } if err := client.WriteJSON(response); err != nil { t.Error(err) } }(internalClient) defer func() { <-stopped }() msg := &BackendServerRoomRequest{ Type: "dialout", Dialout: &BackendRoomDialoutRequest{ Number: "+1234567890", }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != http.StatusOK { t.Fatalf("Expected error %d, got %s: %s", 
http.StatusOK, res.Status, string(body)) } var response BackendServerRoomResponse if err := json.Unmarshal(body, &response); err != nil { t.Fatal(err) } if response.Type != "dialout" || response.Dialout == nil { t.Fatalf("expected type dialout, got %s", string(body)) } if response.Dialout.Error != nil { t.Fatalf("expected dialout success, got %s", string(body)) } if response.Dialout.CallId != callId { t.Errorf("expected call id %s, got %s", callId, string(body)) } key := "callstatus_" + callId if msg, err := client.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, key, map[string]interface{}{ "callid": callId, "status": "accepted", }, nil); err != nil { t.Error(err) } } if err := internalClient.SendInternalDialout(&DialoutInternalClientMessage{ RoomId: roomId, Type: "status", Status: &DialoutStatusInternalClientMessage{ CallId: callId, Status: "ringing", }, }); err != nil { t.Fatal(err) } if msg, err := client.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, key, map[string]interface{}{ "callid": callId, "status": "ringing", }, map[string]interface{}{ "callid": callId, "status": "accepted", }); err != nil { t.Error(err) } } old := removeCallStatusTTL defer func() { removeCallStatusTTL = old }() removeCallStatusTTL = 500 * time.Millisecond clearedCause := "cleared-call" if err := internalClient.SendInternalDialout(&DialoutInternalClientMessage{ RoomId: roomId, Type: "status", Status: &DialoutStatusInternalClientMessage{ CallId: callId, Status: "cleared", Cause: clearedCause, }, }); err != nil { t.Fatal(err) } if msg, err := client.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, key, map[string]interface{}{ "callid": callId, "status": "cleared", "cause": clearedCause, }, map[string]interface{}{ "callid": callId, "status": "ringing", }); err != nil { t.Error(err) } } ctx2, cancel := context.WithTimeout(ctx, 
removeCallStatusTTL*2) defer cancel() if msg, err := client.RunUntilMessage(ctx2); err != nil { t.Fatal(err) } else { if err := checkMessageTransientRemove(msg, key, map[string]interface{}{ "callid": callId, "status": "cleared", "cause": clearedCause, }); err != nil { t.Error(err) } } } nextcloud-spreed-signaling-1.2.4/janus_client.go000066400000000000000000000543501460321600400217640ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ /** * Contents heavily based on * https://github.com/notedit/janus-go/blob/master/janus.go * * Added error handling and improve functionality. */ package signaling import ( "bytes" "context" "encoding/json" "fmt" "log" "net/http" "strconv" "sync" "sync/atomic" "time" "github.com/gorilla/websocket" "github.com/notedit/janus-go" ) const ( /*! \brief Success (no error) */ JANUS_OK = 0 /*! \brief Unauthorized (can only happen when using apisecret/auth token) */ JANUS_ERROR_UNAUTHORIZED = 403 /*! \brief Unauthorized access to a plugin (can only happen when using auth token) */ JANUS_ERROR_UNAUTHORIZED_PLUGIN = 405 /*! \brief Unknown/undocumented error */ JANUS_ERROR_UNKNOWN = 490 /*! \brief Transport related error */ JANUS_ERROR_TRANSPORT_SPECIFIC = 450 /*! 
\brief The request is missing in the message */ JANUS_ERROR_MISSING_REQUEST = 452 /*! \brief The gateway does not suppurt this request */ JANUS_ERROR_UNKNOWN_REQUEST = 453 /*! \brief The payload is not a valid JSON message */ JANUS_ERROR_INVALID_JSON = 454 /*! \brief The object is not a valid JSON object as expected */ JANUS_ERROR_INVALID_JSON_OBJECT = 455 /*! \brief A mandatory element is missing in the message */ JANUS_ERROR_MISSING_MANDATORY_ELEMENT = 456 /*! \brief The request cannot be handled for this webserver path */ JANUS_ERROR_INVALID_REQUEST_PATH = 457 /*! \brief The session the request refers to doesn't exist */ JANUS_ERROR_SESSION_NOT_FOUND = 458 /*! \brief The handle the request refers to doesn't exist */ JANUS_ERROR_HANDLE_NOT_FOUND = 459 /*! \brief The plugin the request wants to talk to doesn't exist */ JANUS_ERROR_PLUGIN_NOT_FOUND = 460 /*! \brief An error occurring when trying to attach to a plugin and create a handle */ JANUS_ERROR_PLUGIN_ATTACH = 461 /*! \brief An error occurring when trying to send a message/request to the plugin */ JANUS_ERROR_PLUGIN_MESSAGE = 462 /*! \brief An error occurring when trying to detach from a plugin and destroy the related handle */ JANUS_ERROR_PLUGIN_DETACH = 463 /*! \brief The gateway doesn't support this SDP type * \todo The gateway currently only supports OFFER and ANSWER. */ JANUS_ERROR_JSEP_UNKNOWN_TYPE = 464 /*! \brief The Session Description provided by the peer is invalid */ JANUS_ERROR_JSEP_INVALID_SDP = 465 /*! \brief The stream a trickle candidate for does not exist or is invalid */ JANUS_ERROR_TRICKE_INVALID_STREAM = 466 /*! \brief A JSON element is of the wrong type (e.g., an integer instead of a string) */ JANUS_ERROR_INVALID_ELEMENT_TYPE = 467 /*! \brief The ID provided to create a new session is already in use */ JANUS_ERROR_SESSION_CONFLICT = 468 /*! \brief We got an ANSWER to an OFFER we never made */ JANUS_ERROR_UNEXPECTED_ANSWER = 469 /*! 
\brief The auth token the request refers to doesn't exist */ JANUS_ERROR_TOKEN_NOT_FOUND = 470 // Error codes of videoroom plugin. JANUS_VIDEOROOM_ERROR_UNKNOWN_ERROR = 499 JANUS_VIDEOROOM_ERROR_NO_MESSAGE = 421 JANUS_VIDEOROOM_ERROR_INVALID_JSON = 422 JANUS_VIDEOROOM_ERROR_INVALID_REQUEST = 423 JANUS_VIDEOROOM_ERROR_JOIN_FIRST = 424 JANUS_VIDEOROOM_ERROR_ALREADY_JOINED = 425 JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM = 426 JANUS_VIDEOROOM_ERROR_ROOM_EXISTS = 427 JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED = 428 JANUS_VIDEOROOM_ERROR_MISSING_ELEMENT = 429 JANUS_VIDEOROOM_ERROR_INVALID_ELEMENT = 430 JANUS_VIDEOROOM_ERROR_INVALID_SDP_TYPE = 431 JANUS_VIDEOROOM_ERROR_PUBLISHERS_FULL = 432 JANUS_VIDEOROOM_ERROR_UNAUTHORIZED = 433 JANUS_VIDEOROOM_ERROR_ALREADY_PUBLISHED = 434 JANUS_VIDEOROOM_ERROR_NOT_PUBLISHED = 435 JANUS_VIDEOROOM_ERROR_ID_EXISTS = 436 JANUS_VIDEOROOM_ERROR_INVALID_SDP = 437 ) var ( janusDialer = websocket.Dialer{ Subprotocols: []string{"janus-protocol"}, Proxy: http.ProxyFromEnvironment, } ) var msgtypes = map[string]func() interface{}{ "error": func() interface{} { return &janus.ErrorMsg{} }, "success": func() interface{} { return &janus.SuccessMsg{} }, "detached": func() interface{} { return &janus.DetachedMsg{} }, "server_info": func() interface{} { return &InfoMsg{} }, "ack": func() interface{} { return &janus.AckMsg{} }, "event": func() interface{} { return &janus.EventMsg{} }, "webrtcup": func() interface{} { return &janus.WebRTCUpMsg{} }, "media": func() interface{} { return &janus.MediaMsg{} }, "hangup": func() interface{} { return &janus.HangupMsg{} }, "slowlink": func() interface{} { return &janus.SlowLinkMsg{} }, "timeout": func() interface{} { return &janus.TimeoutMsg{} }, "trickle": func() interface{} { return &TrickleMsg{} }, } type InfoMsg struct { Name string Version int VersionString string `json:"version_string"` Author string DataChannels bool `json:"data_channels"` IPv6 bool `json:"ipv6"` LocalIP string `json:"local-ip"` ICE_TCP bool 
`json:"ice-tcp"` FullTrickle bool `json:"full-trickle"` Transports map[string]janus.PluginInfo Plugins map[string]janus.PluginInfo } type TrickleMsg struct { Session uint64 `json:"session_id"` Handle uint64 `json:"sender"` Candidate struct { SdpMid string `json:"sdpMid"` SdpMLineIndex int `json:"sdpMLineIndex"` Candidate string `json:"candidate"` Completed bool `json:"completed,omitempty"` } `json:"candidate"` } func unexpected(request string) error { return fmt.Errorf("unexpected response received to '%s' request", request) } type transaction struct { ch chan interface{} incoming chan interface{} closer *Closer } func (t *transaction) run() { for { select { case msg := <-t.incoming: t.ch <- msg case <-t.closer.C: return } } } func (t *transaction) add(msg interface{}) { t.incoming <- msg } func (t *transaction) quit() { t.closer.Close() } func newTransaction() *transaction { t := &transaction{ ch: make(chan interface{}, 1), incoming: make(chan interface{}, 8), closer: NewCloser(), } return t } func newRequest(method string) (map[string]interface{}, *transaction) { req := make(map[string]interface{}, 8) req["janus"] = method return req, newTransaction() } type GatewayListener interface { ConnectionInterrupted() } type dummyGatewayListener struct { } func (l *dummyGatewayListener) ConnectionInterrupted() { } // Gateway represents a connection to an instance of the Janus Gateway. type JanusGateway struct { listener GatewayListener // Sessions is a map of the currently active sessions to the gateway. Sessions map[uint64]*JanusSession // Access to the Sessions map should be synchronized with the Gateway.Lock() // and Gateway.Unlock() methods provided by the embedded sync.Mutex. sync.Mutex conn *websocket.Conn nextTransaction atomic.Uint64 transactions map[uint64]*transaction closer *Closer writeMu sync.Mutex } // Connect creates a new Gateway instance, connected to the Janus Gateway. 
// path should be a filesystem path to the Unix Socket that the Unix transport // is bound to. // On success, a new Gateway object will be returned and error will be nil. // func Connect(path string, netType string) (*JanusGateway, error) { // conn, err := net.Dial(netType, path) // if err != nil { // return nil, err // } // gateway := new(Gateway) // //gateway.conn = conn // gateway.transactions = make(map[uint64]chan interface{}) // gateway.Sessions = make(map[uint64]*JanusSession) // go gateway.recv() // return gateway, nil // } func NewJanusGateway(wsURL string, listener GatewayListener) (*JanusGateway, error) { conn, _, err := janusDialer.Dial(wsURL, nil) if err != nil { return nil, err } if listener == nil { listener = new(dummyGatewayListener) } gateway := &JanusGateway{ conn: conn, listener: listener, transactions: make(map[uint64]*transaction), Sessions: make(map[uint64]*JanusSession), closer: NewCloser(), } go gateway.ping() go gateway.recv() return gateway, nil } // Close closes the underlying connection to the Gateway. 
func (gateway *JanusGateway) Close() error {
	// Stop the ping loop first so no further control frames are written.
	gateway.closer.Close()
	gateway.writeMu.Lock()
	if gateway.conn == nil {
		// Already closed (or never connected).
		gateway.writeMu.Unlock()
		return nil
	}

	err := gateway.conn.Close()
	// Mark the connection as gone so send()/ping()/recv() stop using it.
	gateway.conn = nil
	gateway.writeMu.Unlock()
	// Wake up all callers still waiting for a response.
	gateway.cancelTransactions()
	return err
}

// cancelTransactions aborts all pending transactions with a synthetic
// "cancelled" error message and resets the transactions map. Called when the
// connection to Janus is closed or interrupted.
func (gateway *JanusGateway) cancelTransactions() {
	msg := &janus.ErrorMsg{
		Err: janus.ErrorData{
			Code:   500,
			Reason: "cancelled",
		},
	}
	gateway.Lock()
	for _, t := range gateway.transactions {
		// Deliver the error and shut down each transaction asynchronously so
		// this method doesn't block while holding the gateway lock.
		go func(t *transaction) {
			t.add(msg)
			t.quit()
		}(t)
	}
	gateway.transactions = make(map[uint64]*transaction)
	gateway.Unlock()
}

// removeTransaction removes a pending transaction by its id (if still
// registered) and stops its dispatch goroutine.
func (gateway *JanusGateway) removeTransaction(id uint64) {
	gateway.Lock()
	t, found := gateway.transactions[id]
	if found {
		delete(gateway.transactions, id)
	}
	gateway.Unlock()
	if t != nil {
		// Quit outside the lock; quit() closes the transaction's closer.
		t.quit()
	}
}

// send registers the transaction "t" under a new unique transaction id, adds
// that id to the message and writes it to the websocket connection. Returns
// the assigned transaction id; on any failure the transaction is unregistered
// again before the error is returned.
func (gateway *JanusGateway) send(msg map[string]interface{}, t *transaction) (uint64, error) {
	// Janus expects the transaction id as a string.
	id := gateway.nextTransaction.Add(1)
	msg["transaction"] = strconv.FormatUint(id, 10)
	data, err := json.Marshal(msg)
	if err != nil {
		return 0, err
	}

	// Start the dispatch goroutine before the message can be answered.
	go t.run()
	gateway.Lock()
	gateway.transactions[id] = t
	gateway.Unlock()
	// "writeMu" also guards "conn" itself; gorilla/websocket allows only one
	// concurrent writer per connection.
	gateway.writeMu.Lock()
	if gateway.conn == nil {
		gateway.writeMu.Unlock()
		gateway.removeTransaction(id)
		return 0, fmt.Errorf("not connected")
	}
	err = gateway.conn.WriteMessage(websocket.TextMessage, data)
	gateway.writeMu.Unlock()
	if err != nil {
		gateway.removeTransaction(id)
		return 0, err
	}

	return id, nil
}

// passMsg delivers a message to a (possibly blocking) channel; run in its own
// goroutine so recv() is never stalled by a slow consumer.
func passMsg(ch chan interface{}, msg interface{}) {
	ch <- msg
}

// ping periodically sends websocket ping frames to Janus to keep the
// connection alive, until the gateway is closed.
func (gateway *JanusGateway) ping() {
	ticker := time.NewTicker(time.Second * 30)
	defer ticker.Stop()

loop:
	for {
		select {
		case <-ticker.C:
			gateway.writeMu.Lock()
			if gateway.conn == nil {
				// Connection currently gone; try again on the next tick.
				gateway.writeMu.Unlock()
				continue
			}

			err := gateway.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(20*time.Second))
			gateway.writeMu.Unlock()
			if err != nil {
				log.Println("Error sending ping to MCU:", err)
			}
		case <-gateway.closer.C:
			break loop
		}
	}
}

// recv is the single reader goroutine of the websocket connection. It decodes
// every incoming message, then routes it either to the transaction that is
// waiting for it (messages carrying a transaction id) or to the plugin handle
// it belongs to (asynchronous events). On a read error the connection is
// marked as gone, all pending transactions are cancelled and the listener is
// notified.
func (gateway *JanusGateway) recv() {
	var decodeBuffer bytes.Buffer
	for {
		// Read message from Gateway

		// Decode to Msg struct
		var base janus.BaseMsg
		gateway.writeMu.Lock()
		conn := gateway.conn
		gateway.writeMu.Unlock()
		if conn == nil {
			return
		}

		_, reader, err := conn.NextReader()
		if err != nil {
			log.Printf("conn.NextReader: %s", err)
			gateway.writeMu.Lock()
			gateway.conn = nil
			gateway.writeMu.Unlock()
			gateway.cancelTransactions()
			go gateway.listener.ConnectionInterrupted()
			return
		}

		decodeBuffer.Reset()
		if _, err := decodeBuffer.ReadFrom(reader); err != nil {
			log.Printf("decodeBuffer.ReadFrom: %s", err)
			gateway.writeMu.Lock()
			gateway.conn = nil
			gateway.writeMu.Unlock()
			gateway.cancelTransactions()
			go gateway.listener.ConnectionInterrupted()
			// Same effect as "return" above: leaves the read loop.
			break
		}

		// First pass: decode only the common fields (type, ids).
		data := bytes.NewReader(decodeBuffer.Bytes())
		decoder := json.NewDecoder(data)
		decoder.UseNumber()
		if err := decoder.Decode(&base); err != nil {
			log.Printf("json.Unmarshal of %s: %s", decodeBuffer.String(), err)
			continue
		}

		typeFunc, ok := msgtypes[base.Type]
		if !ok {
			log.Printf("Unknown message type received: %s", decodeBuffer.String())
			continue
		}

		// Second pass: decode the full message into its concrete type.
		msg := typeFunc()
		data = bytes.NewReader(decodeBuffer.Bytes())
		decoder = json.NewDecoder(data)
		decoder.UseNumber()
		if err := decoder.Decode(&msg); err != nil {
			log.Printf("json.Unmarshal of %s: %s", decodeBuffer.String(), err)
			continue // Decode error
		}

		// Pass message on from here
		if base.ID == "" {
			// Is this a Handle event?
			if base.Handle == 0 {
				// Nope. No idea what's going on...
				// Error()
				log.Printf("Received event without handle, ignoring: %s", decodeBuffer.String())
			} else {
				// Lookup Session
				gateway.Lock()
				session := gateway.Sessions[base.Session]
				gateway.Unlock()
				if session == nil {
					log.Printf("Unable to deliver message %s. Session %d gone?", decodeBuffer.String(), base.Session)
					continue
				}

				// Lookup Handle
				session.Lock()
				handle := session.Handles[base.Handle]
				session.Unlock()
				if handle == nil {
					log.Printf("Unable to deliver message %s. Handle %d gone?", decodeBuffer.String(), base.Handle)
					continue
				}

				// Pass msg in a goroutine so a slow Events consumer cannot
				// block this read loop.
				go passMsg(handle.Events, msg)
			}
		} else {
			id, err := strconv.ParseUint(base.ID, 10, 64)
			if err != nil {
				log.Printf("Could not decode transaction id %s: %s", base.ID, err)
				continue
			}

			// Lookup Transaction
			gateway.Lock()
			transaction := gateway.transactions[id]
			gateway.Unlock()
			if transaction == nil {
				// Error()
				log.Printf("Received event for unknown transaction, ignoring: %s", decodeBuffer.String())
				continue
			}

			// Pass msg
			transaction.add(msg)
		}
	}
}

// waitForMessage returns the next response delivered for transaction "t", or
// the context error if the context expires first.
func waitForMessage(ctx context.Context, t *transaction) (interface{}, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case msg := <-t.ch:
		return msg, nil
	}
}

// Info sends an info request to the Gateway.
// On success, an InfoMsg will be returned and error will be nil.
func (gateway *JanusGateway) Info(ctx context.Context) (*InfoMsg, error) {
	req, ch := newRequest("info")
	id, err := gateway.send(req, ch)
	if err != nil {
		return nil, err
	}
	defer gateway.removeTransaction(id)

	msg, err := waitForMessage(ctx, ch)
	if err != nil {
		return nil, err
	}
	switch msg := msg.(type) {
	case *InfoMsg:
		return msg, nil
	case *janus.ErrorMsg:
		return nil, msg
	}

	return nil, unexpected("info")
}

// Create sends a create request to the Gateway.
// On success, a new Session will be returned and error will be nil.
func (gateway *JanusGateway) Create(ctx context.Context) (*JanusSession, error) { req, ch := newRequest("create") id, err := gateway.send(req, ch) if err != nil { return nil, err } defer gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } var success *janus.SuccessMsg switch msg := msg.(type) { case *janus.SuccessMsg: success = msg case *janus.ErrorMsg: return nil, msg } // Create new session session := new(JanusSession) session.gateway = gateway session.Id = success.Data.ID session.Handles = make(map[uint64]*JanusHandle) // Store this session gateway.Lock() gateway.Sessions[session.Id] = session gateway.Unlock() return session, nil } // Session represents a session instance on the Janus Gateway. type JanusSession struct { // Id is the session_id of this session Id uint64 // Handles is a map of plugin handles within this session Handles map[uint64]*JanusHandle // Access to the Handles map should be synchronized with the Session.Lock() // and Session.Unlock() methods provided by the embedded sync.Mutex. sync.Mutex gateway *JanusGateway } func (session *JanusSession) send(msg map[string]interface{}, t *transaction) (uint64, error) { msg["session_id"] = session.Id return session.gateway.send(msg, t) } // Attach sends an attach request to the Gateway within this session. // plugin should be the unique string of the plugin to attach to. // On success, a new Handle will be returned and error will be nil. 
func (session *JanusSession) Attach(ctx context.Context, plugin string) (*JanusHandle, error) { req, ch := newRequest("attach") req["plugin"] = plugin id, err := session.send(req, ch) if err != nil { return nil, err } defer session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } var success *janus.SuccessMsg switch msg := msg.(type) { case *janus.SuccessMsg: success = msg case *janus.ErrorMsg: return nil, msg } handle := new(JanusHandle) handle.session = session handle.Id = success.Data.ID handle.Events = make(chan interface{}, 8) session.Lock() session.Handles[handle.Id] = handle session.Unlock() return handle, nil } // KeepAlive sends a keep-alive request to the Gateway. // On success, an AckMsg will be returned and error will be nil. func (session *JanusSession) KeepAlive(ctx context.Context) (*janus.AckMsg, error) { req, ch := newRequest("keepalive") id, err := session.send(req, ch) if err != nil { return nil, err } defer session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } switch msg := msg.(type) { case *janus.AckMsg: return msg, nil case *janus.ErrorMsg: return nil, msg } return nil, unexpected("keepalive") } // Destroy sends a destroy request to the Gateway to tear down this session. // On success, the Session will be removed from the Gateway.Sessions map, an // AckMsg will be returned and error will be nil. 
func (session *JanusSession) Destroy(ctx context.Context) (*janus.AckMsg, error) { req, ch := newRequest("destroy") id, err := session.send(req, ch) if err != nil { return nil, err } defer session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } var ack *janus.AckMsg switch msg := msg.(type) { case *janus.AckMsg: ack = msg case *janus.ErrorMsg: return nil, msg } // Remove this session from the gateway session.gateway.Lock() delete(session.gateway.Sessions, session.Id) session.gateway.Unlock() return ack, nil } // Handle represents a handle to a plugin instance on the Gateway. type JanusHandle struct { // Id is the handle_id of this plugin handle Id uint64 // Type // pub or sub Type string //User // Userid User string // Events is a receive only channel that can be used to receive events // related to this handle from the gateway. Events chan interface{} session *JanusSession } func (handle *JanusHandle) send(msg map[string]interface{}, t *transaction) (uint64, error) { msg["handle_id"] = handle.Id return handle.session.send(msg, t) } // send sync request func (handle *JanusHandle) Request(ctx context.Context, body interface{}) (*janus.SuccessMsg, error) { req, ch := newRequest("message") if body != nil { req["body"] = body } id, err := handle.send(req, ch) if err != nil { return nil, err } defer handle.session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } switch msg := msg.(type) { case *janus.SuccessMsg: return msg, nil case *janus.ErrorMsg: return nil, msg } return nil, unexpected("message") } // Message sends a message request to a plugin handle on the Gateway. // body should be the plugin data to be passed to the plugin, and jsep should // contain an optional SDP offer/answer to establish a WebRTC PeerConnection. // On success, an EventMsg will be returned and error will be nil. 
func (handle *JanusHandle) Message(ctx context.Context, body, jsep interface{}) (*janus.EventMsg, error) { req, ch := newRequest("message") if body != nil { req["body"] = body } if jsep != nil { req["jsep"] = jsep } id, err := handle.send(req, ch) if err != nil { return nil, err } defer handle.session.gateway.removeTransaction(id) GetMessage: // No tears.. msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } switch msg := msg.(type) { case *janus.AckMsg: goto GetMessage // ..only dreams. case *janus.EventMsg: return msg, nil case *janus.ErrorMsg: return nil, msg } return nil, unexpected("message") } // Trickle sends a trickle request to the Gateway as part of establishing // a new PeerConnection with a plugin. // candidate should be a single ICE candidate, or a completed object to // signify that all candidates have been sent: // // { // "completed": true // } // // On success, an AckMsg will be returned and error will be nil. func (handle *JanusHandle) Trickle(ctx context.Context, candidate interface{}) (*janus.AckMsg, error) { req, ch := newRequest("trickle") req["candidate"] = candidate id, err := handle.send(req, ch) if err != nil { return nil, err } defer handle.session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } switch msg := msg.(type) { case *janus.AckMsg: return msg, nil case *janus.ErrorMsg: return nil, msg } return nil, unexpected("trickle") } // TrickleMany sends a trickle request to the Gateway as part of establishing // a new PeerConnection with a plugin. // candidates should be an array of ICE candidates. // On success, an AckMsg will be returned and error will be nil. 
func (handle *JanusHandle) TrickleMany(ctx context.Context, candidates interface{}) (*janus.AckMsg, error) { req, ch := newRequest("trickle") req["candidates"] = candidates id, err := handle.send(req, ch) if err != nil { return nil, err } handle.session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } switch msg := msg.(type) { case *janus.AckMsg: return msg, nil case *janus.ErrorMsg: return nil, msg } return nil, unexpected("trickle") } // Detach sends a detach request to the Gateway to remove this handle. // On success, an AckMsg will be returned and error will be nil. func (handle *JanusHandle) Detach(ctx context.Context) (*janus.AckMsg, error) { req, ch := newRequest("detach") id, err := handle.send(req, ch) if err != nil { return nil, err } defer handle.session.gateway.removeTransaction(id) msg, err := waitForMessage(ctx, ch) if err != nil { return nil, err } var ack *janus.AckMsg switch msg := msg.(type) { case *janus.AckMsg: ack = msg case *janus.ErrorMsg: return nil, msg } // Remove this handle from the session handle.session.Lock() delete(handle.session.Handles, handle.Id) handle.session.Unlock() return ack, nil } nextcloud-spreed-signaling-1.2.4/lru.go000066400000000000000000000045651460321600400201130ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see .
 */

package signaling

import (
	"container/list"
	"sync"
)

// cacheEntry is the payload stored in each list element; the key is kept so
// the lookup map entry can be deleted when the element is evicted.
type cacheEntry struct {
	key   string
	value interface{}
}

// LruCache is a mutex-protected cache with least-recently-used eviction.
// A size of 0 makes the cache unbounded. Recency is tracked by keeping the
// most recently used entry at the front of "entries".
type LruCache struct {
	size int

	mu      sync.Mutex
	entries *list.List
	data    map[string]*list.Element
}

// NewLruCache creates a cache holding at most "size" entries (0 = unbounded).
func NewLruCache(size int) *LruCache {
	return &LruCache{
		size: size,

		entries: list.New(),
		data:    make(map[string]*list.Element),
	}
}

// Set stores or updates the value for the given key and marks it as most
// recently used. If the cache is bounded and grew over its limit, the
// oldest entry is evicted.
func (c *LruCache) Set(key string, value interface{}) {
	c.mu.Lock()
	if v, found := c.data[key]; found {
		// Existing entry: refresh recency and overwrite the value in place.
		c.entries.MoveToFront(v)
		v.Value.(*cacheEntry).value = value
		c.mu.Unlock()
		return
	}

	v := c.entries.PushFront(&cacheEntry{
		key:   key,
		value: value,
	})
	c.data[key] = v
	if c.size > 0 && c.entries.Len() > c.size {
		c.removeOldestLocked()
	}
	c.mu.Unlock()
}

// Get returns the value stored for the key (nil if not present) and marks
// the entry as most recently used.
func (c *LruCache) Get(key string) interface{} {
	c.mu.Lock()
	if v, found := c.data[key]; found {
		c.entries.MoveToFront(v)
		value := v.Value.(*cacheEntry).value
		c.mu.Unlock()
		return value
	}
	c.mu.Unlock()
	return nil
}

// Remove deletes the entry for the given key if it is present.
func (c *LruCache) Remove(key string) {
	c.mu.Lock()
	if v, found := c.data[key]; found {
		c.removeElement(v)
	}
	c.mu.Unlock()
}

// removeOldestLocked evicts the least recently used entry (the back of the
// list). The caller must hold c.mu.
func (c *LruCache) removeOldestLocked() {
	v := c.entries.Back()
	if v != nil {
		c.removeElement(v)
	}
}

// RemoveOldest evicts the least recently used entry.
func (c *LruCache) RemoveOldest() {
	c.mu.Lock()
	c.removeOldestLocked()
	c.mu.Unlock()
}

// removeElement drops the element from both the recency list and the lookup
// map. The caller must hold c.mu.
func (c *LruCache) removeElement(e *list.Element) {
	c.entries.Remove(e)
	entry := e.Value.(*cacheEntry)
	delete(c.data, entry.key)
}

// Len returns the number of entries currently stored.
func (c *LruCache) Len() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.entries.Len()
}
nextcloud-spreed-signaling-1.2.4/lru_test.go000066400000000000000000000106311460321600400211410ustar00rootroot00000000000000/**
 * Standalone signaling server for the Nextcloud Spreed app.
 * Copyright (C) 2017 struktur AG
 *
 * @author Joachim Bauch
 *
 * @license GNU AGPL version 3 or any later version
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see .
 */

package signaling

import (
	"fmt"
	"testing"
)

// TestLruUnbound exercises an unbounded cache (size 0): Set/Get, LRU
// ordering updates on access, RemoveOldest and Remove.
func TestLruUnbound(t *testing.T) {
	lru := NewLruCache(0)
	count := 10
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		lru.Set(key, i)
	}
	if lru.Len() != count {
		t.Errorf("Expected %d entries, got %d", count, lru.Len())
	}
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		value := lru.Get(key)
		if value == nil {
			t.Errorf("No value found for %s", key)
			continue
		} else if value.(int) != i {
			t.Errorf("Expected value to be %d, got %d", value.(int), i)
		}
	}

	// The first key ("0") is now the oldest.
	lru.RemoveOldest()
	if lru.Len() != count-1 {
		t.Errorf("Expected %d entries after RemoveOldest, got %d", count-1, lru.Len())
	}
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		value := lru.Get(key)
		if i == 0 {
			if value != nil {
				t.Errorf("The value for key %s should have been removed", key)
			}
			continue
		} else if value == nil {
			t.Errorf("No value found for %s", key)
			continue
		} else if value.(int) != i {
			t.Errorf("Expected value to be %d, got %d", value.(int), i)
		}
	}

	// NOTE: Key "0" no longer exists below, so make sure to not set it again.
	// Using the same keys will update the ordering.
	for i := count - 1; i >= 1; i-- {
		key := fmt.Sprintf("%d", i)
		lru.Set(key, i)
	}
	if lru.Len() != count-1 {
		t.Errorf("Expected %d entries, got %d", count-1, lru.Len())
	}
	// NOTE: The same ordering as the Set calls above.
	for i := count - 1; i >= 1; i-- {
		key := fmt.Sprintf("%d", i)
		value := lru.Get(key)
		if value == nil {
			t.Errorf("No value found for %s", key)
			continue
		} else if value.(int) != i {
			t.Errorf("Expected value to be %d, got %d", value.(int), i)
		}
	}

	// The last key ("9") is now the oldest.
	lru.RemoveOldest()
	if lru.Len() != count-2 {
		t.Errorf("Expected %d entries after RemoveOldest, got %d", count-2, lru.Len())
	}
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		value := lru.Get(key)
		if i == 0 || i == count-1 {
			if value != nil {
				t.Errorf("The value for key %s should have been removed", key)
			}
			continue
		} else if value == nil {
			t.Errorf("No value found for %s", key)
			continue
		} else if value.(int) != i {
			t.Errorf("Expected value to be %d, got %d", value.(int), i)
		}
	}

	// Remove an arbitrary key from the cache
	key := fmt.Sprintf("%d", count/2)
	lru.Remove(key)
	if lru.Len() != count-3 {
		t.Errorf("Expected %d entries after RemoveOldest, got %d", count-3, lru.Len())
	}
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		value := lru.Get(key)
		if i == 0 || i == count-1 || i == count/2 {
			if value != nil {
				t.Errorf("The value for key %s should have been removed", key)
			}
			continue
		} else if value == nil {
			t.Errorf("No value found for %s", key)
			continue
		} else if value.(int) != i {
			t.Errorf("Expected value to be %d, got %d", value.(int), i)
		}
	}
}

// TestLruBound verifies that a bounded cache evicts the oldest entries once
// more than "size" keys have been stored.
func TestLruBound(t *testing.T) {
	size := 2
	lru := NewLruCache(size)
	count := 10
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		lru.Set(key, i)
	}
	if lru.Len() != size {
		t.Errorf("Expected %d entries, got %d", size, lru.Len())
	}

	// Only the last "size" entries have been stored.
	for i := 0; i < count; i++ {
		key := fmt.Sprintf("%d", i)
		value := lru.Get(key)
		if i < count-size {
			if value != nil {
				t.Errorf("The value for key %s should have been removed", key)
			}
			continue
		} else if value == nil {
			t.Errorf("No value found for %s", key)
			continue
		} else if value.(int) != i {
			t.Errorf("Expected value to be %d, got %d", value.(int), i)
		}
	}
}
nextcloud-spreed-signaling-1.2.4/mcu_common.go000066400000000000000000000055251460321600400214410ustar00rootroot00000000000000/**
 * Standalone signaling server for the Nextcloud Spreed app.
 * Copyright (C) 2017 struktur AG
 *
 * @author Joachim Bauch
 *
 * @license GNU AGPL version 3 or any later version
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see .
 */

package signaling

import (
	"context"
	"fmt"

	"github.com/dlintw/goconf"
)

const (
	// Supported MCU backend types.
	McuTypeJanus = "janus"
	McuTypeProxy = "proxy"

	McuTypeDefault = McuTypeJanus
)

var (
	// ErrNotConnected is returned while no connection to the MCU exists.
	ErrNotConnected = fmt.Errorf("not connected")
)

// MediaType is a bitmask of the media kinds a client can publish.
type MediaType int

const (
	MediaTypeAudio  MediaType = 1 << 0
	MediaTypeVideo  MediaType = 1 << 1
	MediaTypeScreen MediaType = 1 << 2
)

// McuListener receives callbacks about events of a single MCU client
// (offers, ICE candidates and close notifications).
type McuListener interface {
	PublicId() string

	OnUpdateOffer(client McuClient, offer map[string]interface{})

	OnIceCandidate(client McuClient, candidate interface{})
	OnIceCompleted(client McuClient)

	SubscriberSidUpdated(subscriber McuSubscriber)

	PublisherClosed(publisher McuPublisher)
	SubscriberClosed(subscriber McuSubscriber)
}

// McuInitiator provides information about the party initiating a stream,
// currently only its country.
type McuInitiator interface {
	Country() string
}

// Mcu is the common interface implemented by the different MCU backends
// (Janus and proxy).
type Mcu interface {
	Start() error
	Stop()
	Reload(config *goconf.ConfigFile)

	SetOnConnected(func())
	SetOnDisconnected(func())

	GetStats() interface{}

	NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
	NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error)
}

// StreamType identifies the kind of stream being published or subscribed.
type StreamType string

const (
	StreamTypeAudio  StreamType = "audio"
	StreamTypeVideo  StreamType = "video"
	StreamTypeScreen StreamType = "screen"
)

// IsValidStreamType reports whether s is one of the known stream types.
func IsValidStreamType(s string) bool {
	switch s {
	case string(StreamTypeAudio):
		fallthrough
	case string(StreamTypeVideo):
		fallthrough
	case string(StreamTypeScreen):
		return true
	default:
		return false
	}
}

// McuClient is the common interface of publishers and subscribers.
type McuClient interface {
	Id() string
	Sid() string
	StreamType() StreamType
	MaxBitrate() int

	Close(ctx context.Context)

	SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{}))
}

// McuPublisher is a client that sends media to the MCU.
type McuPublisher interface {
	McuClient

	HasMedia(MediaType) bool
	SetMedia(MediaType)
}

// McuSubscriber is a client that receives media of a publisher from the MCU.
type McuSubscriber interface {
	McuClient

	Publisher() string
}
nextcloud-spreed-signaling-1.2.4/mcu_common_test.go000066400000000000000000000017461460321600400225020ustar00rootroot00000000000000/**
 * Standalone signaling server for the Nextcloud Spreed app.
 * Copyright (C) 2021 struktur AG
 *
 * @author Joachim Bauch
 *
 * @license GNU AGPL version 3 or any later version
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see .
 */

package signaling

import (
	"testing"
)

// TestCommonMcuStats runs the shared collect-and-lint helper over the
// common MCU stats collectors.
func TestCommonMcuStats(t *testing.T) {
	collectAndLint(t, commonMcuStats...)
}
nextcloud-spreed-signaling-1.2.4/mcu_janus.go000066400000000000000000001134131460321600400212660ustar00rootroot00000000000000/**
 * Standalone signaling server for the Nextcloud Spreed app.
 * Copyright (C) 2017 struktur AG
 *
 * @author Joachim Bauch
 *
 * @license GNU AGPL version 3 or any later version
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see .
*/ package signaling import ( "context" "database/sql" "encoding/json" "fmt" "log" "reflect" "strconv" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/notedit/janus-go" ) const ( pluginVideoRoom = "janus.plugin.videoroom" keepaliveInterval = 30 * time.Second videoPublisherUserId = 1 screenPublisherUserId = 2 initialReconnectInterval = 1 * time.Second maxReconnectInterval = 32 * time.Second defaultMaxStreamBitrate = 1024 * 1024 defaultMaxScreenBitrate = 2048 * 1024 ) var ( streamTypeUserIds = map[StreamType]uint64{ StreamTypeVideo: videoPublisherUserId, StreamTypeScreen: screenPublisherUserId, } ) func getStreamId(publisherId string, streamType StreamType) string { return fmt.Sprintf("%s|%s", publisherId, streamType) } func getPluginValue(data janus.PluginData, pluginName string, key string) interface{} { if data.Plugin != pluginName { return nil } return data.Data[key] } func convertIntValue(value interface{}) (uint64, error) { switch t := value.(type) { case float64: if t < 0 { return 0, fmt.Errorf("Unsupported float64 number: %+v", t) } return uint64(t), nil case uint64: return t, nil case int64: if t < 0 { return 0, fmt.Errorf("Unsupported int64 number: %+v", t) } return uint64(t), nil case json.Number: r, err := t.Int64() if err != nil { return 0, err } else if r < 0 { return 0, fmt.Errorf("Unsupported JSON number: %+v", t) } return uint64(r), nil default: return 0, fmt.Errorf("Unknown number type: %+v", t) } } func getPluginIntValue(data janus.PluginData, pluginName string, key string) uint64 { val := getPluginValue(data, pluginName, key) if val == nil { return 0 } result, err := convertIntValue(val) if err != nil { log.Printf("Invalid value %+v for %s: %s", val, key, err) result = 0 } return result } func getPluginStringValue(data janus.PluginData, pluginName string, key string) string { val := getPluginValue(data, pluginName, key) if val == nil { return "" } strVal, ok := val.(string) if !ok { return "" } return strVal } // TODO(jojo): 
Lots of error handling still missing. type clientInterface interface { NotifyReconnected() } type mcuJanus struct { url string mu sync.Mutex maxStreamBitrate int maxScreenBitrate int mcuTimeout time.Duration gw *JanusGateway session *JanusSession handle *JanusHandle closeChan chan struct{} muClients sync.Mutex clients map[clientInterface]bool clientId atomic.Uint64 publishers map[string]*mcuJanusPublisher publisherCreated Notifier publisherConnected Notifier reconnectTimer *time.Timer reconnectInterval time.Duration connectedSince time.Time onConnected atomic.Value onDisconnected atomic.Value } func emptyOnConnected() {} func emptyOnDisconnected() {} func NewMcuJanus(url string, config *goconf.ConfigFile) (Mcu, error) { maxStreamBitrate, _ := config.GetInt("mcu", "maxstreambitrate") if maxStreamBitrate <= 0 { maxStreamBitrate = defaultMaxStreamBitrate } maxScreenBitrate, _ := config.GetInt("mcu", "maxscreenbitrate") if maxScreenBitrate <= 0 { maxScreenBitrate = defaultMaxScreenBitrate } mcuTimeoutSeconds, _ := config.GetInt("mcu", "timeout") if mcuTimeoutSeconds <= 0 { mcuTimeoutSeconds = defaultMcuTimeoutSeconds } mcuTimeout := time.Duration(mcuTimeoutSeconds) * time.Second mcu := &mcuJanus{ url: url, maxStreamBitrate: maxStreamBitrate, maxScreenBitrate: maxScreenBitrate, mcuTimeout: mcuTimeout, closeChan: make(chan struct{}, 1), clients: make(map[clientInterface]bool), publishers: make(map[string]*mcuJanusPublisher), reconnectInterval: initialReconnectInterval, } mcu.onConnected.Store(emptyOnConnected) mcu.onDisconnected.Store(emptyOnDisconnected) mcu.reconnectTimer = time.AfterFunc(mcu.reconnectInterval, mcu.doReconnect) mcu.reconnectTimer.Stop() if err := mcu.reconnect(); err != nil { return nil, err } return mcu, nil } func (m *mcuJanus) disconnect() { if handle := m.handle; handle != nil { m.handle = nil m.closeChan <- struct{}{} if _, err := handle.Detach(context.TODO()); err != nil { log.Printf("Error detaching handle %d: %s", handle.Id, err) } } if 
m.session != nil { if _, err := m.session.Destroy(context.TODO()); err != nil { log.Printf("Error destroying session %d: %s", m.session.Id, err) } m.session = nil } if m.gw != nil { if err := m.gw.Close(); err != nil { log.Println("Error while closing connection to MCU", err) } m.gw = nil } } func (m *mcuJanus) reconnect() error { m.disconnect() gw, err := NewJanusGateway(m.url, m) if err != nil { return err } m.gw = gw m.reconnectTimer.Stop() return nil } func (m *mcuJanus) doReconnect() { if err := m.reconnect(); err != nil { m.scheduleReconnect(err) return } if err := m.Start(); err != nil { m.scheduleReconnect(err) return } log.Println("Reconnection to Janus gateway successful") m.mu.Lock() m.publishers = make(map[string]*mcuJanusPublisher) m.publisherCreated.Reset() m.publisherConnected.Reset() m.reconnectInterval = initialReconnectInterval m.mu.Unlock() m.muClients.Lock() for client := range m.clients { go client.NotifyReconnected() } m.muClients.Unlock() } func (m *mcuJanus) scheduleReconnect(err error) { m.mu.Lock() defer m.mu.Unlock() m.reconnectTimer.Reset(m.reconnectInterval) if err == nil { log.Printf("Connection to Janus gateway was interrupted, reconnecting in %s", m.reconnectInterval) } else { log.Printf("Reconnect to Janus gateway failed (%s), reconnecting in %s", err, m.reconnectInterval) } m.reconnectInterval = m.reconnectInterval * 2 if m.reconnectInterval > maxReconnectInterval { m.reconnectInterval = maxReconnectInterval } } func (m *mcuJanus) ConnectionInterrupted() { m.scheduleReconnect(nil) m.notifyOnDisconnected() } func (m *mcuJanus) Start() error { ctx := context.TODO() info, err := m.gw.Info(ctx) if err != nil { return err } log.Printf("Connected to %s %s by %s", info.Name, info.VersionString, info.Author) plugin, found := info.Plugins[pluginVideoRoom] if !found { return fmt.Errorf("Plugin %s is not supported", pluginVideoRoom) } log.Printf("Found %s %s by %s", plugin.Name, plugin.VersionString, plugin.Author) if !info.DataChannels { 
return fmt.Errorf("Data channels are not supported") } log.Println("Data channels are supported") if !info.FullTrickle { log.Println("WARNING: Full-Trickle is NOT enabled in Janus!") } else { log.Println("Full-Trickle is enabled") } log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate) log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate) if m.session, err = m.gw.Create(ctx); err != nil { m.disconnect() return err } log.Println("Created Janus session", m.session.Id) m.connectedSince = time.Now() if m.handle, err = m.session.Attach(ctx, pluginVideoRoom); err != nil { m.disconnect() return err } log.Println("Created Janus handle", m.handle.Id) go m.run() m.notifyOnConnected() return nil } func (m *mcuJanus) registerClient(client clientInterface) { m.muClients.Lock() m.clients[client] = true m.muClients.Unlock() } func (m *mcuJanus) unregisterClient(client clientInterface) { m.muClients.Lock() delete(m.clients, client) m.muClients.Unlock() } func (m *mcuJanus) run() { ticker := time.NewTicker(keepaliveInterval) defer ticker.Stop() loop: for { select { case <-ticker.C: m.sendKeepalive() case <-m.closeChan: break loop } } } func (m *mcuJanus) Stop() { m.disconnect() m.reconnectTimer.Stop() } func (m *mcuJanus) Reload(config *goconf.ConfigFile) { } func (m *mcuJanus) SetOnConnected(f func()) { if f == nil { f = emptyOnConnected } m.onConnected.Store(f) } func (m *mcuJanus) notifyOnConnected() { f := m.onConnected.Load().(func()) f() } func (m *mcuJanus) SetOnDisconnected(f func()) { if f == nil { f = emptyOnDisconnected } m.onDisconnected.Store(f) } func (m *mcuJanus) notifyOnDisconnected() { f := m.onDisconnected.Load().(func()) f() } type mcuJanusConnectionStats struct { Url string `json:"url"` Connected bool `json:"connected"` Publishers int64 `json:"publishers"` Clients int64 `json:"clients"` Uptime *time.Time `json:"uptime,omitempty"` } func (m *mcuJanus) GetStats() interface{} { result := 
mcuJanusConnectionStats{ Url: m.url, } if m.session != nil { result.Connected = true result.Uptime = &m.connectedSince } m.mu.Lock() result.Publishers = int64(len(m.publishers)) m.mu.Unlock() m.muClients.Lock() result.Clients = int64(len(m.clients)) m.muClients.Unlock() return result } func (m *mcuJanus) sendKeepalive() { ctx := context.TODO() if _, err := m.session.KeepAlive(ctx); err != nil { log.Println("Could not send keepalive request", err) if e, ok := err.(*janus.ErrorMsg); ok { switch e.Err.Code { case JANUS_ERROR_SESSION_NOT_FOUND: m.scheduleReconnect(err) } } } } type mcuJanusClient struct { mcu *mcuJanus listener McuListener mu sync.Mutex // nolint id uint64 session uint64 roomId uint64 sid string streamType StreamType maxBitrate int handle *JanusHandle handleId uint64 closeChan chan struct{} deferred chan func() handleEvent func(event *janus.EventMsg) handleHangup func(event *janus.HangupMsg) handleDetached func(event *janus.DetachedMsg) handleConnected func(event *janus.WebRTCUpMsg) handleSlowLink func(event *janus.SlowLinkMsg) handleMedia func(event *janus.MediaMsg) } func (c *mcuJanusClient) Id() string { return strconv.FormatUint(c.id, 10) } func (c *mcuJanusClient) Sid() string { return c.sid } func (c *mcuJanusClient) StreamType() StreamType { return c.streamType } func (c *mcuJanusClient) MaxBitrate() int { return c.maxBitrate } func (c *mcuJanusClient) Close(ctx context.Context) { } func (c *mcuJanusClient) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { } func (c *mcuJanusClient) closeClient(ctx context.Context) bool { if handle := c.handle; handle != nil { c.handle = nil close(c.closeChan) if _, err := handle.Detach(ctx); err != nil { if e, ok := err.(*janus.ErrorMsg); !ok || e.Err.Code != JANUS_ERROR_HANDLE_NOT_FOUND { log.Println("Could not detach client", handle.Id, err) } } return true } return false } func (c *mcuJanusClient) run(handle 
*JanusHandle, closeChan <-chan struct{}) { loop: for { select { case msg := <-handle.Events: switch t := msg.(type) { case *janus.EventMsg: c.handleEvent(t) case *janus.HangupMsg: c.handleHangup(t) case *janus.DetachedMsg: c.handleDetached(t) case *janus.MediaMsg: c.handleMedia(t) case *janus.WebRTCUpMsg: c.handleConnected(t) case *janus.SlowLinkMsg: c.handleSlowLink(t) case *TrickleMsg: c.handleTrickle(t) default: log.Println("Received unsupported event type", msg, reflect.TypeOf(msg)) } case f := <-c.deferred: f() case <-closeChan: break loop } } } func (c *mcuJanusClient) sendOffer(ctx context.Context, offer map[string]interface{}, callback func(error, map[string]interface{})) { handle := c.handle if handle == nil { callback(ErrNotConnected, nil) return } configure_msg := map[string]interface{}{ "request": "configure", "audio": true, "video": true, "data": true, } answer_msg, err := handle.Message(ctx, configure_msg, offer) if err != nil { callback(err, nil) return } callback(nil, answer_msg.Jsep) } func (c *mcuJanusClient) sendAnswer(ctx context.Context, answer map[string]interface{}, callback func(error, map[string]interface{})) { handle := c.handle if handle == nil { callback(ErrNotConnected, nil) return } start_msg := map[string]interface{}{ "request": "start", "room": c.roomId, } start_response, err := handle.Message(ctx, start_msg, answer) if err != nil { callback(err, nil) return } log.Println("Started listener", start_response) callback(nil, nil) } func (c *mcuJanusClient) sendCandidate(ctx context.Context, candidate interface{}, callback func(error, map[string]interface{})) { handle := c.handle if handle == nil { callback(ErrNotConnected, nil) return } if _, err := handle.Trickle(ctx, candidate); err != nil { callback(err, nil) return } callback(nil, nil) } func (c *mcuJanusClient) handleTrickle(event *TrickleMsg) { if event.Candidate.Completed { c.listener.OnIceCompleted(c) } else { c.listener.OnIceCandidate(c, event.Candidate) } } func (c 
*mcuJanusClient) selectStream(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) { handle := c.handle if handle == nil { callback(ErrNotConnected, nil) return } if stream == nil || !stream.HasValues() { callback(nil, nil) return } configure_msg := map[string]interface{}{ "request": "configure", } if stream != nil { stream.AddToMessage(configure_msg) } _, err := handle.Message(ctx, configure_msg, nil) if err != nil { callback(err, nil) return } callback(nil, nil) } type publisherStatsCounter struct { mu sync.Mutex streamTypes map[StreamType]bool subscribers map[string]bool } func (c *publisherStatsCounter) Reset() { c.mu.Lock() defer c.mu.Unlock() count := len(c.subscribers) for streamType := range c.streamTypes { statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Dec() statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Sub(float64(count)) } c.streamTypes = nil c.subscribers = nil } func (c *publisherStatsCounter) EnableStream(streamType StreamType, enable bool) { c.mu.Lock() defer c.mu.Unlock() if enable == c.streamTypes[streamType] { return } if enable { if c.streamTypes == nil { c.streamTypes = make(map[StreamType]bool) } c.streamTypes[streamType] = true statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Inc() statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Add(float64(len(c.subscribers))) } else { delete(c.streamTypes, streamType) statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Dec() statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Sub(float64(len(c.subscribers))) } } func (c *publisherStatsCounter) AddSubscriber(id string) { c.mu.Lock() defer c.mu.Unlock() if c.subscribers[id] { return } if c.subscribers == nil { c.subscribers = make(map[string]bool) } c.subscribers[id] = true for streamType := range c.streamTypes { 
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Inc() } } func (c *publisherStatsCounter) RemoveSubscriber(id string) { c.mu.Lock() defer c.mu.Unlock() if !c.subscribers[id] { return } delete(c.subscribers, id) for streamType := range c.streamTypes { statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Dec() } } type mcuJanusPublisher struct { mcuJanusClient id string bitrate int mediaTypes MediaType stats publisherStatsCounter } func (m *mcuJanus) SubscriberConnected(id string, publisher string, streamType StreamType) { m.mu.Lock() defer m.mu.Unlock() if p, found := m.publishers[getStreamId(publisher, streamType)]; found { p.stats.AddSubscriber(id) } } func (m *mcuJanus) SubscriberDisconnected(id string, publisher string, streamType StreamType) { m.mu.Lock() defer m.mu.Unlock() if p, found := m.publishers[getStreamId(publisher, streamType)]; found { p.stats.RemoveSubscriber(id) } } func min(a, b int) int { if a <= b { return a } return b } func (m *mcuJanus) getOrCreatePublisherHandle(ctx context.Context, id string, streamType StreamType, bitrate int) (*JanusHandle, uint64, uint64, int, error) { session := m.session if session == nil { return nil, 0, 0, 0, ErrNotConnected } handle, err := session.Attach(ctx, pluginVideoRoom) if err != nil { return nil, 0, 0, 0, err } log.Printf("Attached %s as publisher %d to plugin %s in session %d", streamType, handle.Id, pluginVideoRoom, session.Id) create_msg := map[string]interface{}{ "request": "create", "description": getStreamId(id, streamType), // We publish every stream in its own Janus room. "publishers": 1, // Do not use the video-orientation RTP extension as it breaks video // orientation changes in Firefox. 
"videoorient_ext": false, } var maxBitrate int if streamType == StreamTypeScreen { maxBitrate = m.maxScreenBitrate } else { maxBitrate = m.maxStreamBitrate } if bitrate <= 0 { bitrate = maxBitrate } else { bitrate = min(bitrate, maxBitrate) } create_msg["bitrate"] = bitrate create_response, err := handle.Request(ctx, create_msg) if err != nil { if _, err2 := handle.Detach(ctx); err2 != nil { log.Printf("Error detaching handle %d: %s", handle.Id, err2) } return nil, 0, 0, 0, err } roomId := getPluginIntValue(create_response.PluginData, pluginVideoRoom, "room") if roomId == 0 { if _, err := handle.Detach(ctx); err != nil { log.Printf("Error detaching handle %d: %s", handle.Id, err) } return nil, 0, 0, 0, fmt.Errorf("No room id received: %+v", create_response) } log.Println("Created room", roomId, create_response.PluginData) msg := map[string]interface{}{ "request": "join", "ptype": "publisher", "room": roomId, "id": streamTypeUserIds[streamType], } response, err := handle.Message(ctx, msg, nil) if err != nil { if _, err2 := handle.Detach(ctx); err2 != nil { log.Printf("Error detaching handle %d: %s", handle.Id, err2) } return nil, 0, 0, 0, err } return handle, response.Session, roomId, bitrate, nil } func (m *mcuJanus) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) { if _, found := streamTypeUserIds[streamType]; !found { return nil, fmt.Errorf("Unsupported stream type %s", streamType) } handle, session, roomId, maxBitrate, err := m.getOrCreatePublisherHandle(ctx, id, streamType, bitrate) if err != nil { return nil, err } client := &mcuJanusPublisher{ mcuJanusClient: mcuJanusClient{ mcu: m, listener: listener, id: m.clientId.Add(1), session: session, roomId: roomId, sid: sid, streamType: streamType, maxBitrate: maxBitrate, handle: handle, handleId: handle.Id, closeChan: make(chan struct{}, 1), deferred: make(chan func(), 64), }, id: 
id, bitrate: bitrate, mediaTypes: mediaTypes, } client.mcuJanusClient.handleEvent = client.handleEvent client.mcuJanusClient.handleHangup = client.handleHangup client.mcuJanusClient.handleDetached = client.handleDetached client.mcuJanusClient.handleConnected = client.handleConnected client.mcuJanusClient.handleSlowLink = client.handleSlowLink client.mcuJanusClient.handleMedia = client.handleMedia m.registerClient(client) log.Printf("Publisher %s is using handle %d", client.id, client.handleId) go client.run(handle, client.closeChan) m.mu.Lock() m.publishers[getStreamId(id, streamType)] = client m.publisherCreated.Notify(getStreamId(id, streamType)) m.mu.Unlock() statsPublishersCurrent.WithLabelValues(string(streamType)).Inc() statsPublishersTotal.WithLabelValues(string(streamType)).Inc() return client, nil } func (p *mcuJanusPublisher) handleEvent(event *janus.EventMsg) { if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" { ctx := context.TODO() switch videoroom { case "destroyed": log.Printf("Publisher %d: associated room has been destroyed, closing", p.handleId) go p.Close(ctx) case "slow_link": // Ignore, processed through "handleSlowLink" in the general events. 
default: log.Printf("Unsupported videoroom publisher event in %d: %+v", p.handleId, event) } } else { log.Printf("Unsupported publisher event in %d: %+v", p.handleId, event) } } func (p *mcuJanusPublisher) handleHangup(event *janus.HangupMsg) { log.Printf("Publisher %d received hangup (%s), closing", p.handleId, event.Reason) go p.Close(context.Background()) } func (p *mcuJanusPublisher) handleDetached(event *janus.DetachedMsg) { log.Printf("Publisher %d received detached, closing", p.handleId) go p.Close(context.Background()) } func (p *mcuJanusPublisher) handleConnected(event *janus.WebRTCUpMsg) { log.Printf("Publisher %d received connected", p.handleId) p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType)) } func (p *mcuJanusPublisher) handleSlowLink(event *janus.SlowLinkMsg) { if event.Uplink { log.Printf("Publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost) } else { log.Printf("Publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost) } } func (p *mcuJanusPublisher) handleMedia(event *janus.MediaMsg) { mediaType := StreamType(event.Type) if mediaType == StreamTypeVideo && p.streamType == StreamTypeScreen { // We want to differentiate between audio, video and screensharing mediaType = p.streamType } p.stats.EnableStream(mediaType, event.Receiving) } func (p *mcuJanusPublisher) HasMedia(mt MediaType) bool { return (p.mediaTypes & mt) == mt } func (p *mcuJanusPublisher) SetMedia(mt MediaType) { p.mediaTypes = mt } func (p *mcuJanusPublisher) NotifyReconnected() { ctx := context.TODO() handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate) if err != nil { log.Printf("Could not reconnect publisher %s: %s", p.id, err) // TODO(jojo): Retry return } p.handle = handle p.handleId = handle.Id p.session = session p.roomId = roomId log.Printf("Publisher %s reconnected 
on handle %d", p.id, p.handleId) } func (p *mcuJanusPublisher) Close(ctx context.Context) { notify := false p.mu.Lock() if handle := p.handle; handle != nil && p.roomId != 0 { destroy_msg := map[string]interface{}{ "request": "destroy", "room": p.roomId, } if _, err := handle.Request(ctx, destroy_msg); err != nil { log.Printf("Error destroying room %d: %s", p.roomId, err) } else { log.Printf("Room %d destroyed", p.roomId) } p.mcu.mu.Lock() delete(p.mcu.publishers, getStreamId(p.id, p.streamType)) p.mcu.mu.Unlock() p.roomId = 0 notify = true } p.closeClient(ctx) p.mu.Unlock() p.stats.Reset() if notify { statsPublishersCurrent.WithLabelValues(string(p.streamType)).Dec() p.mcu.unregisterClient(p) p.listener.PublisherClosed(p) } p.mcuJanusClient.Close(ctx) } func (p *mcuJanusPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { statsMcuMessagesTotal.WithLabelValues(data.Type).Inc() jsep_msg := data.Payload switch data.Type { case "offer": p.deferred <- func() { msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() // TODO Tear down previous publisher and get a new one if sid does // not match? 
p.sendOffer(msgctx, jsep_msg, callback) } case "candidate": p.deferred <- func() { msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() if data.Sid == "" || data.Sid == p.Sid() { p.sendCandidate(msgctx, jsep_msg["candidate"], callback) } else { go callback(fmt.Errorf("Candidate message sid (%s) does not match publisher sid (%s)", data.Sid, p.Sid()), nil) } } case "endOfCandidates": // Ignore default: go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil) } } type mcuJanusSubscriber struct { mcuJanusClient publisher string } func (m *mcuJanus) getPublisher(ctx context.Context, publisher string, streamType StreamType) (*mcuJanusPublisher, error) { // Do the direct check immediately as this should be the normal case. key := getStreamId(publisher, streamType) m.mu.Lock() if result, found := m.publishers[key]; found { m.mu.Unlock() return result, nil } waiter := m.publisherCreated.NewWaiter(key) m.mu.Unlock() defer m.publisherCreated.Release(waiter) for { m.mu.Lock() result := m.publishers[key] m.mu.Unlock() if result != nil { return result, nil } if err := waiter.Wait(ctx); err != nil { return nil, err } } } func (m *mcuJanus) getOrCreateSubscriberHandle(ctx context.Context, publisher string, streamType StreamType) (*JanusHandle, *mcuJanusPublisher, error) { var pub *mcuJanusPublisher var err error if pub, err = m.getPublisher(ctx, publisher, streamType); err != nil { return nil, nil, err } session := m.session if session == nil { return nil, nil, ErrNotConnected } handle, err := session.Attach(ctx, pluginVideoRoom) if err != nil { return nil, nil, err } log.Printf("Attached subscriber to room %d of publisher %s in plugin %s in session %d as %d", pub.roomId, publisher, pluginVideoRoom, session.Id, handle.Id) return handle, pub, nil } func (m *mcuJanus) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) { if _, found := 
streamTypeUserIds[streamType]; !found { return nil, fmt.Errorf("Unsupported stream type %s", streamType) } handle, pub, err := m.getOrCreateSubscriberHandle(ctx, publisher, streamType) if err != nil { return nil, err } client := &mcuJanusSubscriber{ mcuJanusClient: mcuJanusClient{ mcu: m, listener: listener, id: m.clientId.Add(1), roomId: pub.roomId, sid: strconv.FormatUint(handle.Id, 10), streamType: streamType, maxBitrate: pub.MaxBitrate(), handle: handle, handleId: handle.Id, closeChan: make(chan struct{}, 1), deferred: make(chan func(), 64), }, publisher: publisher, } client.mcuJanusClient.handleEvent = client.handleEvent client.mcuJanusClient.handleHangup = client.handleHangup client.mcuJanusClient.handleDetached = client.handleDetached client.mcuJanusClient.handleConnected = client.handleConnected client.mcuJanusClient.handleSlowLink = client.handleSlowLink client.mcuJanusClient.handleMedia = client.handleMedia m.registerClient(client) go client.run(handle, client.closeChan) statsSubscribersCurrent.WithLabelValues(string(streamType)).Inc() statsSubscribersTotal.WithLabelValues(string(streamType)).Inc() return client, nil } func (p *mcuJanusSubscriber) Publisher() string { return p.publisher } func (p *mcuJanusSubscriber) handleEvent(event *janus.EventMsg) { if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" { ctx := context.TODO() switch videoroom { case "destroyed": log.Printf("Subscriber %d: associated room has been destroyed, closing", p.handleId) go p.Close(ctx) case "event": // Handle renegotiations, but ignore other events like selected // substream / temporal layer. if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" && event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil { p.listener.OnUpdateOffer(p, event.Jsep) } case "slow_link": // Ignore, processed through "handleSlowLink" in the general events. 
default: log.Printf("Unsupported videoroom event %s for subscriber %d: %+v", videoroom, p.handleId, event) } } else { log.Printf("Unsupported event for subscriber %d: %+v", p.handleId, event) } } func (p *mcuJanusSubscriber) handleHangup(event *janus.HangupMsg) { log.Printf("Subscriber %d received hangup (%s), closing", p.handleId, event.Reason) go p.Close(context.Background()) } func (p *mcuJanusSubscriber) handleDetached(event *janus.DetachedMsg) { log.Printf("Subscriber %d received detached, closing", p.handleId) go p.Close(context.Background()) } func (p *mcuJanusSubscriber) handleConnected(event *janus.WebRTCUpMsg) { log.Printf("Subscriber %d received connected", p.handleId) p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType) } func (p *mcuJanusSubscriber) handleSlowLink(event *janus.SlowLinkMsg) { if event.Uplink { log.Printf("Subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost) } else { log.Printf("Subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost) } } func (p *mcuJanusSubscriber) handleMedia(event *janus.MediaMsg) { // Only triggered for publishers } func (p *mcuJanusSubscriber) NotifyReconnected() { ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType) if err != nil { // TODO(jojo): Retry? 
log.Printf("Could not reconnect subscriber for publisher %s: %s", p.publisher, err) p.Close(context.Background()) return } p.handle = handle p.handleId = handle.Id p.roomId = pub.roomId p.sid = strconv.FormatUint(handle.Id, 10) p.listener.SubscriberSidUpdated(p) log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId) } func (p *mcuJanusSubscriber) Close(ctx context.Context) { p.mu.Lock() closed := p.closeClient(ctx) p.mu.Unlock() if closed { p.mcu.SubscriberDisconnected(p.Id(), p.publisher, p.streamType) statsSubscribersCurrent.WithLabelValues(string(p.streamType)).Dec() } p.mcu.unregisterClient(p) p.listener.SubscriberClosed(p) p.mcuJanusClient.Close(ctx) } func (p *mcuJanusSubscriber) joinRoom(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) { handle := p.handle if handle == nil { callback(ErrNotConnected, nil) return } waiter := p.mcu.publisherConnected.NewWaiter(getStreamId(p.publisher, p.streamType)) defer p.mcu.publisherConnected.Release(waiter) loggedNotPublishingYet := false retry: join_msg := map[string]interface{}{ "request": "join", "ptype": "subscriber", "room": p.roomId, "feed": streamTypeUserIds[p.streamType], } if stream != nil { stream.AddToMessage(join_msg) } join_response, err := handle.Message(ctx, join_msg, nil) if err != nil { callback(err, nil) return } if error_code := getPluginIntValue(join_response.Plugindata, pluginVideoRoom, "error_code"); error_code > 0 { switch error_code { case JANUS_VIDEOROOM_ERROR_ALREADY_JOINED: // The subscriber is already connected to the room. This can happen // if a client leaves a call but keeps the subscriber objects active. // On joining the call again, the subscriber tries to join on the // MCU which will fail because he is still connected. // To get a new Offer SDP, we have to tear down the session on the // MCU and join again. 
p.mu.Lock() p.closeClient(ctx) p.mu.Unlock() var pub *mcuJanusPublisher handle, pub, err = p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType) if err != nil { // Reconnection didn't work, need to unregister/remove subscriber // so a new object will be created if the request is retried. p.mcu.unregisterClient(p) p.listener.SubscriberClosed(p) callback(fmt.Errorf("Already connected as subscriber for %s, error during re-joining: %s", p.streamType, err), nil) return } p.handle = handle p.handleId = handle.Id p.roomId = pub.roomId p.sid = strconv.FormatUint(handle.Id, 10) p.listener.SubscriberSidUpdated(p) p.closeChan = make(chan struct{}, 1) go p.run(p.handle, p.closeChan) log.Printf("Already connected subscriber %d for %s, leaving and re-joining on handle %d", p.id, p.streamType, p.handleId) goto retry case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM: fallthrough case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED: switch error_code { case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM: log.Printf("Publisher %s not created yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId) case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED: log.Printf("Publisher %s not sending yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId) } if !loggedNotPublishingYet { loggedNotPublishingYet = true statsWaitingForPublisherTotal.WithLabelValues(string(p.streamType)).Inc() } if err := waiter.Wait(ctx); err != nil { callback(err, nil) return } log.Printf("Retry subscribing %s from %s", p.streamType, p.publisher) goto retry default: // TODO(jojo): Should we handle other errors, too? 
callback(fmt.Errorf("Error joining room as subscriber: %+v", join_response), nil) return } } //log.Println("Joined as listener", join_response) p.session = join_response.Session callback(nil, join_response.Jsep) } func (p *mcuJanusSubscriber) update(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) { handle := p.handle if handle == nil { callback(ErrNotConnected, nil) return } configure_msg := map[string]interface{}{ "request": "configure", "update": true, } if stream != nil { stream.AddToMessage(configure_msg) } configure_response, err := handle.Message(ctx, configure_msg, nil) if err != nil { callback(err, nil) return } callback(nil, configure_response.Jsep) } type streamSelection struct { substream sql.NullInt16 temporal sql.NullInt16 audio sql.NullBool video sql.NullBool } func (s *streamSelection) HasValues() bool { return s.substream.Valid || s.temporal.Valid || s.audio.Valid || s.video.Valid } func (s *streamSelection) AddToMessage(message map[string]interface{}) { if s.substream.Valid { message["substream"] = s.substream.Int16 } if s.temporal.Valid { message["temporal"] = s.temporal.Int16 } if s.audio.Valid { message["audio"] = s.audio.Bool } if s.video.Valid { message["video"] = s.video.Bool } } func parseStreamSelection(payload map[string]interface{}) (*streamSelection, error) { var stream streamSelection if value, found := payload["substream"]; found { switch value := value.(type) { case int: stream.substream.Valid = true stream.substream.Int16 = int16(value) case float32: stream.substream.Valid = true stream.substream.Int16 = int16(value) case float64: stream.substream.Valid = true stream.substream.Int16 = int16(value) default: return nil, fmt.Errorf("Unsupported substream value: %v", value) } } if value, found := payload["temporal"]; found { switch value := value.(type) { case int: stream.temporal.Valid = true stream.temporal.Int16 = int16(value) case float32: stream.temporal.Valid = true stream.temporal.Int16 
= int16(value) case float64: stream.temporal.Valid = true stream.temporal.Int16 = int16(value) default: return nil, fmt.Errorf("Unsupported temporal value: %v", value) } } if value, found := payload["audio"]; found { switch value := value.(type) { case bool: stream.audio.Valid = true stream.audio.Bool = value default: return nil, fmt.Errorf("Unsupported audio value: %v", value) } } if value, found := payload["video"]; found { switch value := value.(type) { case bool: stream.video.Valid = true stream.video.Bool = value default: return nil, fmt.Errorf("Unsupported video value: %v", value) } } return &stream, nil } func (p *mcuJanusSubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { statsMcuMessagesTotal.WithLabelValues(data.Type).Inc() jsep_msg := data.Payload switch data.Type { case "requestoffer": fallthrough case "sendoffer": p.deferred <- func() { msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() stream, err := parseStreamSelection(jsep_msg) if err != nil { go callback(err, nil) return } if data.Sid == "" || data.Sid != p.Sid() { p.joinRoom(msgctx, stream, callback) } else { p.update(msgctx, stream, callback) } } case "answer": p.deferred <- func() { msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() if data.Sid == "" || data.Sid == p.Sid() { p.sendAnswer(msgctx, jsep_msg, callback) } else { go callback(fmt.Errorf("Answer message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil) } } case "candidate": p.deferred <- func() { msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() if data.Sid == "" || data.Sid == p.Sid() { p.sendCandidate(msgctx, jsep_msg["candidate"], callback) } else { go callback(fmt.Errorf("Candidate message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil) } } case 
"endOfCandidates": // Ignore case "selectStream": stream, err := parseStreamSelection(jsep_msg) if err != nil { go callback(err, nil) return } if stream == nil || !stream.HasValues() { // Nothing to do go callback(nil, nil) return } p.deferred <- func() { msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout) defer cancel() p.selectStream(msgctx, stream, callback) } default: // Return error asynchronously go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil) } } nextcloud-spreed-signaling-1.2.4/mcu_janus_test.go000066400000000000000000000133141460321600400223240ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "testing" ) func TestPublisherStatsCounter(t *testing.T) { RegisterJanusMcuStats() var c publisherStatsCounter c.Reset() checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) c.EnableStream("audio", false) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) c.EnableStream("audio", true) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) c.EnableStream("audio", true) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) c.EnableStream("video", true) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) c.EnableStream("audio", false) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) c.EnableStream("audio", false) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) c.AddSubscriber("1") checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 1) c.EnableStream("audio", true) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 1) c.AddSubscriber("1") checkStatsValue(t, 
statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 1) c.AddSubscriber("2") checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 2) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 2) c.RemoveSubscriber("3") checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 2) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 2) c.RemoveSubscriber("1") checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 1) c.AddSubscriber("1") checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 2) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 2) c.EnableStream("audio", false) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, 
statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 2) c.EnableStream("audio", true) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 1) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 1) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 2) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 2) c.EnableStream("audio", false) c.EnableStream("video", false) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuPublisherStreamTypesCurrent.WithLabelValues("video"), 0) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("audio"), 0) checkStatsValue(t, statsMcuSubscriberStreamTypesCurrent.WithLabelValues("video"), 0) collectAndLint(t, commonMcuStats...) } nextcloud-spreed-signaling-1.2.4/mcu_proxy.go000066400000000000000000001311361460321600400213310ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "crypto/rsa" "crypto/tls" "encoding/json" "errors" "fmt" "log" "net" "net/http" "net/url" "os" "sort" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" "github.com/gorilla/websocket" ) const ( closeTimeout = time.Second proxyDebugMessages = false // Very high value so the connections get sorted at the end. loadNotConnected = 1000000 // Sort connections by load every 10 publishing requests or once per second. connectionSortRequests = 10 connectionSortInterval = time.Second proxyUrlTypeStatic = "static" proxyUrlTypeEtcd = "etcd" initialWaitDelay = time.Second maxWaitDelay = 8 * time.Second defaultProxyTimeoutSeconds = 2 rttLogDuration = 500 * time.Millisecond ) type McuProxy interface { AddConnection(ignoreErrors bool, url string, ips ...net.IP) error KeepConnection(url string, ips ...net.IP) RemoveConnection(url string, ips ...net.IP) } type mcuProxyPubSubCommon struct { sid string streamType StreamType maxBitrate int proxyId string conn *mcuProxyConnection listener McuListener } func (c *mcuProxyPubSubCommon) Id() string { return c.proxyId } func (c *mcuProxyPubSubCommon) Sid() string { return c.sid } func (c *mcuProxyPubSubCommon) StreamType() StreamType { return c.streamType } func (c *mcuProxyPubSubCommon) MaxBitrate() int { return c.maxBitrate } func (c *mcuProxyPubSubCommon) doSendMessage(ctx context.Context, msg *ProxyClientMessage, callback func(error, map[string]interface{})) { c.conn.performAsyncRequest(ctx, msg, func(err error, response *ProxyServerMessage) { if err != nil { callback(err, nil) return } if proxyDebugMessages { log.Printf("Response from %s: %+v", c.conn, response) } if response.Type == "error" { callback(response.Error, nil) } else if response.Payload != nil { callback(nil, response.Payload.Payload) } else { callback(nil, nil) } }) } func (c *mcuProxyPubSubCommon) doProcessPayload(client McuClient, msg *PayloadProxyServerMessage) { switch 
msg.Type { case "offer": c.listener.OnUpdateOffer(client, msg.Payload["offer"].(map[string]interface{})) case "candidate": c.listener.OnIceCandidate(client, msg.Payload["candidate"]) default: log.Printf("Unsupported payload from %s: %+v", c.conn, msg) } } type mcuProxyPublisher struct { mcuProxyPubSubCommon id string mediaTypes MediaType } func newMcuProxyPublisher(id string, sid string, streamType StreamType, maxBitrate int, mediaTypes MediaType, proxyId string, conn *mcuProxyConnection, listener McuListener) *mcuProxyPublisher { return &mcuProxyPublisher{ mcuProxyPubSubCommon: mcuProxyPubSubCommon{ sid: sid, streamType: streamType, maxBitrate: maxBitrate, proxyId: proxyId, conn: conn, listener: listener, }, id: id, mediaTypes: mediaTypes, } } func (p *mcuProxyPublisher) HasMedia(mt MediaType) bool { return (p.mediaTypes & mt) == mt } func (p *mcuProxyPublisher) SetMedia(mt MediaType) { // TODO: Also update mediaTypes on proxy. p.mediaTypes = mt } func (p *mcuProxyPublisher) NotifyClosed() { p.listener.PublisherClosed(p) p.conn.removePublisher(p) } func (p *mcuProxyPublisher) Close(ctx context.Context) { p.NotifyClosed() msg := &ProxyClientMessage{ Type: "command", Command: &CommandProxyClientMessage{ Type: "delete-publisher", ClientId: p.proxyId, }, } if response, err := p.conn.performSyncRequest(ctx, msg); err != nil { log.Printf("Could not delete publisher %s at %s: %s", p.proxyId, p.conn, err) return } else if response.Type == "error" { log.Printf("Could not delete publisher %s at %s: %s", p.proxyId, p.conn, response.Error) return } log.Printf("Delete publisher %s at %s", p.proxyId, p.conn) } func (p *mcuProxyPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { msg := &ProxyClientMessage{ Type: "payload", Payload: &PayloadProxyClientMessage{ Type: data.Type, ClientId: p.proxyId, Sid: data.Sid, Payload: data.Payload, }, } p.doSendMessage(ctx, msg, callback) } 
func (p *mcuProxyPublisher) ProcessPayload(msg *PayloadProxyServerMessage) { p.doProcessPayload(p, msg) } func (p *mcuProxyPublisher) ProcessEvent(msg *EventProxyServerMessage) { switch msg.Type { case "ice-completed": p.listener.OnIceCompleted(p) case "publisher-closed": p.NotifyClosed() default: log.Printf("Unsupported event from %s: %+v", p.conn, msg) } } type mcuProxySubscriber struct { mcuProxyPubSubCommon publisherId string } func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType, maxBitrate int, proxyId string, conn *mcuProxyConnection, listener McuListener) *mcuProxySubscriber { return &mcuProxySubscriber{ mcuProxyPubSubCommon: mcuProxyPubSubCommon{ sid: sid, streamType: streamType, maxBitrate: maxBitrate, proxyId: proxyId, conn: conn, listener: listener, }, publisherId: publisherId, } } func (s *mcuProxySubscriber) Publisher() string { return s.publisherId } func (s *mcuProxySubscriber) NotifyClosed() { s.listener.SubscriberClosed(s) s.conn.removeSubscriber(s) } func (s *mcuProxySubscriber) Close(ctx context.Context) { s.NotifyClosed() msg := &ProxyClientMessage{ Type: "command", Command: &CommandProxyClientMessage{ Type: "delete-subscriber", ClientId: s.proxyId, }, } if response, err := s.conn.performSyncRequest(ctx, msg); err != nil { log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, err) return } else if response.Type == "error" { log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, response.Error) return } log.Printf("Delete subscriber %s at %s", s.proxyId, s.conn) } func (s *mcuProxySubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { msg := &ProxyClientMessage{ Type: "payload", Payload: &PayloadProxyClientMessage{ Type: data.Type, ClientId: s.proxyId, Sid: data.Sid, Payload: data.Payload, }, } s.doSendMessage(ctx, msg, callback) } func (s *mcuProxySubscriber) ProcessPayload(msg 
*PayloadProxyServerMessage) { s.doProcessPayload(s, msg) } func (s *mcuProxySubscriber) ProcessEvent(msg *EventProxyServerMessage) { switch msg.Type { case "ice-completed": s.listener.OnIceCompleted(s) case "subscriber-sid-updated": s.sid = msg.Sid s.listener.SubscriberSidUpdated(s) case "subscriber-closed": s.NotifyClosed() default: log.Printf("Unsupported event from %s: %+v", s.conn, msg) } } type mcuProxyConnection struct { proxy *mcuProxy rawUrl string url *url.URL ip net.IP load atomic.Int64 mu sync.Mutex closer *Closer closedDone *Closer closed atomic.Bool conn *websocket.Conn connectedSince time.Time reconnectTimer *time.Timer reconnectInterval atomic.Int64 shutdownScheduled atomic.Bool closeScheduled atomic.Bool trackClose atomic.Bool temporary atomic.Bool connectedNotifier SingleNotifier msgId atomic.Int64 helloMsgId string sessionId string country atomic.Value callbacks map[string]func(*ProxyServerMessage) publishersLock sync.RWMutex publishers map[string]*mcuProxyPublisher publisherIds map[string]string subscribersLock sync.RWMutex subscribers map[string]*mcuProxySubscriber } func newMcuProxyConnection(proxy *mcuProxy, baseUrl string, ip net.IP) (*mcuProxyConnection, error) { parsed, err := url.Parse(baseUrl) if err != nil { return nil, err } conn := &mcuProxyConnection{ proxy: proxy, rawUrl: baseUrl, url: parsed, ip: ip, closer: NewCloser(), closedDone: NewCloser(), callbacks: make(map[string]func(*ProxyServerMessage)), publishers: make(map[string]*mcuProxyPublisher), publisherIds: make(map[string]string), subscribers: make(map[string]*mcuProxySubscriber), } conn.reconnectInterval.Store(int64(initialReconnectInterval)) conn.load.Store(loadNotConnected) conn.country.Store("") return conn, nil } func (c *mcuProxyConnection) String() string { if c.ip != nil { return fmt.Sprintf("%s (%s)", c.rawUrl, c.ip) } return c.rawUrl } type mcuProxyConnectionStats struct { Url string `json:"url"` IP net.IP `json:"ip,omitempty"` Connected bool `json:"connected"` 
Publishers int64 `json:"publishers"` Clients int64 `json:"clients"` Load *int64 `json:"load,omitempty"` Shutdown *bool `json:"shutdown,omitempty"` Temporary *bool `json:"temporary,omitempty"` Uptime *time.Time `json:"uptime,omitempty"` } func (c *mcuProxyConnection) GetStats() *mcuProxyConnectionStats { result := &mcuProxyConnectionStats{ Url: c.url.String(), IP: c.ip, } c.mu.Lock() if c.conn != nil { result.Connected = true result.Uptime = &c.connectedSince load := c.Load() result.Load = &load shutdown := c.IsShutdownScheduled() result.Shutdown = &shutdown temporary := c.IsTemporary() result.Temporary = &temporary } c.mu.Unlock() c.publishersLock.RLock() result.Publishers = int64(len(c.publishers)) c.publishersLock.RUnlock() c.subscribersLock.RLock() result.Clients = int64(len(c.subscribers)) c.subscribersLock.RUnlock() result.Clients += result.Publishers return result } func (c *mcuProxyConnection) Load() int64 { return c.load.Load() } func (c *mcuProxyConnection) Country() string { return c.country.Load().(string) } func (c *mcuProxyConnection) IsTemporary() bool { return c.temporary.Load() } func (c *mcuProxyConnection) setTemporary() { c.temporary.Store(true) } func (c *mcuProxyConnection) clearTemporary() { c.temporary.Store(false) } func (c *mcuProxyConnection) IsShutdownScheduled() bool { return c.shutdownScheduled.Load() || c.closeScheduled.Load() } func (c *mcuProxyConnection) readPump() { defer func() { if !c.closed.Load() { c.scheduleReconnect() } else { c.closedDone.Close() } }() defer c.close() defer c.load.Store(loadNotConnected) c.mu.Lock() conn := c.conn c.mu.Unlock() conn.SetPongHandler(func(msg string) error { now := time.Now() conn.SetReadDeadline(now.Add(pongWait)) // nolint if msg == "" { return nil } if ts, err := strconv.ParseInt(msg, 10, 64); err == nil { rtt := now.Sub(time.Unix(0, ts)) if rtt >= rttLogDuration { rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds() log.Printf("Proxy at %s has RTT of %d ms (%s)", c, rtt_ms, rtt) } } 
return nil }) for { conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint _, message, err := conn.ReadMessage() if err != nil { if errors.Is(err, websocket.ErrCloseSent) { break } else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { log.Printf("Error reading from %s: %v", c, err) } break } var msg ProxyServerMessage if err := json.Unmarshal(message, &msg); err != nil { log.Printf("Error unmarshaling %s from %s: %s", string(message), c, err) continue } c.processMessage(&msg) } } func (c *mcuProxyConnection) sendPing() bool { c.mu.Lock() defer c.mu.Unlock() if c.conn == nil { return false } now := time.Now() msg := strconv.FormatInt(now.UnixNano(), 10) c.conn.SetWriteDeadline(now.Add(writeWait)) // nolint if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil { log.Printf("Could not send ping to proxy at %s: %v", c, err) go c.scheduleReconnect() return false } return true } func (c *mcuProxyConnection) writePump() { ticker := time.NewTicker(pingPeriod) defer func() { ticker.Stop() }() c.reconnectTimer = time.NewTimer(0) defer c.reconnectTimer.Stop() for { select { case <-c.reconnectTimer.C: c.reconnect() case <-ticker.C: c.sendPing() case <-c.closer.C: return } } } func (c *mcuProxyConnection) start() { go c.writePump() } func (c *mcuProxyConnection) sendClose() error { c.mu.Lock() defer c.mu.Unlock() if c.conn == nil { return ErrNotConnected } c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint return c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) } func (c *mcuProxyConnection) stop(ctx context.Context) { if !c.closed.CompareAndSwap(false, true) { return } c.closer.Close() if err := c.sendClose(); err != nil { if err != ErrNotConnected { log.Printf("Could not send close message to %s: %s", c, err) } c.close() return } select { case 
<-c.closedDone.C: case <-ctx.Done(): if err := ctx.Err(); err != nil { log.Printf("Error waiting for connection to %s get closed: %s", c, err) c.close() } } } func (c *mcuProxyConnection) close() { c.mu.Lock() defer c.mu.Unlock() c.connectedNotifier.Reset() if c.conn != nil { c.conn.Close() c.conn = nil if c.trackClose.CompareAndSwap(true, false) { statsConnectedProxyBackendsCurrent.WithLabelValues(c.Country()).Dec() } } } func (c *mcuProxyConnection) stopCloseIfEmpty() { c.closeScheduled.Store(false) } func (c *mcuProxyConnection) closeIfEmpty() bool { c.closeScheduled.Store(true) var total int64 c.publishersLock.RLock() total += int64(len(c.publishers)) c.publishersLock.RUnlock() c.subscribersLock.RLock() total += int64(len(c.subscribers)) c.subscribersLock.RUnlock() if total > 0 { // Connection will be closed once all clients have disconnected. log.Printf("Connection to %s is still used by %d clients, defer closing", c, total) return false } go func() { ctx, cancel := context.WithTimeout(context.Background(), closeTimeout) defer cancel() log.Printf("All clients disconnected, closing connection to %s", c) c.stop(ctx) c.proxy.removeConnection(c) }() return true } func (c *mcuProxyConnection) scheduleReconnect() { if err := c.sendClose(); err != nil && err != ErrNotConnected { log.Printf("Could not send close message to %s: %s", c, err) } c.close() if c.IsShutdownScheduled() { c.proxy.removeConnection(c) return } interval := c.reconnectInterval.Load() c.reconnectTimer.Reset(time.Duration(interval)) interval = interval * 2 if interval > int64(maxReconnectInterval) { interval = int64(maxReconnectInterval) } c.reconnectInterval.Store(interval) } func (c *mcuProxyConnection) reconnect() { u, err := c.url.Parse("proxy") if err != nil { log.Printf("Could not resolve url to proxy at %s: %s", c, err) c.scheduleReconnect() return } if u.Scheme == "http" { u.Scheme = "ws" } else if u.Scheme == "https" { u.Scheme = "wss" } dialer := c.proxy.dialer if c.ip != nil { dialer = 
&websocket.Dialer{ Proxy: http.ProxyFromEnvironment, HandshakeTimeout: c.proxy.dialer.HandshakeTimeout, TLSClientConfig: c.proxy.dialer.TLSClientConfig, // Override DNS lookup and connect to custom IP address. NetDialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { if _, port, err := net.SplitHostPort(addr); err == nil { addr = net.JoinHostPort(c.ip.String(), port) } return net.Dial(network, addr) }, } } conn, _, err := dialer.Dial(u.String(), nil) if err != nil { log.Printf("Could not connect to %s: %s", c, err) c.scheduleReconnect() return } if c.IsShutdownScheduled() { c.proxy.removeConnection(c) return } log.Printf("Connected to %s", c) c.closed.Store(false) c.mu.Lock() c.connectedSince = time.Now() c.conn = conn c.mu.Unlock() c.reconnectInterval.Store(int64(initialReconnectInterval)) c.shutdownScheduled.Store(false) if err := c.sendHello(); err != nil { log.Printf("Could not send hello request to %s: %s", c, err) c.scheduleReconnect() return } if !c.sendPing() { return } go c.readPump() } func (c *mcuProxyConnection) waitUntilConnected(ctx context.Context) error { c.mu.Lock() defer c.mu.Unlock() if c.conn != nil { return nil } waiter := c.connectedNotifier.NewWaiter() defer c.connectedNotifier.Release(waiter) c.mu.Unlock() defer c.mu.Lock() return waiter.Wait(ctx) } func (c *mcuProxyConnection) removePublisher(publisher *mcuProxyPublisher) { c.proxy.removePublisher(publisher) c.publishersLock.Lock() defer c.publishersLock.Unlock() if _, found := c.publishers[publisher.proxyId]; found { delete(c.publishers, publisher.proxyId) statsPublishersCurrent.WithLabelValues(string(publisher.StreamType())).Dec() } delete(c.publisherIds, getStreamId(publisher.id, publisher.StreamType())) if len(c.publishers) == 0 && (c.closeScheduled.Load() || c.IsTemporary()) { go c.closeIfEmpty() } } func (c *mcuProxyConnection) clearPublishers() { c.publishersLock.Lock() defer c.publishersLock.Unlock() go func(publishers map[string]*mcuProxyPublisher) { for 
_, publisher := range publishers { publisher.NotifyClosed() } }(c.publishers) c.publishers = make(map[string]*mcuProxyPublisher) c.publisherIds = make(map[string]string) if c.closeScheduled.Load() || c.IsTemporary() { go c.closeIfEmpty() } } func (c *mcuProxyConnection) removeSubscriber(subscriber *mcuProxySubscriber) { c.subscribersLock.Lock() defer c.subscribersLock.Unlock() if _, found := c.subscribers[subscriber.proxyId]; found { delete(c.subscribers, subscriber.proxyId) statsSubscribersCurrent.WithLabelValues(string(subscriber.StreamType())).Dec() } if len(c.subscribers) == 0 && (c.closeScheduled.Load() || c.IsTemporary()) { go c.closeIfEmpty() } } func (c *mcuProxyConnection) clearSubscribers() { c.subscribersLock.Lock() defer c.subscribersLock.Unlock() go func(subscribers map[string]*mcuProxySubscriber) { for _, subscriber := range subscribers { subscriber.NotifyClosed() } }(c.subscribers) c.subscribers = make(map[string]*mcuProxySubscriber) if c.closeScheduled.Load() || c.IsTemporary() { go c.closeIfEmpty() } } func (c *mcuProxyConnection) clearCallbacks() { c.mu.Lock() defer c.mu.Unlock() c.callbacks = make(map[string]func(*ProxyServerMessage)) } func (c *mcuProxyConnection) getCallback(id string) func(*ProxyServerMessage) { c.mu.Lock() defer c.mu.Unlock() callback, found := c.callbacks[id] if found { delete(c.callbacks, id) } return callback } func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) { if c.helloMsgId != "" && msg.Id == c.helloMsgId { c.helloMsgId = "" switch msg.Type { case "error": if msg.Error.Code == "no_such_session" { log.Printf("Session %s could not be resumed on %s, registering new", c.sessionId, c) c.clearPublishers() c.clearSubscribers() c.clearCallbacks() c.sessionId = "" if err := c.sendHello(); err != nil { log.Printf("Could not send hello request to %s: %s", c, err) c.scheduleReconnect() } return } log.Printf("Hello connection to %s failed with %+v, reconnecting", c, msg.Error) c.scheduleReconnect() case "hello": 
resumed := c.sessionId == msg.Hello.SessionId c.sessionId = msg.Hello.SessionId country := "" if msg.Hello.Server != nil { if country = msg.Hello.Server.Country; country != "" && !IsValidCountry(country) { log.Printf("Proxy %s sent invalid country %s in hello response", c, country) country = "" } } c.country.Store(country) if resumed { log.Printf("Resumed session %s on %s", c.sessionId, c) } else if country != "" { log.Printf("Received session %s from %s (in %s)", c.sessionId, c, country) } else { log.Printf("Received session %s from %s", c.sessionId, c) } if c.trackClose.CompareAndSwap(false, true) { statsConnectedProxyBackendsCurrent.WithLabelValues(c.Country()).Inc() } c.connectedNotifier.Notify() default: log.Printf("Received unsupported hello response %+v from %s, reconnecting", msg, c) c.scheduleReconnect() } return } if proxyDebugMessages { log.Printf("Received from %s: %+v", c, msg) } callback := c.getCallback(msg.Id) if callback != nil { callback(msg) return } switch msg.Type { case "payload": c.processPayload(msg) case "event": c.processEvent(msg) case "bye": c.processBye(msg) default: log.Printf("Unsupported message received from %s: %+v", c, msg) } } func (c *mcuProxyConnection) processPayload(msg *ProxyServerMessage) { payload := msg.Payload c.publishersLock.RLock() publisher, found := c.publishers[payload.ClientId] c.publishersLock.RUnlock() if found { publisher.ProcessPayload(payload) return } c.subscribersLock.RLock() subscriber, found := c.subscribers[payload.ClientId] c.subscribersLock.RUnlock() if found { subscriber.ProcessPayload(payload) return } log.Printf("Received payload for unknown client %+v from %s", payload, c) } func (c *mcuProxyConnection) processEvent(msg *ProxyServerMessage) { event := msg.Event switch event.Type { case "backend-disconnected": log.Printf("Upstream backend at %s got disconnected, reset MCU objects", c) c.clearPublishers() c.clearSubscribers() c.clearCallbacks() // TODO: Should we also reconnect? 
return case "backend-connected": log.Printf("Upstream backend at %s is connected", c) return case "update-load": if proxyDebugMessages { log.Printf("Load of %s now at %d", c, event.Load) } c.load.Store(event.Load) statsProxyBackendLoadCurrent.WithLabelValues(c.url.String()).Set(float64(event.Load)) return case "shutdown-scheduled": log.Printf("Proxy %s is scheduled to shutdown", c) c.shutdownScheduled.Store(true) return } if proxyDebugMessages { log.Printf("Process event from %s: %+v", c, event) } c.publishersLock.RLock() publisher, found := c.publishers[event.ClientId] c.publishersLock.RUnlock() if found { publisher.ProcessEvent(event) return } c.subscribersLock.RLock() subscriber, found := c.subscribers[event.ClientId] c.subscribersLock.RUnlock() if found { subscriber.ProcessEvent(event) return } log.Printf("Received event for unknown client %+v from %s", event, c) } func (c *mcuProxyConnection) processBye(msg *ProxyServerMessage) { bye := msg.Bye switch bye.Reason { case "session_resumed": log.Printf("Session %s on %s was resumed by other client, resetting", c.sessionId, c) c.sessionId = "" default: log.Printf("Received bye with unsupported reason from %s %+v", c, bye) } } func (c *mcuProxyConnection) sendHello() error { c.helloMsgId = strconv.FormatInt(c.msgId.Add(1), 10) msg := &ProxyClientMessage{ Id: c.helloMsgId, Type: "hello", Hello: &HelloProxyClientMessage{ Version: "1.0", }, } if c.sessionId != "" { msg.Hello.ResumeId = c.sessionId } else { claims := &TokenClaims{ jwt.RegisteredClaims{ IssuedAt: jwt.NewNumericDate(time.Now()), Issuer: c.proxy.tokenId, }, } token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) tokenString, err := token.SignedString(c.proxy.tokenKey) if err != nil { return err } msg.Hello.Token = tokenString } return c.sendMessage(msg) } func (c *mcuProxyConnection) sendMessage(msg *ProxyClientMessage) error { c.mu.Lock() defer c.mu.Unlock() return c.sendMessageLocked(msg) } func (c *mcuProxyConnection) sendMessageLocked(msg 
*ProxyClientMessage) error { if proxyDebugMessages { log.Printf("Send message to %s: %+v", c, msg) } if c.conn == nil { return ErrNotConnected } c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint return c.conn.WriteJSON(msg) } func (c *mcuProxyConnection) performAsyncRequest(ctx context.Context, msg *ProxyClientMessage, callback func(err error, response *ProxyServerMessage)) { msgId := strconv.FormatInt(c.msgId.Add(1), 10) msg.Id = msgId c.mu.Lock() defer c.mu.Unlock() c.callbacks[msgId] = func(msg *ProxyServerMessage) { callback(nil, msg) } if err := c.sendMessageLocked(msg); err != nil { delete(c.callbacks, msgId) go callback(err, nil) return } } func (c *mcuProxyConnection) performSyncRequest(ctx context.Context, msg *ProxyClientMessage) (*ProxyServerMessage, error) { if err := ctx.Err(); err != nil { return nil, err } errChan := make(chan error, 1) responseChan := make(chan *ProxyServerMessage, 1) c.performAsyncRequest(ctx, msg, func(err error, response *ProxyServerMessage) { if err != nil { errChan <- err } else { responseChan <- response } }) select { case <-ctx.Done(): return nil, ctx.Err() case err := <-errChan: return nil, err case response := <-responseChan: return response, nil } } func (c *mcuProxyConnection) newPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType) (McuPublisher, error) { msg := &ProxyClientMessage{ Type: "command", Command: &CommandProxyClientMessage{ Type: "create-publisher", Sid: sid, StreamType: streamType, Bitrate: bitrate, MediaTypes: mediaTypes, }, } response, err := c.performSyncRequest(ctx, msg) if err != nil { // TODO: Cancel request return nil, err } else if response.Type == "error" { return nil, fmt.Errorf("Error creating %s publisher for %s on %s: %+v", streamType, id, c, response.Error) } proxyId := response.Command.Id log.Printf("Created %s publisher %s on %s for %s", streamType, proxyId, c, id) publisher := newMcuProxyPublisher(id, 
sid, streamType, response.Command.Bitrate, mediaTypes, proxyId, c, listener) c.publishersLock.Lock() c.publishers[proxyId] = publisher c.publisherIds[getStreamId(id, streamType)] = proxyId c.publishersLock.Unlock() statsPublishersCurrent.WithLabelValues(string(streamType)).Inc() statsPublishersTotal.WithLabelValues(string(streamType)).Inc() return publisher, nil } func (c *mcuProxyConnection) newSubscriber(ctx context.Context, listener McuListener, publisherId string, publisherSessionId string, streamType StreamType) (McuSubscriber, error) { msg := &ProxyClientMessage{ Type: "command", Command: &CommandProxyClientMessage{ Type: "create-subscriber", StreamType: streamType, PublisherId: publisherId, }, } response, err := c.performSyncRequest(ctx, msg) if err != nil { // TODO: Cancel request return nil, err } else if response.Type == "error" { return nil, fmt.Errorf("Error creating %s subscriber for %s on %s: %+v", streamType, publisherSessionId, c, response.Error) } proxyId := response.Command.Id log.Printf("Created %s subscriber %s on %s for %s", streamType, proxyId, c, publisherSessionId) subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener) c.subscribersLock.Lock() c.subscribers[proxyId] = subscriber c.subscribersLock.Unlock() statsSubscribersCurrent.WithLabelValues(string(streamType)).Inc() statsSubscribersTotal.WithLabelValues(string(streamType)).Inc() return subscriber, nil } type mcuProxy struct { urlType string tokenId string tokenKey *rsa.PrivateKey config ProxyConfig dialer *websocket.Dialer connections []*mcuProxyConnection connectionsMap map[string][]*mcuProxyConnection connectionsMu sync.RWMutex proxyTimeout time.Duration connRequests atomic.Int64 nextSort atomic.Int64 maxStreamBitrate int maxScreenBitrate int mu sync.RWMutex publishers map[string]*mcuProxyConnection publisherWaiters ChannelWaiters continentsMap atomic.Value rpcClients *GrpcClients } func 
NewMcuProxy(config *goconf.ConfigFile, etcdClient *EtcdClient, rpcClients *GrpcClients, dnsMonitor *DnsMonitor) (Mcu, error) { urlType, _ := config.GetString("mcu", "urltype") if urlType == "" { urlType = proxyUrlTypeStatic } tokenId, _ := config.GetString("mcu", "token_id") if tokenId == "" { return nil, fmt.Errorf("No token id configured") } tokenKeyFilename, _ := config.GetString("mcu", "token_key") if tokenKeyFilename == "" { return nil, fmt.Errorf("No token key configured") } tokenKeyData, err := os.ReadFile(tokenKeyFilename) if err != nil { return nil, fmt.Errorf("Could not read private key from %s: %s", tokenKeyFilename, err) } tokenKey, err := jwt.ParseRSAPrivateKeyFromPEM(tokenKeyData) if err != nil { return nil, fmt.Errorf("Could not parse private key from %s: %s", tokenKeyFilename, err) } proxyTimeoutSeconds, _ := config.GetInt("mcu", "proxytimeout") if proxyTimeoutSeconds <= 0 { proxyTimeoutSeconds = defaultProxyTimeoutSeconds } proxyTimeout := time.Duration(proxyTimeoutSeconds) * time.Second log.Printf("Using a timeout of %s for proxy requests", proxyTimeout) maxStreamBitrate, _ := config.GetInt("mcu", "maxstreambitrate") if maxStreamBitrate <= 0 { maxStreamBitrate = defaultMaxStreamBitrate } maxScreenBitrate, _ := config.GetInt("mcu", "maxscreenbitrate") if maxScreenBitrate <= 0 { maxScreenBitrate = defaultMaxScreenBitrate } mcu := &mcuProxy{ urlType: urlType, tokenId: tokenId, tokenKey: tokenKey, dialer: &websocket.Dialer{ Proxy: http.ProxyFromEnvironment, HandshakeTimeout: proxyTimeout, }, connectionsMap: make(map[string][]*mcuProxyConnection), proxyTimeout: proxyTimeout, maxStreamBitrate: maxStreamBitrate, maxScreenBitrate: maxScreenBitrate, publishers: make(map[string]*mcuProxyConnection), rpcClients: rpcClients, } if err := mcu.loadContinentsMap(config); err != nil { return nil, err } skipverify, _ := config.GetBool("mcu", "skipverify") if skipverify { log.Println("WARNING: MCU verification is disabled!") mcu.dialer.TLSClientConfig = &tls.Config{ 
InsecureSkipVerify: skipverify, } } switch urlType { case proxyUrlTypeStatic: mcu.config, err = NewProxyConfigStatic(config, mcu, dnsMonitor) case proxyUrlTypeEtcd: mcu.config, err = NewProxyConfigEtcd(config, etcdClient, mcu) default: err = fmt.Errorf("Unsupported proxy URL type %s", urlType) } if err != nil { return nil, err } return mcu, nil } func (m *mcuProxy) loadContinentsMap(config *goconf.ConfigFile) error { options, err := GetStringOptions(config, "continent-overrides", false) if err != nil { return err } if len(options) == 0 { m.setContinentsMap(nil) return nil } continentsMap := make(map[string][]string) for option, value := range options { option = strings.ToUpper(strings.TrimSpace(option)) if !IsValidContinent(option) { log.Printf("Ignore unknown continent %s", option) continue } var values []string for _, v := range strings.Split(value, ",") { v = strings.ToUpper(strings.TrimSpace(v)) if !IsValidContinent(v) { log.Printf("Ignore unknown continent %s for override %s", v, option) continue } values = append(values, v) } if len(values) == 0 { log.Printf("No valid values found for continent override %s, ignoring", option) continue } continentsMap[option] = values log.Printf("Mapping users on continent %s to %s", option, values) } m.setContinentsMap(continentsMap) return nil } func (m *mcuProxy) Start() error { log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate) log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate) return m.config.Start() } func (m *mcuProxy) Stop() { m.connectionsMu.RLock() defer m.connectionsMu.RUnlock() ctx, cancel := context.WithTimeout(context.Background(), closeTimeout) defer cancel() for _, c := range m.connections { c.stop(ctx) } m.config.Stop() } func (m *mcuProxy) AddConnection(ignoreErrors bool, url string, ips ...net.IP) error { m.connectionsMu.Lock() defer m.connectionsMu.Unlock() var conns []*mcuProxyConnection if len(ips) == 0 { conn, err := 
newMcuProxyConnection(m, url, nil) if err != nil { if ignoreErrors { log.Printf("Could not create proxy connection to %s: %s", url, err) return nil } return err } conns = append(conns, conn) } else { for _, ip := range ips { conn, err := newMcuProxyConnection(m, url, ip) if err != nil { if ignoreErrors { log.Printf("Could not create proxy connection to %s (%s): %s", url, ip, err) continue } return err } conns = append(conns, conn) } } for _, conn := range conns { log.Printf("Adding new connection to %s", conn) conn.start() m.connections = append(m.connections, conn) if existing, found := m.connectionsMap[url]; found { m.connectionsMap[url] = append(existing, conn) } else { m.connectionsMap[url] = []*mcuProxyConnection{conn} } } m.nextSort.Store(0) return nil } func containsIP(ips []net.IP, ip net.IP) bool { for _, i := range ips { if i.Equal(ip) { return true } } return false } func (m *mcuProxy) iterateConnections(url string, ips []net.IP, f func(conn *mcuProxyConnection)) { m.connectionsMu.Lock() defer m.connectionsMu.Unlock() conns, found := m.connectionsMap[url] if !found { return } var toRemove []*mcuProxyConnection if len(ips) == 0 { toRemove = conns } else { for _, conn := range conns { if containsIP(ips, conn.ip) { toRemove = append(toRemove, conn) } } } for _, conn := range toRemove { f(conn) } } func (m *mcuProxy) RemoveConnection(url string, ips ...net.IP) { m.iterateConnections(url, ips, func(conn *mcuProxyConnection) { log.Printf("Removing connection to %s", conn) conn.closeIfEmpty() }) } func (m *mcuProxy) KeepConnection(url string, ips ...net.IP) { m.iterateConnections(url, ips, func(conn *mcuProxyConnection) { conn.stopCloseIfEmpty() conn.clearTemporary() }) } func (m *mcuProxy) Reload(config *goconf.ConfigFile) { if err := m.loadContinentsMap(config); err != nil { log.Printf("Error loading continents map: %s", err) } if err := m.config.Reload(config); err != nil { log.Printf("could not reload proxy configuration: %s", err) } } func (m *mcuProxy) 
removeConnection(c *mcuProxyConnection) { m.connectionsMu.Lock() defer m.connectionsMu.Unlock() if conns, found := m.connectionsMap[c.rawUrl]; found { for idx, conn := range conns { if conn == c { conns = append(conns[:idx], conns[idx+1:]...) break } } if len(conns) == 0 { delete(m.connectionsMap, c.rawUrl) m.connections = nil for _, conns := range m.connectionsMap { m.connections = append(m.connections, conns...) } } else { m.connectionsMap[c.rawUrl] = conns } m.nextSort.Store(0) } } func (m *mcuProxy) SetOnConnected(f func()) { // Not supported. } func (m *mcuProxy) SetOnDisconnected(f func()) { // Not supported. } type mcuProxyStats struct { Publishers int64 `json:"publishers"` Clients int64 `json:"clients"` Details []*mcuProxyConnectionStats `json:"details"` } func (m *mcuProxy) GetStats() interface{} { result := &mcuProxyStats{} m.connectionsMu.RLock() defer m.connectionsMu.RUnlock() for _, conn := range m.connections { stats := conn.GetStats() result.Publishers += stats.Publishers result.Clients += stats.Clients result.Details = append(result.Details, stats) } return result } func (m *mcuProxy) getContinentsMap() map[string][]string { continentsMap := m.continentsMap.Load() if continentsMap == nil { return nil } return continentsMap.(map[string][]string) } func (m *mcuProxy) setContinentsMap(continentsMap map[string][]string) { if continentsMap == nil { continentsMap = make(map[string][]string) } m.continentsMap.Store(continentsMap) } type mcuProxyConnectionsList []*mcuProxyConnection func (l mcuProxyConnectionsList) Len() int { return len(l) } func (l mcuProxyConnectionsList) Less(i, j int) bool { return l[i].Load() < l[j].Load() } func (l mcuProxyConnectionsList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } func (l mcuProxyConnectionsList) Sort() { sort.Sort(l) } func ContinentsOverlap(a, b []string) bool { if len(a) == 0 || len(b) == 0 { return false } for _, checkA := range a { for _, checkB := range b { if checkA == checkB { return true } } } return false 
} func sortConnectionsForCountry(connections []*mcuProxyConnection, country string, continentMap map[string][]string) []*mcuProxyConnection { // Move connections in the same country to the start of the list. sorted := make(mcuProxyConnectionsList, 0, len(connections)) unprocessed := make(mcuProxyConnectionsList, 0, len(connections)) for _, conn := range connections { if country == conn.Country() { sorted = append(sorted, conn) } else { unprocessed = append(unprocessed, conn) } } if continents, found := ContinentMap[country]; found && len(unprocessed) > 1 { remaining := make(mcuProxyConnectionsList, 0, len(unprocessed)) // Map continents to other continents (e.g. use Europe for Africa). for _, continent := range continents { if toAdd, found := continentMap[continent]; found { continents = append(continents, toAdd...) } } // Next up are connections on the same or mapped continent. for _, conn := range unprocessed { connCountry := conn.Country() if IsValidCountry(connCountry) { connContinents := ContinentMap[connCountry] if ContinentsOverlap(continents, connContinents) { sorted = append(sorted, conn) } else { remaining = append(remaining, conn) } } else { remaining = append(remaining, conn) } } unprocessed = remaining } // Add all other connections by load. sorted = append(sorted, unprocessed...) return sorted } func (m *mcuProxy) getSortedConnections(initiator McuInitiator) []*mcuProxyConnection { m.connectionsMu.RLock() connections := m.connections m.connectionsMu.RUnlock() if len(connections) < 2 { return connections } // Connections are re-sorted every requests or // every . 
now := time.Now().UnixNano() if m.connRequests.Add(1)%connectionSortRequests == 0 || m.nextSort.Load() <= now { m.nextSort.Store(now + int64(connectionSortInterval)) sorted := make(mcuProxyConnectionsList, len(connections)) copy(sorted, connections) sorted.Sort() m.connectionsMu.Lock() m.connections = sorted m.connectionsMu.Unlock() connections = sorted } if initiator != nil { if country := initiator.Country(); IsValidCountry(country) { connections = sortConnectionsForCountry(connections, country, m.getContinentsMap()) } } return connections } func (m *mcuProxy) removePublisher(publisher *mcuProxyPublisher) { m.mu.Lock() defer m.mu.Unlock() delete(m.publishers, getStreamId(publisher.id, publisher.StreamType())) } func (m *mcuProxy) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) { connections := m.getSortedConnections(initiator) for _, conn := range connections { if conn.IsShutdownScheduled() || conn.IsTemporary() { continue } subctx, cancel := context.WithTimeout(ctx, m.proxyTimeout) defer cancel() var maxBitrate int if streamType == StreamTypeScreen { maxBitrate = m.maxScreenBitrate } else { maxBitrate = m.maxStreamBitrate } if bitrate <= 0 { bitrate = maxBitrate } else { bitrate = min(bitrate, maxBitrate) } publisher, err := conn.newPublisher(subctx, listener, id, sid, streamType, bitrate, mediaTypes) if err != nil { log.Printf("Could not create %s publisher for %s on %s: %s", streamType, id, conn, err) continue } m.mu.Lock() m.publishers[getStreamId(id, streamType)] = conn m.mu.Unlock() m.publisherWaiters.Wakeup() return publisher, nil } statsProxyNobackendAvailableTotal.WithLabelValues(string(streamType)).Inc() return nil, fmt.Errorf("No MCU connection available") } func (m *mcuProxy) getPublisherConnection(publisher string, streamType StreamType) *mcuProxyConnection { m.mu.RLock() defer m.mu.RUnlock() return 
m.publishers[getStreamId(publisher, streamType)] } func (m *mcuProxy) waitForPublisherConnection(ctx context.Context, publisher string, streamType StreamType) *mcuProxyConnection { m.mu.Lock() defer m.mu.Unlock() conn := m.publishers[getStreamId(publisher, streamType)] if conn != nil { // Publisher was created while waiting for lock. return conn } ch := make(chan struct{}, 1) id := m.publisherWaiters.Add(ch) defer m.publisherWaiters.Remove(id) statsWaitingForPublisherTotal.WithLabelValues(string(streamType)).Inc() for { m.mu.Unlock() select { case <-ch: m.mu.Lock() conn = m.publishers[getStreamId(publisher, streamType)] if conn != nil { return conn } case <-ctx.Done(): m.mu.Lock() return nil } } } func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) { if conn := m.getPublisherConnection(publisher, streamType); conn != nil { // Fast common path: publisher is available locally. conn.publishersLock.Lock() id, found := conn.publisherIds[getStreamId(publisher, streamType)] conn.publishersLock.Unlock() if !found { return nil, fmt.Errorf("Unknown publisher %s", publisher) } return conn.newSubscriber(ctx, listener, id, publisher, streamType) } log.Printf("No %s publisher %s found yet, deferring", streamType, publisher) ch := make(chan McuSubscriber) getctx, cancel := context.WithCancel(ctx) defer cancel() // Wait for publisher to be created locally. go func() { if conn := m.waitForPublisherConnection(getctx, publisher, streamType); conn != nil { cancel() // Cancel pending RPC calls. 
conn.publishersLock.Lock() id, found := conn.publisherIds[getStreamId(publisher, streamType)] conn.publishersLock.Unlock() if !found { log.Printf("Unknown id for local %s publisher %s", streamType, publisher) return } subscriber, err := conn.newSubscriber(ctx, listener, id, publisher, streamType) if subscriber != nil { ch <- subscriber } else if err != nil { log.Printf("Error creating local subscriber for %s publisher %s: %s", streamType, publisher, err) } } }() // Wait for publisher to be created on one of the other servers in the cluster. if clients := m.rpcClients.GetClients(); len(clients) > 0 { for _, client := range clients { go func(client *GrpcClient) { id, url, ip, err := client.GetPublisherId(getctx, publisher, streamType) if errors.Is(err, context.Canceled) { return } else if err != nil { log.Printf("Error getting %s publisher id %s from %s: %s", streamType, publisher, client.Target(), err) return } else if id == "" { // Publisher not found on other server return } cancel() // Cancel pending RPC calls. 
log.Printf("Found publisher id %s through %s on proxy %s", id, client.Target(), url) m.connectionsMu.RLock() connections := m.connections m.connectionsMu.RUnlock() var publisherConn *mcuProxyConnection for _, conn := range connections { if conn.rawUrl != url || !ip.Equal(conn.ip) { continue } // Simple case, signaling server has a connection to the same endpoint publisherConn = conn break } if publisherConn == nil { publisherConn, err = newMcuProxyConnection(m, url, ip) if err != nil { log.Printf("Could not create temporary connection to %s for %s publisher %s: %s", url, streamType, publisher, err) return } publisherConn.setTemporary() publisherConn.start() if err := publisherConn.waitUntilConnected(ctx); err != nil { log.Printf("Could not establish new connection to %s: %s", publisherConn, err) publisherConn.closeIfEmpty() return } m.connectionsMu.Lock() m.connections = append(m.connections, publisherConn) conns, found := m.connectionsMap[url] if found { conns = append(conns, publisherConn) } else { conns = []*mcuProxyConnection{publisherConn} } m.connectionsMap[url] = conns m.connectionsMu.Unlock() } subscriber, err := publisherConn.newSubscriber(ctx, listener, id, publisher, streamType) if err != nil { if publisherConn.IsTemporary() { publisherConn.closeIfEmpty() } log.Printf("Could not create subscriber for %s publisher %s: %s", streamType, publisher, err) return } ch <- subscriber }(client) } } select { case subscriber := <-ch: return subscriber, nil case <-ctx.Done(): return nil, fmt.Errorf("No %s publisher %s found", streamType, publisher) } } nextcloud-spreed-signaling-1.2.4/mcu_proxy_test.go000066400000000000000000000104311460321600400223620ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "testing" ) func TestMcuProxyStats(t *testing.T) { collectAndLint(t, proxyMcuStats...) } func newProxyConnectionWithCountry(country string) *mcuProxyConnection { conn := &mcuProxyConnection{} conn.country.Store(country) return conn } func Test_sortConnectionsForCountry(t *testing.T) { conn_de := newProxyConnectionWithCountry("DE") conn_at := newProxyConnectionWithCountry("AT") conn_jp := newProxyConnectionWithCountry("JP") conn_us := newProxyConnectionWithCountry("US") testcases := map[string][][]*mcuProxyConnection{ // Direct country match "DE": { {conn_at, conn_jp, conn_de}, {conn_de, conn_at, conn_jp}, }, // Direct country match "AT": { {conn_at, conn_jp, conn_de}, {conn_at, conn_de, conn_jp}, }, // Continent match "CH": { {conn_de, conn_jp, conn_at}, {conn_de, conn_at, conn_jp}, }, // Direct country match "JP": { {conn_de, conn_jp, conn_at}, {conn_jp, conn_de, conn_at}, }, // Continent match "CN": { {conn_de, conn_jp, conn_at}, {conn_jp, conn_de, conn_at}, }, // Continent match "RU": { {conn_us, conn_de, conn_jp, conn_at}, {conn_de, conn_at, conn_us, conn_jp}, }, // No match "AU": { {conn_us, conn_de, conn_jp, conn_at}, {conn_us, conn_de, conn_jp, conn_at}, }, } for country, test := range testcases { country 
:= country test := test t.Run(country, func(t *testing.T) { sorted := sortConnectionsForCountry(test[0], country, nil) for idx, conn := range sorted { if test[1][idx] != conn { t.Errorf("Index %d for %s: expected %s, got %s", idx, country, test[1][idx].Country(), conn.Country()) } } }) } } func Test_sortConnectionsForCountryWithOverride(t *testing.T) { conn_de := newProxyConnectionWithCountry("DE") conn_at := newProxyConnectionWithCountry("AT") conn_jp := newProxyConnectionWithCountry("JP") conn_us := newProxyConnectionWithCountry("US") testcases := map[string][][]*mcuProxyConnection{ // Direct country match "DE": { {conn_at, conn_jp, conn_de}, {conn_de, conn_at, conn_jp}, }, // Direct country match "AT": { {conn_at, conn_jp, conn_de}, {conn_at, conn_de, conn_jp}, }, // Continent match "CH": { {conn_de, conn_jp, conn_at}, {conn_de, conn_at, conn_jp}, }, // Direct country match "JP": { {conn_de, conn_jp, conn_at}, {conn_jp, conn_de, conn_at}, }, // Continent match "CN": { {conn_de, conn_jp, conn_at}, {conn_jp, conn_de, conn_at}, }, // Continent match "RU": { {conn_us, conn_de, conn_jp, conn_at}, {conn_de, conn_at, conn_us, conn_jp}, }, // No match "AR": { {conn_us, conn_de, conn_jp, conn_at}, {conn_us, conn_de, conn_jp, conn_at}, }, // No match but override (OC -> AS / NA) "AU": { {conn_us, conn_jp}, {conn_us, conn_jp}, }, // No match but override (AF -> EU) "ZA": { {conn_de, conn_at}, {conn_de, conn_at}, }, } continentMap := map[string][]string{ // Use European connections for Africa. "AF": {"EU"}, // Use Asian and North American connections for Oceania. 
"OC": {"AS", "NA"}, } for country, test := range testcases { country := country test := test t.Run(country, func(t *testing.T) { sorted := sortConnectionsForCountry(test[0], country, continentMap) for idx, conn := range sorted { if test[1][idx] != conn { t.Errorf("Index %d for %s: expected %s, got %s", idx, country, test[1][idx].Country(), conn.Country()) } } }) } } nextcloud-spreed-signaling-1.2.4/mcu_stats_prometheus.go000066400000000000000000000104511460321600400235550ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
 */

package signaling

import (
	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Publisher/subscriber gauges track the current counts, the *_total
	// counters track lifetime creations. All are partitioned by stream type.
	statsPublishersCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "publishers",
		Help:      "The current number of publishers",
	}, []string{"type"})
	statsPublishersTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "publishers_total",
		Help:      "The total number of created publishers",
	}, []string{"type"})
	statsSubscribersCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "subscribers",
		Help:      "The current number of subscribers",
	}, []string{"type"})
	statsSubscribersTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "subscribers_total",
		Help:      "The total number of created subscribers",
	}, []string{"type"})
	// Counts subscribe requests that arrived before a matching publisher
	// existed.
	statsWaitingForPublisherTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "nopublisher_total",
		Help:      "The total number of subscribe requests where no publisher exists",
	}, []string{"type"})
	statsMcuMessagesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "messages_total",
		Help:      "The total number of MCU messages",
	}, []string{"type"})
	statsMcuSubscriberStreamTypesCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "subscriber_streams",
		Help:      "The current number of subscribed media streams",
	}, []string{"type"})
	statsMcuPublisherStreamTypesCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "publisher_streams",
		Help:      "The current number of published media streams",
	}, []string{"type"})

	// commonMcuStats are registered for every MCU backend type (Janus and
	// proxy alike).
	commonMcuStats = []prometheus.Collector{
		statsPublishersCurrent,
		statsPublishersTotal,
		statsSubscribersCurrent,
		statsSubscribersTotal,
		statsWaitingForPublisherTotal,
		statsMcuMessagesTotal,
		statsMcuSubscriberStreamTypesCurrent,
		statsMcuPublisherStreamTypesCurrent,
	}

	// Metrics below are only meaningful when using signaling proxy backends.
	statsConnectedProxyBackendsCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "backend_connections",
		Help:      "Current number of connections to signaling proxy backends",
	}, []string{"country"})
	statsProxyBackendLoadCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "backend_load",
		Help:      "Current load of signaling proxy backends",
	}, []string{"url"})
	statsProxyNobackendAvailableTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "signaling",
		Subsystem: "mcu",
		Name:      "no_backend_available_total",
		Help:      "Total number of publishing requests where no backend was available",
	}, []string{"type"})

	// proxyMcuStats are registered in addition to commonMcuStats when the
	// proxy MCU is used.
	proxyMcuStats = []prometheus.Collector{
		statsConnectedProxyBackendsCurrent,
		statsProxyBackendLoadCurrent,
		statsProxyNobackendAvailableTotal,
	}
)

// RegisterJanusMcuStats registers the metrics shared by all MCU types.
func RegisterJanusMcuStats() {
	registerAll(commonMcuStats...)
}

// UnregisterJanusMcuStats removes the metrics registered by
// RegisterJanusMcuStats.
func UnregisterJanusMcuStats() {
	unregisterAll(commonMcuStats...)
}

// RegisterProxyMcuStats registers the shared metrics plus the proxy-specific
// ones.
func RegisterProxyMcuStats() {
	registerAll(commonMcuStats...)
	registerAll(proxyMcuStats...)
}

// UnregisterProxyMcuStats removes the metrics registered by
// RegisterProxyMcuStats.
func UnregisterProxyMcuStats() {
	unregisterAll(commonMcuStats...)
	unregisterAll(proxyMcuStats...)
}
nextcloud-spreed-signaling-1.2.4/mcu_test.go
/**
 * Standalone signaling server for the Nextcloud Spreed app.
 * Copyright (C) 2019 struktur AG
 *
 * @author Joachim Bauch
 *
 * @license GNU AGPL version 3 or any later version
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "fmt" "log" "sync" "sync/atomic" "github.com/dlintw/goconf" ) const ( TestMaxBitrateScreen = 12345678 TestMaxBitrateVideo = 23456789 ) type TestMCU struct { mu sync.Mutex publishers map[string]*TestMCUPublisher subscribers map[string]*TestMCUSubscriber } func NewTestMCU() (*TestMCU, error) { return &TestMCU{ publishers: make(map[string]*TestMCUPublisher), subscribers: make(map[string]*TestMCUSubscriber), }, nil } func (m *TestMCU) Start() error { return nil } func (m *TestMCU) Stop() { } func (m *TestMCU) Reload(config *goconf.ConfigFile) { } func (m *TestMCU) SetOnConnected(f func()) { } func (m *TestMCU) SetOnDisconnected(f func()) { } func (m *TestMCU) GetStats() interface{} { return nil } func (m *TestMCU) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) { var maxBitrate int if streamType == StreamTypeScreen { maxBitrate = TestMaxBitrateScreen } else { maxBitrate = TestMaxBitrateVideo } if bitrate <= 0 { bitrate = maxBitrate } else if bitrate > maxBitrate { bitrate = maxBitrate } pub := &TestMCUPublisher{ TestMCUClient: TestMCUClient{ id: id, sid: sid, streamType: streamType, }, mediaTypes: mediaTypes, bitrate: bitrate, } m.mu.Lock() defer m.mu.Unlock() m.publishers[id] = pub return pub, nil } func (m *TestMCU) GetPublishers() map[string]*TestMCUPublisher { m.mu.Lock() defer m.mu.Unlock() result := make(map[string]*TestMCUPublisher, len(m.publishers)) for id, pub := range m.publishers { result[id] = pub } return result } func (m 
*TestMCU) GetPublisher(id string) *TestMCUPublisher { m.mu.Lock() defer m.mu.Unlock() return m.publishers[id] } func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) { m.mu.Lock() defer m.mu.Unlock() pub := m.publishers[publisher] if pub == nil { return nil, fmt.Errorf("Waiting for publisher not implemented yet") } id := newRandomString(8) sub := &TestMCUSubscriber{ TestMCUClient: TestMCUClient{ id: id, streamType: streamType, }, publisher: pub, } return sub, nil } type TestMCUClient struct { closed atomic.Bool id string sid string streamType StreamType } func (c *TestMCUClient) Id() string { return c.id } func (c *TestMCUClient) Sid() string { return c.sid } func (c *TestMCUClient) StreamType() StreamType { return c.streamType } func (c *TestMCUClient) MaxBitrate() int { return 0 } func (c *TestMCUClient) Close(ctx context.Context) { if c.closed.CompareAndSwap(false, true) { log.Printf("Close MCU client %s", c.id) } } func (c *TestMCUClient) isClosed() bool { return c.closed.Load() } type TestMCUPublisher struct { TestMCUClient mediaTypes MediaType bitrate int sdp string } func (p *TestMCUPublisher) HasMedia(mt MediaType) bool { return (p.mediaTypes & mt) == mt } func (p *TestMCUPublisher) SetMedia(mt MediaType) { p.mediaTypes = mt } func (p *TestMCUPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { go func() { if p.isClosed() { callback(fmt.Errorf("Already closed"), nil) return } switch data.Type { case "offer": sdp := data.Payload["sdp"] if sdp, ok := sdp.(string); ok { p.sdp = sdp if sdp == MockSdpOfferAudioOnly { callback(nil, map[string]interface{}{ "type": "answer", "sdp": MockSdpAnswerAudioOnly, }) return } else if sdp == MockSdpOfferAudioAndVideo { callback(nil, map[string]interface{}{ "type": "answer", "sdp": MockSdpAnswerAudioAndVideo, }) return } } 
callback(fmt.Errorf("Offer payload %+v is not implemented", data.Payload), nil) default: callback(fmt.Errorf("Message type %s is not implemented", data.Type), nil) } }() } type TestMCUSubscriber struct { TestMCUClient publisher *TestMCUPublisher } func (s *TestMCUSubscriber) Publisher() string { return s.publisher.id } func (s *TestMCUSubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) { go func() { if s.isClosed() { callback(fmt.Errorf("Already closed"), nil) return } switch data.Type { case "requestoffer": fallthrough case "sendoffer": sdp := s.publisher.sdp if sdp == "" { callback(fmt.Errorf("Publisher not sending (no SDP)"), nil) return } callback(nil, map[string]interface{}{ "type": "offer", "sdp": sdp, }) default: callback(fmt.Errorf("Message type %s is not implemented", data.Type), nil) } }() } nextcloud-spreed-signaling-1.2.4/mkdocs.yml000066400000000000000000000006601460321600400207550ustar00rootroot00000000000000site_name: Nextcloud Spreed Signaling API documentation theme: name: readthedocs highlightjs: true hljs_languages: - bash - golang - json - make - markdown markdown_extensions: - admonition - def_list - toc: permalink: "#" baselevel: 2 nav: - 'API documentation': - 'Standalone signaling API': 'standalone-signaling-api-v1.md' - 'Prometheus Metrics': 'prometheus-metrics.md' nextcloud-spreed-signaling-1.2.4/mock_data_test.go000066400000000000000000000124031460321600400222600ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling const ( // See https://tools.ietf.org/id/draft-ietf-rtcweb-sdp-08.html#rfc.section.5.2.1 MockSdpOfferAudioOnly = `v=0 o=- 20518 0 IN IP4 0.0.0.0 s=- t=0 0 a=group:BUNDLE audio-D.ietf-mmusic-sdp-bundle-negotiation a=ice-options:trickle-D.ietf-mmusic-trickle-ice m=audio 54609 UDP/TLS/RTP/SAVPF 109 0 8 c=IN IP4 192.168.0.1 a=mid:audio a=msid:ma ta a=sendrecv a=rtpmap:109 opus/48000/2 a=rtpmap:0 PCMU/8000 a=rtpmap:8 PCMA/8000 a=maxptime:120 a=ice-ufrag:074c6550 a=ice-pwd:a28a397a4c3f31747d1ee3474af08a068 a=fingerprint:sha-256 19:E2:1C:3B:4B:9F:81:E6:B8:5C:F4:A5:A8:D8:73:04:BB:05:2F:70:9F:04:A9:0E:05:E9:26:33:E8:70:88:A2 a=setup:actpass a=tls-id:1 a=rtcp-mux a=rtcp:60065 IN IP4 192.168.0.1 a=rtcp-rsize a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level a=extmap:2 urn:ietf:params:rtp-hdrext:sdes:mid a=candidate:0 1 UDP 2122194687 192.0.2.4 61665 typ host a=candidate:1 1 UDP 1685987071 192.168.0.1 54609 typ srflx raddr 192.0.2.4 rport 61665 a=candidate:0 2 UDP 2122194687 192.0.2.4 61667 typ host a=candidate:1 2 UDP 1685987071 192.168.0.1 60065 typ srflx raddr 192.0.2.4 rport 61667 a=end-of-candidates ` MockSdpAnswerAudioOnly = `v=0 o=- 16833 0 IN IP4 0.0.0.0 s=- t=0 0 a=group:BUNDLE audio a=ice-options:trickle m=audio 49203 UDP/TLS/RTP/SAVPF 109 0 8 c=IN IP4 192.168.0.1 a=mid:audio a=msid:ma ta a=sendrecv a=rtpmap:109 opus/48000/2 a=rtpmap:0 PCMU/8000 a=rtpmap:8 PCMA/8000 a=maxptime:120 a=ice-ufrag:05067423 a=ice-pwd:1747d1ee3474a28a397a4c3f3af08a068 a=fingerprint:sha-256 6B:8B:F0:65:5F:78:E2:51:3B:AC:6F:F3:3F:46:1B:35:DC:B8:5F:64:1A:24:C2:43:F0:A1:58:D0:A1:2C:19:08 a=setup:active 
a=tls-id:1 a=rtcp-mux a=rtcp-rsize a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level a=extmap:2 urn:ietf:params:rtp-hdrext:sdes:mid a=candidate:0 1 UDP 2122194687 198.51.100.7 51556 typ host a=candidate:1 1 UDP 1685987071 192.168.0.1 49203 typ srflx raddr 198.51.100.7 rport 51556 a=end-of-candidates ` // See https://tools.ietf.org/id/draft-ietf-rtcweb-sdp-08.html#rfc.section.5.2.2.1 MockSdpOfferAudioAndVideo = `v=0 o=- 20518 0 IN IP4 0.0.0.0 s=- t=0 0 a=group:BUNDLE audio-D.ietf-mmusic-sdp-bundle-negotiation a=ice-options:trickle-D.ietf-mmusic-trickle-ice m=audio 54609 UDP/TLS/RTP/SAVPF 109 0 8 c=IN IP4 192.168.0.1 a=mid:audio a=msid:ma ta a=sendrecv a=rtpmap:109 opus/48000/2 a=rtpmap:0 PCMU/8000 a=rtpmap:8 PCMA/8000 a=maxptime:120 a=ice-ufrag:074c6550 a=ice-pwd:a28a397a4c3f31747d1ee3474af08a068 a=fingerprint:sha-256 19:E2:1C:3B:4B:9F:81:E6:B8:5C:F4:A5:A8:D8:73:04:BB:05:2F:70:9F:04:A9:0E:05:E9:26:33:E8:70:88:A2 a=setup:actpass a=tls-id:1 a=rtcp-mux a=rtcp:60065 IN IP4 192.168.0.1 a=rtcp-rsize a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level a=extmap:2 urn:ietf:params:rtp-hdrext:sdes:mid a=candidate:0 1 UDP 2122194687 192.0.2.4 61665 typ host a=candidate:1 1 UDP 1685987071 192.168.0.1 54609 typ srflx raddr 192.0.2.4 rport 61665 a=candidate:0 2 UDP 2122194687 192.0.2.4 61667 typ host a=candidate:1 2 UDP 1685987071 192.168.0.1 60065 typ srflx raddr 192.0.2.4 rport 61667 a=end-of-candidates m=video 54609 UDP/TLS/RTP/SAVPF 99 120 c=IN IP4 192.168.0.1 a=mid:video a=msid:ma tb a=sendrecv a=rtpmap:99 H264/90000 a=fmtp:99 profile-level-id=4d0028;packetization-mode=1 a=rtpmap:120 VP8/90000 a=rtcp-fb:99 nack a=rtcp-fb:99 nack pli a=rtcp-fb:99 ccm fir a=rtcp-fb:120 nack a=rtcp-fb:120 nack pli a=rtcp-fb:120 ccm fir a=extmap:2 urn:ietf:params:rtp-hdrext:sdes:mid ` MockSdpAnswerAudioAndVideo = `v=0 o=- 16833 0 IN IP4 0.0.0.0 s=- t=0 0 a=group:BUNDLE audio a=ice-options:trickle m=audio 49203 UDP/TLS/RTP/SAVPF 109 0 8 c=IN IP4 192.168.0.1 a=mid:audio a=msid:ma ta 
a=sendrecv a=rtpmap:109 opus/48000/2 a=rtpmap:0 PCMU/8000 a=rtpmap:8 PCMA/8000 a=maxptime:120 a=ice-ufrag:05067423 a=ice-pwd:1747d1ee3474a28a397a4c3f3af08a068 a=fingerprint:sha-256 6B:8B:F0:65:5F:78:E2:51:3B:AC:6F:F3:3F:46:1B:35:DC:B8:5F:64:1A:24:C2:43:F0:A1:58:D0:A1:2C:19:08 a=setup:active a=tls-id:1 a=rtcp-mux a=rtcp-rsize a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level a=extmap:2 urn:ietf:params:rtp-hdrext:sdes:mid a=candidate:0 1 UDP 2122194687 198.51.100.7 51556 typ host a=candidate:1 1 UDP 1685987071 192.168.0.1 49203 typ srflx raddr 198.51.100.7 rport 51556 a=end-of-candidates m=video 49203 UDP/TLS/RTP/SAVPF 99 c=IN IP4 192.168.0.1 a=mid:video a=msid:ma tb a=sendrecv a=rtpmap:99 H264/90000 a=fmtp:99 profile-level-id=4d0028;packetization-mode=1 a=rtcp-fb:99 nack a=rtcp-fb:99 nack pli a=rtcp-fb:99 ccm fir a=extmap:2 urn:ietf:params:rtp-hdrext:sdes:mid ` ) nextcloud-spreed-signaling-1.2.4/natsclient.go000066400000000000000000000075061460321600400214530ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "encoding/base64" "fmt" "log" "os" "os/signal" "time" "github.com/nats-io/nats.go" ) const ( initialConnectInterval = time.Second maxConnectInterval = 8 * time.Second NatsLoopbackUrl = "nats://loopback" ) type NatsSubscription interface { Unsubscribe() error } type NatsClient interface { Close() Subscribe(subject string, ch chan *nats.Msg) (NatsSubscription, error) Publish(subject string, message interface{}) error Decode(msg *nats.Msg, v interface{}) error } // The NATS client doesn't work if a subject contains spaces. As the room id // can have an arbitrary format, we need to make sure the subject is valid. // See "https://github.com/nats-io/nats.js/issues/158" for a similar report. func GetEncodedSubject(prefix string, suffix string) string { return prefix + "." + base64.StdEncoding.EncodeToString([]byte(suffix)) } type natsClient struct { nc *nats.Conn conn *nats.EncodedConn } func NewNatsClient(url string) (NatsClient, error) { if url == ":loopback:" { log.Printf("WARNING: events url %s is deprecated, please use %s instead", url, NatsLoopbackUrl) url = NatsLoopbackUrl } if url == NatsLoopbackUrl { log.Println("Using internal NATS loopback client") return NewLoopbackNatsClient() } backoff, err := NewExponentialBackoff(initialConnectInterval, maxConnectInterval) if err != nil { return nil, err } client := &natsClient{} client.nc, err = nats.Connect(url, nats.ClosedHandler(client.onClosed), nats.DisconnectHandler(client.onDisconnected), nats.ReconnectHandler(client.onReconnected)) ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() // The initial connect must succeed, so we retry in the case of an error. 
for err != nil { log.Printf("Could not create connection (%s), will retry in %s", err, backoff.NextWait()) backoff.Wait(ctx) if ctx.Err() != nil { return nil, fmt.Errorf("interrupted") } client.nc, err = nats.Connect(url) } log.Printf("Connection established to %s (%s)", client.nc.ConnectedUrl(), client.nc.ConnectedServerId()) // All communication will be JSON based. client.conn, _ = nats.NewEncodedConn(client.nc, nats.JSON_ENCODER) return client, nil } func (c *natsClient) Close() { c.conn.Close() } func (c *natsClient) onClosed(conn *nats.Conn) { log.Println("NATS client closed", conn.LastError()) } func (c *natsClient) onDisconnected(conn *nats.Conn) { log.Println("NATS client disconnected") } func (c *natsClient) onReconnected(conn *nats.Conn) { log.Printf("NATS client reconnected to %s (%s)", conn.ConnectedUrl(), conn.ConnectedServerId()) } func (c *natsClient) Subscribe(subject string, ch chan *nats.Msg) (NatsSubscription, error) { return c.nc.ChanSubscribe(subject, ch) } func (c *natsClient) Publish(subject string, message interface{}) error { return c.conn.Publish(subject, message) } func (c *natsClient) Decode(msg *nats.Msg, v interface{}) error { return c.conn.Enc.Decode(msg.Subject, msg.Data, v) } nextcloud-spreed-signaling-1.2.4/natsclient_loopback.go000066400000000000000000000075121460321600400233220ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
 * See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package signaling

import (
	"container/list"
	"encoding/json"
	"log"
	"strings"
	"sync"

	"github.com/nats-io/nats.go"
)

// LoopbackNatsClient implements NatsClient completely in memory, without a
// NATS server. Published messages are queued in "incoming" and delivered to
// local subscribers by a single dispatcher goroutine.
//
// Locking protocol: "mu" guards both maps/lists. A nil "subscriptions" map
// doubles as the "client was closed" flag; "wakeup" signals the dispatcher
// when a message was queued or the client was closed.
type LoopbackNatsClient struct {
	mu            sync.Mutex
	subscriptions map[string]map[*loopbackNatsSubscription]bool

	wakeup   sync.Cond
	incoming list.List
}

// NewLoopbackNatsClient creates a loopback client and starts its dispatcher
// goroutine. The goroutine exits when Close() is called.
func NewLoopbackNatsClient() (NatsClient, error) {
	client := &LoopbackNatsClient{
		subscriptions: make(map[string]map[*loopbackNatsSubscription]bool),
	}
	client.wakeup.L = &client.mu
	go client.processMessages()
	return client, nil
}

// processMessages is the dispatcher loop: it sleeps on the condition variable
// until a message is queued (or the client is closed) and forwards each
// message in FIFO order. Runs with "mu" held except while waiting.
func (c *LoopbackNatsClient) processMessages() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for {
		for c.subscriptions != nil && c.incoming.Len() == 0 {
			c.wakeup.Wait()
		}
		if c.subscriptions == nil {
			// Client was closed.
			break
		}

		msg := c.incoming.Remove(c.incoming.Front()).(*nats.Msg)
		c.processMessage(msg)
	}
}

// processMessage delivers one message to all subscribers of its subject.
// Called with "mu" held; the channel list is snapshotted first so the lock
// can be dropped during the (potentially blocking) sends and re-acquired
// afterwards. Non-blocking sends: a full channel drops the message.
func (c *LoopbackNatsClient) processMessage(msg *nats.Msg) {
	subs, found := c.subscriptions[msg.Subject]
	if !found {
		return
	}

	channels := make([]chan *nats.Msg, 0, len(subs))
	for sub := range subs {
		channels = append(channels, sub.ch)
	}
	c.mu.Unlock()
	defer c.mu.Lock()
	for _, ch := range channels {
		select {
		case ch <- msg:
		default:
			log.Printf("Slow consumer %s, dropping message", msg.Subject)
		}
	}
}

// Close drops all subscriptions and pending messages and wakes the dispatcher
// so it can terminate. Setting "subscriptions" to nil marks the client closed.
func (c *LoopbackNatsClient) Close() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.subscriptions = nil
	c.incoming.Init()
	c.wakeup.Signal()
}

// loopbackNatsSubscription is one subscriber channel registered for a subject.
type loopbackNatsSubscription struct {
	subject string
	client  *LoopbackNatsClient

	ch chan *nats.Msg
}

func (s *loopbackNatsSubscription) Unsubscribe() error {
	s.client.unsubscribe(s)
	return nil
}

// Subscribe registers a channel for a subject. Mirrors the real NATS client's
// validation: subjects with spaces or a trailing "." are rejected, and
// subscribing on a closed client fails with ErrConnectionClosed.
func (c *LoopbackNatsClient) Subscribe(subject string, ch chan *nats.Msg) (NatsSubscription, error) {
	if strings.HasSuffix(subject, ".") || strings.Contains(subject, " ") {
		return nil, nats.ErrBadSubject
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if c.subscriptions == nil {
		return nil, nats.ErrConnectionClosed
	}

	s := &loopbackNatsSubscription{
		subject: subject,
		client:  c,
		ch:      ch,
	}
	subs, found := c.subscriptions[subject]
	if !found {
		subs = make(map[*loopbackNatsSubscription]bool)
		c.subscriptions[subject] = subs
	}
	subs[s] = true

	return s, nil
}

// unsubscribe removes a subscription; the subject entry is deleted once its
// last subscriber is gone.
func (c *LoopbackNatsClient) unsubscribe(s *loopbackNatsSubscription) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if subs, found := c.subscriptions[s.subject]; found {
		delete(subs, s)
		if len(subs) == 0 {
			delete(c.subscriptions, s.subject)
		}
	}
}

// Publish JSON-encodes the message, queues it and wakes the dispatcher.
// Subject validation and closed-state behavior match the real client.
func (c *LoopbackNatsClient) Publish(subject string, message interface{}) error {
	if strings.HasSuffix(subject, ".") || strings.Contains(subject, " ") {
		return nats.ErrBadSubject
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if c.subscriptions == nil {
		return nats.ErrConnectionClosed
	}
	msg := &nats.Msg{
		Subject: subject,
	}
	var err error
	if msg.Data, err = json.Marshal(message); err != nil {
		return err
	}
	c.incoming.PushBack(msg)
	c.wakeup.Signal()
	return nil
}

// Decode unmarshals a message payload; the loopback transport is plain JSON.
func (c *LoopbackNatsClient) Decode(msg *nats.Msg, v interface{}) error {
	return json.Unmarshal(msg.Data, v)
}
nextcloud-spreed-signaling-1.2.4/natsclient_loopback_test.go
/**
 * Standalone signaling server for the Nextcloud Spreed app.
 * Copyright (C) 2018 struktur AG
 *
 * @author Joachim Bauch
 *
 * @license GNU AGPL version 3 or any later version
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.
If not, see . */ package signaling import ( "context" "testing" "time" ) func (c *LoopbackNatsClient) waitForSubscriptionsEmpty(ctx context.Context, t *testing.T) { for { c.mu.Lock() count := len(c.subscriptions) c.mu.Unlock() if count == 0 { break } select { case <-ctx.Done(): c.mu.Lock() t.Errorf("Error waiting for subscriptions %+v to terminate: %s", c.subscriptions, ctx.Err()) c.mu.Unlock() return default: time.Sleep(time.Millisecond) } } } func CreateLoopbackNatsClientForTest(t *testing.T) NatsClient { result, err := NewLoopbackNatsClient() if err != nil { t.Fatal(err) } t.Cleanup(func() { result.Close() }) return result } func TestLoopbackNatsClient_Subscribe(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLoopbackNatsClientForTest(t) testNatsClient_Subscribe(t, client) }) } func TestLoopbackClient_PublishAfterClose(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLoopbackNatsClientForTest(t) testNatsClient_PublishAfterClose(t, client) }) } func TestLoopbackClient_SubscribeAfterClose(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLoopbackNatsClientForTest(t) testNatsClient_SubscribeAfterClose(t, client) }) } func TestLoopbackClient_BadSubjects(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLoopbackNatsClientForTest(t) testNatsClient_BadSubjects(t, client) }) } nextcloud-spreed-signaling-1.2.4/natsclient_test.go000066400000000000000000000077541460321600400225170ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "sync/atomic" "testing" "time" "github.com/nats-io/nats.go" natsserver "github.com/nats-io/nats-server/v2/test" ) func startLocalNatsServer(t *testing.T) string { opts := natsserver.DefaultTestOptions opts.Port = -1 opts.Cluster.Name = "testing" srv := natsserver.RunServer(&opts) t.Cleanup(func() { srv.Shutdown() srv.WaitForShutdown() }) return srv.ClientURL() } func CreateLocalNatsClientForTest(t *testing.T) NatsClient { url := startLocalNatsServer(t) result, err := NewNatsClient(url) if err != nil { t.Fatal(err) } t.Cleanup(func() { result.Close() }) return result } func testNatsClient_Subscribe(t *testing.T, client NatsClient) { dest := make(chan *nats.Msg) sub, err := client.Subscribe("foo", dest) if err != nil { t.Fatal(err) } ch := make(chan struct{}) var received atomic.Int32 max := int32(20) ready := make(chan struct{}) quit := make(chan struct{}) defer close(quit) go func() { close(ready) for { select { case <-dest: total := received.Add(1) if total == max { err := sub.Unsubscribe() if err != nil { t.Errorf("Unsubscribe failed with err: %s", err) return } close(ch) } case <-quit: return } } }() <-ready for i := int32(0); i < max; i++ { if err := client.Publish("foo", []byte("hello")); err != nil { t.Error(err) } // Allow NATS goroutines to process messages. 
time.Sleep(10 * time.Millisecond) } <-ch if r := received.Load(); r != max { t.Fatalf("Received wrong # of messages: %d vs %d", r, max) } } func TestNatsClient_Subscribe(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLocalNatsClientForTest(t) testNatsClient_Subscribe(t, client) }) } func testNatsClient_PublishAfterClose(t *testing.T, client NatsClient) { client.Close() if err := client.Publish("foo", "bar"); err != nats.ErrConnectionClosed { t.Errorf("Expected %v, got %v", nats.ErrConnectionClosed, err) } } func TestNatsClient_PublishAfterClose(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLocalNatsClientForTest(t) testNatsClient_PublishAfterClose(t, client) }) } func testNatsClient_SubscribeAfterClose(t *testing.T, client NatsClient) { client.Close() ch := make(chan *nats.Msg) if _, err := client.Subscribe("foo", ch); err != nats.ErrConnectionClosed { t.Errorf("Expected %v, got %v", nats.ErrConnectionClosed, err) } } func TestNatsClient_SubscribeAfterClose(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLocalNatsClientForTest(t) testNatsClient_SubscribeAfterClose(t, client) }) } func testNatsClient_BadSubjects(t *testing.T, client NatsClient) { subjects := []string{ "foo bar", "foo.", } ch := make(chan *nats.Msg) for _, s := range subjects { if _, err := client.Subscribe(s, ch); err != nats.ErrBadSubject { t.Errorf("Expected %v for subject %s, got %v", nats.ErrBadSubject, s, err) } } } func TestNatsClient_BadSubjects(t *testing.T) { ensureNoGoroutinesLeak(t, func(t *testing.T) { client := CreateLocalNatsClientForTest(t) testNatsClient_BadSubjects(t, client) }) } nextcloud-spreed-signaling-1.2.4/notifier.go000066400000000000000000000044631460321600400211250ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "sync" ) type Waiter struct { key string sw *SingleWaiter } func (w *Waiter) Wait(ctx context.Context) error { return w.sw.Wait(ctx) } type Notifier struct { sync.Mutex waiters map[string]*Waiter waiterMap map[string]map[*Waiter]bool } func (n *Notifier) NewWaiter(key string) *Waiter { n.Lock() defer n.Unlock() waiter, found := n.waiters[key] if found { w := &Waiter{ key: key, sw: waiter.sw, } n.waiterMap[key][w] = true return w } waiter = &Waiter{ key: key, sw: newSingleWaiter(), } if n.waiters == nil { n.waiters = make(map[string]*Waiter) } if n.waiterMap == nil { n.waiterMap = make(map[string]map[*Waiter]bool) } n.waiters[key] = waiter if _, found := n.waiterMap[key]; !found { n.waiterMap[key] = make(map[*Waiter]bool) } n.waiterMap[key][waiter] = true return waiter } func (n *Notifier) Reset() { n.Lock() defer n.Unlock() for _, w := range n.waiters { w.sw.cancel() } n.waiters = nil n.waiterMap = nil } func (n *Notifier) Release(w *Waiter) { n.Lock() defer n.Unlock() if waiters, found := n.waiterMap[w.key]; found { if _, found := waiters[w]; found { delete(waiters, w) if len(waiters) == 0 { delete(n.waiters, w.key) w.sw.cancel() } } } } func (n *Notifier) Notify(key string) { n.Lock() defer n.Unlock() 
if w, found := n.waiters[key]; found { w.sw.cancel() delete(n.waiters, w.key) delete(n.waiterMap, w.key) } } nextcloud-spreed-signaling-1.2.4/notifier_test.go000066400000000000000000000064321460321600400221620ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "sync" "testing" "time" ) func TestNotifierNoWaiter(t *testing.T) { var notifier Notifier // Notifications can be sent even if no waiter exists. notifier.Notify("foo") } func TestNotifierSimple(t *testing.T) { var notifier Notifier var wg sync.WaitGroup wg.Add(1) waiter := notifier.NewWaiter("foo") defer notifier.Release(waiter) go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := waiter.Wait(ctx); err != nil { t.Error(err) } }() notifier.Notify("foo") wg.Wait() } func TestNotifierMultiNotify(t *testing.T) { var notifier Notifier waiter := notifier.NewWaiter("foo") defer notifier.Release(waiter) notifier.Notify("foo") // The second notification will be ignored while the first is still pending. 
notifier.Notify("foo") } func TestNotifierWaitClosed(t *testing.T) { var notifier Notifier waiter := notifier.NewWaiter("foo") notifier.Release(waiter) if err := waiter.Wait(context.Background()); err != nil { t.Error(err) } } func TestNotifierWaitClosedMulti(t *testing.T) { var notifier Notifier waiter1 := notifier.NewWaiter("foo") waiter2 := notifier.NewWaiter("foo") notifier.Release(waiter1) notifier.Release(waiter2) if err := waiter1.Wait(context.Background()); err != nil { t.Error(err) } if err := waiter2.Wait(context.Background()); err != nil { t.Error(err) } } func TestNotifierResetWillNotify(t *testing.T) { var notifier Notifier var wg sync.WaitGroup wg.Add(1) waiter := notifier.NewWaiter("foo") defer notifier.Release(waiter) go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := waiter.Wait(ctx); err != nil { t.Error(err) } }() notifier.Reset() wg.Wait() } func TestNotifierDuplicate(t *testing.T) { var notifier Notifier var wgStart sync.WaitGroup var wgEnd sync.WaitGroup for i := 0; i < 2; i++ { wgStart.Add(1) wgEnd.Add(1) go func() { defer wgEnd.Done() waiter := notifier.NewWaiter("foo") defer notifier.Release(waiter) // Goroutine has created the waiter and is ready. wgStart.Done() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := waiter.Wait(ctx); err != nil { t.Error(err) } }() } wgStart.Wait() time.Sleep(100 * time.Millisecond) notifier.Notify("foo") wgEnd.Wait() } nextcloud-spreed-signaling-1.2.4/proxy.conf.in000066400000000000000000000042271460321600400214120ustar00rootroot00000000000000[http] # IP and port to listen on for HTTP requests. # Comment line to disable the listener. #listen = 127.0.0.1:9090 [app] # Set to "true" to install pprof debug handlers. # See "https://golang.org/pkg/net/http/pprof/" for further information. #debug = false # ISO 3166 country this proxy is located at. 
This will be used by the signaling # servers to determine the closest proxy for publishers. #country = DE # Type of token configuration for signaling servers allowed to connect, see # below for details. Defaults to "static". # # Possible values: # - static: A mapping of token id -> public key is configured below. # - etcd: Token information are retrieved from an etcd cluster (see below). tokentype = static [tokens] # For token type "static": Mapping of = of signaling # servers allowed to connect. #server1 = pubkey1.pem #server2 = pubkey2.pem # For token type "etcd": Format of key name to retrieve the public key from, # "%s" will be replaced with the token id. Multiple possible formats can be # comma-separated. #keyformat = /signaling/proxy/tokens/%s/public-key [mcu] # The type of the MCU to use. Currently only "janus" is supported. type = janus # The URL to the websocket endpoint of the MCU server. url = ws://localhost:8188/ # The maximum bitrate per publishing stream (in bits per second). # Defaults to 1 mbit/sec. #maxstreambitrate = 1048576 # The maximum bitrate per screensharing stream (in bits per second). # Default is 2 mbit/sec. #maxscreenbitrate = 2097152 [stats] # Comma-separated list of IP addresses that are allowed to access the stats # endpoint. Leave empty (or commented) to only allow access from "127.0.0.1". #allowed_ips = [etcd] # Comma-separated list of static etcd endpoints to connect to. #endpoints = 127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 # Options to perform endpoint discovery through DNS SRV. # Only used if no endpoints are configured manually. #discoverysrv = example.com #discoveryservice = foo # Path to private key, client certificate and CA certificate if TLS # authentication should be used. 
#clientkey = /path/to/etcd-client.key #clientcert = /path/to/etcd-client.crt #cacert = /path/to/etcd-ca.crt nextcloud-spreed-signaling-1.2.4/proxy/000077500000000000000000000000001460321600400201315ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/proxy/main.go000066400000000000000000000071761460321600400214170ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "flag" "fmt" "log" "net" "net/http" "os" "os/signal" "runtime" "strings" "syscall" "time" "github.com/dlintw/goconf" "github.com/gorilla/mux" ) var ( version = "unreleased" configFlag = flag.String("config", "proxy.conf", "config file to use") showVersion = flag.Bool("version", false, "show version and quit") ) const ( defaultReadTimeout = 15 defaultWriteTimeout = 15 proxyDebugMessages = false ) func main() { log.SetFlags(log.Lshortfile) flag.Parse() if *showVersion { fmt.Printf("nextcloud-spreed-signaling-proxy version %s/%s\n", version, runtime.Version()) os.Exit(0) } sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, os.Interrupt) signal.Notify(sigChan, syscall.SIGHUP) signal.Notify(sigChan, syscall.SIGUSR1) log.Printf("Starting up version %s/%s as pid %d", version, runtime.Version(), os.Getpid()) config, err := goconf.ReadConfigFile(*configFlag) if err != nil { log.Fatal("Could not read configuration: ", err) } cpus := runtime.NumCPU() runtime.GOMAXPROCS(cpus) log.Printf("Using a maximum of %d CPUs", cpus) r := mux.NewRouter() proxy, err := NewProxyServer(r, version, config) if err != nil { log.Fatal(err) } if err := proxy.Start(config); err != nil { log.Fatal(err) } defer proxy.Stop() if addr, _ := config.GetString("http", "listen"); addr != "" { readTimeout, _ := config.GetInt("http", "readtimeout") if readTimeout <= 0 { readTimeout = defaultReadTimeout } writeTimeout, _ := config.GetInt("http", "writetimeout") if writeTimeout <= 0 { writeTimeout = defaultWriteTimeout } for _, address := range strings.Split(addr, " ") { go func(address string) { log.Println("Listening on", address) listener, err := net.Listen("tcp", address) if err != nil { log.Fatal("Could not start listening: ", err) } srv := &http.Server{ Handler: r, Addr: addr, ReadTimeout: time.Duration(readTimeout) * time.Second, WriteTimeout: time.Duration(writeTimeout) * time.Second, } if err := srv.Serve(listener); err != nil { log.Fatal("Could not start server: ", 
err) } }(address) } } loop: for { select { case sig := <-sigChan: switch sig { case os.Interrupt: log.Println("Interrupted") break loop case syscall.SIGHUP: log.Printf("Received SIGHUP, reloading %s", *configFlag) if config, err := goconf.ReadConfigFile(*configFlag); err != nil { log.Printf("Could not read configuration from %s: %s", *configFlag, err) } else { proxy.Reload(config) } case syscall.SIGUSR1: log.Printf("Received SIGUSR1, scheduling server to shutdown") proxy.ScheduleShutdown() } case <-proxy.ShutdownChannel(): log.Printf("All clients disconnected, shutting down") break loop } } } nextcloud-spreed-signaling-1.2.4/proxy/proxy_client.go000066400000000000000000000036201460321600400232000ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "sync/atomic" "time" "github.com/gorilla/websocket" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) type ProxyClient struct { signaling.Client proxy *ProxyServer session atomic.Pointer[ProxySession] } func NewProxyClient(proxy *ProxyServer, conn *websocket.Conn, addr string) (*ProxyClient, error) { client := &ProxyClient{ proxy: proxy, } client.SetConn(conn, addr, client) return client, nil } func (c *ProxyClient) GetSession() *ProxySession { return c.session.Load() } func (c *ProxyClient) SetSession(session *ProxySession) { c.session.Store(session) } func (c *ProxyClient) OnClosed(client *signaling.Client) { if session := c.GetSession(); session != nil { session.MarkUsed() } c.proxy.clientClosed(&c.Client) } func (c *ProxyClient) OnMessageReceived(client *signaling.Client, data []byte) { c.proxy.processMessage(c, data) } func (c *ProxyClient) OnRTTReceived(client *signaling.Client, rtt time.Duration) { if session := c.GetSession(); session != nil { session.MarkUsed() } } nextcloud-spreed-signaling-1.2.4/proxy/proxy_server.go000066400000000000000000000705601460321600400232370ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "context" "crypto/rand" "encoding/json" "fmt" "io" "log" "net" "net/http" "net/http/pprof" "os" "os/signal" runtimepprof "runtime/pprof" "strings" "sync" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" "github.com/google/uuid" "github.com/gorilla/mux" "github.com/gorilla/securecookie" "github.com/gorilla/websocket" "github.com/prometheus/client_golang/prometheus/promhttp" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) const ( // Buffer sizes when reading/writing websocket connections. websocketReadBufferSize = 4096 websocketWriteBufferSize = 4096 initialMcuRetry = time.Second maxMcuRetry = time.Second * 16 updateLoadInterval = time.Second expireSessionsInterval = 10 * time.Second // Maximum age a token may have to prevent reuse of old tokens. maxTokenAge = 5 * time.Minute ) type ContextKey string var ( ContextKeySession = ContextKey("session") TimeoutCreatingPublisher = signaling.NewError("timeout", "Timeout creating publisher.") TimeoutCreatingSubscriber = signaling.NewError("timeout", "Timeout creating subscriber.") TokenAuthFailed = signaling.NewError("auth_failed", "The token could not be authenticated.") TokenExpired = signaling.NewError("token_expired", "The token is expired.") TokenNotValidYet = signaling.NewError("token_not_valid_yet", "The token is not valid yet.") UnknownClient = signaling.NewError("unknown_client", "Unknown client id given.") UnsupportedCommand = signaling.NewError("bad_request", "Unsupported command received.") UnsupportedMessage = signaling.NewError("bad_request", "Unsupported message received.") UnsupportedPayload = signaling.NewError("unsupported_payload", "Unsupported payload type.") ShutdownScheduled = signaling.NewError("shutdown_scheduled", "The server is scheduled to shutdown.") ) type ProxyServer struct { version string country string welcomeMessage string url string mcu signaling.Mcu stopped atomic.Bool load atomic.Int64 shutdownChannel chan struct{} 
shutdownScheduled atomic.Bool upgrader websocket.Upgrader tokens ProxyTokens statsAllowedIps *signaling.AllowedIps sid atomic.Uint64 cookie *securecookie.SecureCookie sessions map[uint64]*ProxySession sessionsLock sync.RWMutex clients map[string]signaling.McuClient clientIds map[string]string clientsLock sync.RWMutex } func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*ProxyServer, error) { hashKey := make([]byte, 64) if _, err := rand.Read(hashKey); err != nil { return nil, fmt.Errorf("Could not generate random hash key: %s", err) } blockKey := make([]byte, 32) if _, err := rand.Read(blockKey); err != nil { return nil, fmt.Errorf("Could not generate random block key: %s", err) } var tokens ProxyTokens var err error tokenType, _ := config.GetString("app", "tokentype") if tokenType == "" { tokenType = TokenTypeDefault } switch tokenType { case TokenTypeEtcd: tokens, err = NewProxyTokensEtcd(config) case TokenTypeStatic: tokens, err = NewProxyTokensStatic(config) default: return nil, fmt.Errorf("Unsupported token type configured: %s", tokenType) } if err != nil { return nil, err } statsAllowed, _ := config.GetString("stats", "allowed_ips") statsAllowedIps, err := signaling.ParseAllowedIps(statsAllowed) if err != nil { return nil, err } if !statsAllowedIps.Empty() { log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed) } else { log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1") statsAllowedIps = signaling.DefaultAllowedIps() } country, _ := config.GetString("app", "country") country = strings.ToUpper(country) if signaling.IsValidCountry(country) { log.Printf("Sending %s as country information", country) } else if country != "" { return nil, fmt.Errorf("Invalid country: %s", country) } else { log.Printf("Not sending country information") } welcome := map[string]string{ "nextcloud-spreed-signaling-proxy": "Welcome", "version": version, } welcomeMessage, err := 
json.Marshal(welcome) if err != nil { // Should never happen. return nil, err } result := &ProxyServer{ version: version, country: country, welcomeMessage: string(welcomeMessage) + "\n", shutdownChannel: make(chan struct{}), upgrader: websocket.Upgrader{ ReadBufferSize: websocketReadBufferSize, WriteBufferSize: websocketWriteBufferSize, }, tokens: tokens, statsAllowedIps: statsAllowedIps, cookie: securecookie.New(hashKey, blockKey).MaxAge(0), sessions: make(map[uint64]*ProxySession), clients: make(map[string]signaling.McuClient), clientIds: make(map[string]string), } result.upgrader.CheckOrigin = result.checkOrigin if debug, _ := config.GetBool("app", "debug"); debug { log.Println("Installing debug handlers in \"/debug/pprof\"") r.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) r.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) r.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) r.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) r.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) for _, profile := range runtimepprof.Profiles() { name := profile.Name() r.Handle("/debug/pprof/"+name, pprof.Handler(name)) } } r.HandleFunc("/welcome", result.setCommonHeaders(result.welcomeHandler)).Methods("GET") r.HandleFunc("/proxy", result.setCommonHeaders(result.proxyHandler)).Methods("GET") r.HandleFunc("/stats", result.setCommonHeaders(result.validateStatsRequest(result.statsHandler))).Methods("GET") r.HandleFunc("/metrics", result.setCommonHeaders(result.validateStatsRequest(result.metricsHandler))).Methods("GET") return result, nil } func (s *ProxyServer) checkOrigin(r *http.Request) bool { // We allow any Origin to connect to the service. 
return true } func (s *ProxyServer) Start(config *goconf.ConfigFile) error { s.url, _ = config.GetString("mcu", "url") if s.url == "" { return fmt.Errorf("No MCU server url configured") } mcuType, _ := config.GetString("mcu", "type") if mcuType == "" { mcuType = signaling.McuTypeDefault } backoff, err := signaling.NewExponentialBackoff(initialMcuRetry, maxMcuRetry) if err != nil { return err } ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() var mcu signaling.Mcu for { switch mcuType { case signaling.McuTypeJanus: mcu, err = signaling.NewMcuJanus(s.url, config) if err == nil { signaling.RegisterJanusMcuStats() } default: return fmt.Errorf("Unsupported MCU type: %s", mcuType) } if err == nil { mcu.SetOnConnected(s.onMcuConnected) mcu.SetOnDisconnected(s.onMcuDisconnected) err = mcu.Start() if err != nil { log.Printf("Could not create %s MCU at %s: %s", mcuType, s.url, err) } } if err == nil { break } log.Printf("Could not initialize %s MCU at %s (%s) will retry in %s", mcuType, s.url, err, backoff.NextWait()) backoff.Wait(ctx) if ctx.Err() != nil { return fmt.Errorf("Cancelled") } } s.mcu = mcu go s.run() return nil } func (s *ProxyServer) run() { updateLoadTicker := time.NewTicker(updateLoadInterval) expireSessionsTicker := time.NewTicker(expireSessionsInterval) loop: for { select { case <-updateLoadTicker.C: if s.stopped.Load() { break loop } s.updateLoad() case <-expireSessionsTicker.C: if s.stopped.Load() { break loop } s.expireSessions() } } } func (s *ProxyServer) updateLoad() { load := s.GetClientsLoad() if load == s.load.Load() { return } s.load.Store(load) if s.shutdownScheduled.Load() { // Server is scheduled to shutdown, no need to update clients with current load. 
return } msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "update-load", Load: load, }, } s.IterateSessions(func(session *ProxySession) { session.sendMessage(msg) }) } func (s *ProxyServer) getExpiredSessions() []*ProxySession { var expired []*ProxySession s.IterateSessions(func(session *ProxySession) { if session.IsExpired() { expired = append(expired, session) } }) return expired } func (s *ProxyServer) expireSessions() { expired := s.getExpiredSessions() if len(expired) == 0 { return } s.sessionsLock.Lock() defer s.sessionsLock.Unlock() for _, session := range expired { if !session.IsExpired() { // Session was used while waiting for the lock. continue } log.Printf("Delete expired session %s", session.PublicId()) s.deleteSessionLocked(session.Sid()) } } func (s *ProxyServer) Stop() { if !s.stopped.CompareAndSwap(false, true) { return } if s.mcu != nil { s.mcu.Stop() } s.tokens.Close() } func (s *ProxyServer) ShutdownChannel() <-chan struct{} { return s.shutdownChannel } func (s *ProxyServer) ScheduleShutdown() { if !s.shutdownScheduled.CompareAndSwap(false, true) { return } msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "shutdown-scheduled", }, } s.IterateSessions(func(session *ProxySession) { session.sendMessage(msg) }) if !s.HasClients() { go close(s.shutdownChannel) } } func (s *ProxyServer) Reload(config *goconf.ConfigFile) { s.tokens.Reload(config) } func (s *ProxyServer) setCommonHeaders(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Server", "nextcloud-spreed-signaling-proxy/"+s.version) f(w, r) } } func getRealUserIP(r *http.Request) string { // Note this function assumes it is running behind a trusted proxy, so // the headers can be trusted. 
if ip := r.Header.Get("X-Real-IP"); ip != "" { return ip } if ip := r.Header.Get("X-Forwarded-For"); ip != "" { // Result could be a list "clientip, proxy1, proxy2", so only use first element. if pos := strings.Index(ip, ","); pos >= 0 { ip = strings.TrimSpace(ip[:pos]) } return ip } return r.RemoteAddr } func (s *ProxyServer) welcomeHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) io.WriteString(w, s.welcomeMessage) // nolint } func (s *ProxyServer) proxyHandler(w http.ResponseWriter, r *http.Request) { addr := getRealUserIP(r) conn, err := s.upgrader.Upgrade(w, r, nil) if err != nil { log.Printf("Could not upgrade request from %s: %s", addr, err) return } client, err := NewProxyClient(s, conn, addr) if err != nil { log.Printf("Could not create client for %s: %s", addr, err) return } go client.WritePump() go client.ReadPump() } func (s *ProxyServer) clientClosed(client *signaling.Client) { log.Printf("Connection from %s closed", client.RemoteAddr()) } func (s *ProxyServer) onMcuConnected() { log.Printf("Connection to %s established", s.url) msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "backend-connected", }, } s.IterateSessions(func(session *ProxySession) { session.sendMessage(msg) }) } func (s *ProxyServer) onMcuDisconnected() { if s.stopped.Load() { // Shutting down, no need to notify. 
return } log.Printf("Connection to %s lost", s.url) msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "backend-disconnected", }, } s.IterateSessions(func(session *ProxySession) { session.sendMessage(msg) session.NotifyDisconnected() }) } func (s *ProxyServer) sendCurrentLoad(session *ProxySession) { msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "update-load", Load: s.load.Load(), }, } session.sendMessage(msg) } func (s *ProxyServer) sendShutdownScheduled(session *ProxySession) { msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "shutdown-scheduled", }, } session.sendMessage(msg) } func (s *ProxyServer) processMessage(client *ProxyClient, data []byte) { if proxyDebugMessages { log.Printf("Message: %s", string(data)) } var message signaling.ProxyClientMessage if err := message.UnmarshalJSON(data); err != nil { if session := client.GetSession(); session != nil { log.Printf("Error decoding message from client %s: %v", session.PublicId(), err) } else { log.Printf("Error decoding message from %s: %v", client.RemoteAddr(), err) } client.SendError(signaling.InvalidFormat) return } if err := message.CheckValid(); err != nil { if session := client.GetSession(); session != nil { log.Printf("Invalid message %+v from client %s: %v", message, session.PublicId(), err) } else { log.Printf("Invalid message %+v from %s: %v", message, client.RemoteAddr(), err) } client.SendMessage(message.NewErrorServerMessage(signaling.InvalidFormat)) return } session := client.GetSession() if session == nil { if message.Type != "hello" { client.SendMessage(message.NewErrorServerMessage(signaling.HelloExpected)) return } var session *ProxySession if resumeId := message.Hello.ResumeId; resumeId != "" { var data signaling.SessionIdData if s.cookie.Decode("session", resumeId, &data) == nil { session = s.GetSession(data.Sid) } if session == nil 
|| resumeId != session.PublicId() { client.SendMessage(message.NewErrorServerMessage(signaling.NoSuchSession)) return } log.Printf("Resumed session %s", session.PublicId()) session.MarkUsed() if s.shutdownScheduled.Load() { s.sendShutdownScheduled(session) } else { s.sendCurrentLoad(session) } statsSessionsResumedTotal.Inc() } else { var err error if session, err = s.NewSession(message.Hello); err != nil { if e, ok := err.(*signaling.Error); ok { client.SendMessage(message.NewErrorServerMessage(e)) } else { client.SendMessage(message.NewWrappedErrorServerMessage(err)) } return } } prev := session.SetClient(client) if prev != nil { msg := &signaling.ProxyServerMessage{ Type: "bye", Bye: &signaling.ByeProxyServerMessage{ Reason: "session_resumed", }, } prev.SendMessage(msg) } response := &signaling.ProxyServerMessage{ Id: message.Id, Type: "hello", Hello: &signaling.HelloProxyServerMessage{ Version: signaling.HelloVersionV1, SessionId: session.PublicId(), Server: &signaling.WelcomeServerMessage{ Version: s.version, Country: s.country, }, }, } client.SendMessage(response) if s.shutdownScheduled.Load() { s.sendShutdownScheduled(session) } else { s.sendCurrentLoad(session) } return } ctx := context.WithValue(context.Background(), ContextKeySession, session) session.MarkUsed() switch message.Type { case "command": s.processCommand(ctx, client, session, &message) case "payload": s.processPayload(ctx, client, session, &message) default: session.sendMessage(message.NewErrorServerMessage(UnsupportedMessage)) } } type emptyInitiator struct{} func (i *emptyInitiator) Country() string { return "" } func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, session *ProxySession, message *signaling.ProxyClientMessage) { cmd := message.Command statsCommandMessagesTotal.WithLabelValues(cmd.Type).Inc() switch cmd.Type { case "create-publisher": if s.shutdownScheduled.Load() { session.sendMessage(message.NewErrorServerMessage(ShutdownScheduled)) return } id := 
uuid.New().String() publisher, err := s.mcu.NewPublisher(ctx, session, id, cmd.Sid, cmd.StreamType, cmd.Bitrate, cmd.MediaTypes, &emptyInitiator{}) if err == context.DeadlineExceeded { log.Printf("Timeout while creating %s publisher %s for %s", cmd.StreamType, id, session.PublicId()) session.sendMessage(message.NewErrorServerMessage(TimeoutCreatingPublisher)) return } else if err != nil { log.Printf("Error while creating %s publisher %s for %s: %s", cmd.StreamType, id, session.PublicId(), err) session.sendMessage(message.NewWrappedErrorServerMessage(err)) return } log.Printf("Created %s publisher %s as %s for %s", cmd.StreamType, publisher.Id(), id, session.PublicId()) session.StorePublisher(ctx, id, publisher) s.StoreClient(id, publisher) response := &signaling.ProxyServerMessage{ Id: message.Id, Type: "command", Command: &signaling.CommandProxyServerMessage{ Id: id, Bitrate: int(publisher.MaxBitrate()), }, } session.sendMessage(response) statsPublishersCurrent.WithLabelValues(string(cmd.StreamType)).Inc() statsPublishersTotal.WithLabelValues(string(cmd.StreamType)).Inc() case "create-subscriber": id := uuid.New().String() publisherId := cmd.PublisherId subscriber, err := s.mcu.NewSubscriber(ctx, session, publisherId, cmd.StreamType) if err == context.DeadlineExceeded { log.Printf("Timeout while creating %s subscriber on %s for %s", cmd.StreamType, publisherId, session.PublicId()) session.sendMessage(message.NewErrorServerMessage(TimeoutCreatingSubscriber)) return } else if err != nil { log.Printf("Error while creating %s subscriber on %s for %s: %s", cmd.StreamType, publisherId, session.PublicId(), err) session.sendMessage(message.NewWrappedErrorServerMessage(err)) return } log.Printf("Created %s subscriber %s as %s for %s", cmd.StreamType, subscriber.Id(), id, session.PublicId()) session.StoreSubscriber(ctx, id, subscriber) s.StoreClient(id, subscriber) response := &signaling.ProxyServerMessage{ Id: message.Id, Type: "command", Command: 
&signaling.CommandProxyServerMessage{ Id: id, Sid: subscriber.Sid(), }, } session.sendMessage(response) statsSubscribersCurrent.WithLabelValues(string(cmd.StreamType)).Inc() statsSubscribersTotal.WithLabelValues(string(cmd.StreamType)).Inc() case "delete-publisher": client := s.GetClient(cmd.ClientId) if client == nil { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } publisher, ok := client.(signaling.McuPublisher) if !ok { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } if session.DeletePublisher(publisher) == "" { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } if s.DeleteClient(cmd.ClientId, client) { statsPublishersCurrent.WithLabelValues(string(client.StreamType())).Dec() } go func() { log.Printf("Closing %s publisher %s as %s", client.StreamType(), client.Id(), cmd.ClientId) client.Close(context.Background()) }() response := &signaling.ProxyServerMessage{ Id: message.Id, Type: "command", Command: &signaling.CommandProxyServerMessage{ Id: cmd.ClientId, }, } session.sendMessage(response) case "delete-subscriber": client := s.GetClient(cmd.ClientId) if client == nil { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } subscriber, ok := client.(signaling.McuSubscriber) if !ok { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } if session.DeleteSubscriber(subscriber) == "" { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } if s.DeleteClient(cmd.ClientId, client) { statsSubscribersCurrent.WithLabelValues(string(client.StreamType())).Dec() } go func() { log.Printf("Closing %s subscriber %s as %s", client.StreamType(), client.Id(), cmd.ClientId) client.Close(context.Background()) }() response := &signaling.ProxyServerMessage{ Id: message.Id, Type: "command", Command: &signaling.CommandProxyServerMessage{ Id: cmd.ClientId, }, } session.sendMessage(response) default: log.Printf("Unsupported command %+v", 
message.Command) session.sendMessage(message.NewErrorServerMessage(UnsupportedCommand)) } } func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, session *ProxySession, message *signaling.ProxyClientMessage) { payload := message.Payload mcuClient := s.GetClient(payload.ClientId) if mcuClient == nil { session.sendMessage(message.NewErrorServerMessage(UnknownClient)) return } statsPayloadMessagesTotal.WithLabelValues(payload.Type).Inc() var mcuData *signaling.MessageClientMessageData switch payload.Type { case "offer": fallthrough case "answer": fallthrough case "selectStream": fallthrough case "candidate": mcuData = &signaling.MessageClientMessageData{ Type: payload.Type, Sid: payload.Sid, Payload: payload.Payload, } case "endOfCandidates": // Ignore but confirm, not passed along to Janus anyway. session.sendMessage(&signaling.ProxyServerMessage{ Id: message.Id, Type: "payload", Payload: &signaling.PayloadProxyServerMessage{ Type: payload.Type, ClientId: payload.ClientId, }, }) return case "requestoffer": fallthrough case "sendoffer": mcuData = &signaling.MessageClientMessageData{ Type: payload.Type, Sid: payload.Sid, } default: session.sendMessage(message.NewErrorServerMessage(UnsupportedPayload)) return } mcuClient.SendMessage(ctx, nil, mcuData, func(err error, response map[string]interface{}) { var responseMsg *signaling.ProxyServerMessage if err != nil { log.Printf("Error sending %+v to %s client %s: %s", mcuData, mcuClient.StreamType(), payload.ClientId, err) responseMsg = message.NewWrappedErrorServerMessage(err) } else { responseMsg = &signaling.ProxyServerMessage{ Id: message.Id, Type: "payload", Payload: &signaling.PayloadProxyServerMessage{ Type: payload.Type, ClientId: payload.ClientId, Payload: response, }, } } session.sendMessage(responseMsg) }) } func (s *ProxyServer) NewSession(hello *signaling.HelloProxyClientMessage) (*ProxySession, error) { if proxyDebugMessages { log.Printf("Hello: %+v", hello) } reason := "auth-failed" 
token, err := jwt.ParseWithClaims(hello.Token, &signaling.TokenClaims{}, func(token *jwt.Token) (interface{}, error) { // Don't forget to validate the alg is what you expect: if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { log.Printf("Unexpected signing method: %v", token.Header["alg"]) reason = "unsupported-signing-method" return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) } claims, ok := token.Claims.(*signaling.TokenClaims) if !ok { log.Printf("Unsupported claims type: %+v", token.Claims) reason = "unsupported-claims" return nil, fmt.Errorf("Unsupported claims type") } tokenKey, err := s.tokens.Get(claims.Issuer) if err != nil { log.Printf("Could not get token for %s: %s", claims.Issuer, err) reason = "missing-issuer" return nil, err } if tokenKey == nil || tokenKey.key == nil { log.Printf("Issuer %s is not supported", claims.Issuer) reason = "unsupported-issuer" return nil, fmt.Errorf("No key found for issuer") } return tokenKey.key, nil }) if err, ok := err.(*jwt.ValidationError); ok { if err.Errors&jwt.ValidationErrorIssuedAt == jwt.ValidationErrorIssuedAt { statsTokenErrorsTotal.WithLabelValues("not-valid-yet").Inc() return nil, TokenNotValidYet } } if err != nil { statsTokenErrorsTotal.WithLabelValues(reason).Inc() return nil, TokenAuthFailed } claims, ok := token.Claims.(*signaling.TokenClaims) if !ok || !token.Valid { statsTokenErrorsTotal.WithLabelValues("auth-failed").Inc() return nil, TokenAuthFailed } minIssuedAt := time.Now().Add(-maxTokenAge) if issuedAt := claims.IssuedAt; issuedAt != nil && issuedAt.Before(minIssuedAt) { statsTokenErrorsTotal.WithLabelValues("expired").Inc() return nil, TokenExpired } sid := s.sid.Add(1) for sid == 0 { sid = s.sid.Add(1) } sessionIdData := &signaling.SessionIdData{ Sid: sid, Created: time.Now(), } encoded, err := s.cookie.Encode("session", sessionIdData) if err != nil { return nil, err } log.Printf("Created session %s for %+v", encoded, claims) session := NewProxySession(s, sid, 
encoded) s.StoreSession(sid, session) statsSessionsCurrent.Inc() statsSessionsTotal.Inc() return session, nil } func (s *ProxyServer) StoreSession(id uint64, session *ProxySession) { s.sessionsLock.Lock() defer s.sessionsLock.Unlock() s.sessions[id] = session } func (s *ProxyServer) GetSession(id uint64) *ProxySession { s.sessionsLock.RLock() defer s.sessionsLock.RUnlock() return s.sessions[id] } func (s *ProxyServer) GetSessionsCount() int64 { s.sessionsLock.RLock() defer s.sessionsLock.RUnlock() return int64(len(s.sessions)) } func (s *ProxyServer) IterateSessions(f func(*ProxySession)) { s.sessionsLock.RLock() defer s.sessionsLock.RUnlock() for _, session := range s.sessions { f(session) } } func (s *ProxyServer) DeleteSession(id uint64) { s.sessionsLock.Lock() defer s.sessionsLock.Unlock() s.deleteSessionLocked(id) } func (s *ProxyServer) deleteSessionLocked(id uint64) { if session, found := s.sessions[id]; found { delete(s.sessions, id) session.Close() statsSessionsCurrent.Dec() } } func (s *ProxyServer) StoreClient(id string, client signaling.McuClient) { s.clientsLock.Lock() defer s.clientsLock.Unlock() s.clients[id] = client s.clientIds[client.Id()] = id } func (s *ProxyServer) DeleteClient(id string, client signaling.McuClient) bool { s.clientsLock.Lock() defer s.clientsLock.Unlock() if _, found := s.clients[id]; !found { return false } delete(s.clients, id) delete(s.clientIds, client.Id()) if len(s.clients) == 0 && s.shutdownScheduled.Load() { go close(s.shutdownChannel) } return true } func (s *ProxyServer) HasClients() bool { s.clientsLock.RLock() defer s.clientsLock.RUnlock() return len(s.clients) > 0 } func (s *ProxyServer) GetClientsLoad() int64 { s.clientsLock.RLock() defer s.clientsLock.RUnlock() var load int64 for _, c := range s.clients { load += int64(c.MaxBitrate()) } return load / 1024 } func (s *ProxyServer) GetClient(id string) signaling.McuClient { s.clientsLock.RLock() defer s.clientsLock.RUnlock() return s.clients[id] } func (s 
*ProxyServer) GetClientId(client signaling.McuClient) string { s.clientsLock.RLock() defer s.clientsLock.RUnlock() return s.clientIds[client.Id()] } func (s *ProxyServer) getStats() map[string]interface{} { result := map[string]interface{}{ "sessions": s.GetSessionsCount(), "load": s.load.Load(), "mcu": s.mcu.GetStats(), } return result } func (s *ProxyServer) allowStatsAccess(r *http.Request) bool { addr := getRealUserIP(r) if strings.Contains(addr, ":") { if host, _, err := net.SplitHostPort(addr); err == nil { addr = host } } ip := net.ParseIP(addr) if ip == nil { return false } return s.statsAllowedIps.Allowed(ip) } func (s *ProxyServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { if !s.allowStatsAccess(r) { http.Error(w, "Authentication check failed", http.StatusForbidden) return } f(w, r) } } func (s *ProxyServer) statsHandler(w http.ResponseWriter, r *http.Request) { stats := s.getStats() statsData, err := json.MarshalIndent(stats, "", " ") if err != nil { log.Printf("Could not serialize stats %+v: %s", stats, err) http.Error(w, "Internal server error", http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(http.StatusOK) w.Write(statsData) // nolint } func (s *ProxyServer) metricsHandler(w http.ResponseWriter, r *http.Request) { // Expose prometheus metrics at "/metrics". promhttp.Handler().ServeHTTP(w, r) } nextcloud-spreed-signaling-1.2.4/proxy/proxy_server_test.go000066400000000000000000000064031460321600400242710ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package main import ( "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/pem" "os" "testing" "time" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" "github.com/gorilla/mux" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) const ( KeypairSizeForTest = 2048 TokenIdForTest = "foo" ) func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey) { tempdir := t.TempDir() var server *ProxyServer t.Cleanup(func() { if server != nil { server.Stop() } }) r := mux.NewRouter() key, err := rsa.GenerateKey(rand.Reader, KeypairSizeForTest) if err != nil { t.Fatalf("could not generate key: %s", err) } priv := &pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key), } privkey, err := os.CreateTemp(tempdir, "privkey*.pem") if err != nil { t.Fatalf("could not create temporary file for private key: %s", err) } if err := pem.Encode(privkey, priv); err != nil { t.Fatalf("could not encode private key: %s", err) } pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) if err != nil { t.Fatalf("could not marshal public key: %s", err) } pub := &pem.Block{ Type: "RSA PUBLIC KEY", Bytes: pubData, } pubkey, err := os.CreateTemp(tempdir, "pubkey*.pem") if err != nil { t.Fatalf("could not create temporary file 
for public key: %s", err) } if err := pem.Encode(pubkey, pub); err != nil { t.Fatalf("could not encode public key: %s", err) } config := goconf.NewConfigFile() config.AddOption("tokens", TokenIdForTest, pubkey.Name()) if server, err = NewProxyServer(r, "0.0", config); err != nil { t.Fatalf("could not create server: %s", err) } return server, key } func TestTokenInFuture(t *testing.T) { server, key := newProxyServerForTest(t) claims := &signaling.TokenClaims{ RegisteredClaims: jwt.RegisteredClaims{ IssuedAt: jwt.NewNumericDate(time.Now().Add(time.Hour)), Issuer: TokenIdForTest, }, } token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) tokenString, err := token.SignedString(key) if err != nil { t.Fatalf("could not create token: %s", err) } hello := &signaling.HelloProxyClientMessage{ Version: "1.0", Token: tokenString, } session, err := server.NewSession(hello) if session != nil { defer session.Close() t.Errorf("should not have created session") } else if err != TokenNotValidYet { t.Errorf("could have failed with TokenNotValidYet, got %s", err) } } nextcloud-spreed-signaling-1.2.4/proxy/proxy_session.go000066400000000000000000000202741460321600400234110ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. 
If not, see . */ package main import ( "context" "log" "sync" "sync/atomic" "time" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) const ( // Sessions expire if they have not been used for one minute. sessionExpirationTime = time.Minute ) type ProxySession struct { proxy *ProxyServer id string sid uint64 lastUsed atomic.Int64 clientLock sync.Mutex client *ProxyClient pendingMessages []*signaling.ProxyServerMessage publishersLock sync.Mutex publishers map[string]signaling.McuPublisher publisherIds map[signaling.McuPublisher]string subscribersLock sync.Mutex subscribers map[string]signaling.McuSubscriber subscriberIds map[signaling.McuSubscriber]string } func NewProxySession(proxy *ProxyServer, sid uint64, id string) *ProxySession { result := &ProxySession{ proxy: proxy, id: id, sid: sid, publishers: make(map[string]signaling.McuPublisher), publisherIds: make(map[signaling.McuPublisher]string), subscribers: make(map[string]signaling.McuSubscriber), subscriberIds: make(map[signaling.McuSubscriber]string), } result.MarkUsed() return result } func (s *ProxySession) PublicId() string { return s.id } func (s *ProxySession) Sid() uint64 { return s.sid } func (s *ProxySession) LastUsed() time.Time { lastUsed := s.lastUsed.Load() return time.Unix(0, lastUsed) } func (s *ProxySession) IsExpired() bool { expiresAt := s.LastUsed().Add(sessionExpirationTime) return expiresAt.Before(time.Now()) } func (s *ProxySession) MarkUsed() { now := time.Now() s.lastUsed.Store(now.UnixNano()) } func (s *ProxySession) Close() { s.clearPublishers() s.clearSubscribers() } func (s *ProxySession) SetClient(client *ProxyClient) *ProxyClient { s.clientLock.Lock() prev := s.client s.client = client var messages []*signaling.ProxyServerMessage if client != nil { messages, s.pendingMessages = s.pendingMessages, nil } s.clientLock.Unlock() if prev != nil { prev.SetSession(nil) } if client != nil { s.MarkUsed() client.SetSession(s) for _, msg := range messages { client.SendMessage(msg) } 
} return prev } func (s *ProxySession) OnUpdateOffer(client signaling.McuClient, offer map[string]interface{}) { id := s.proxy.GetClientId(client) if id == "" { log.Printf("Received offer %+v from unknown %s client %s (%+v)", offer, client.StreamType(), client.Id(), client) return } msg := &signaling.ProxyServerMessage{ Type: "payload", Payload: &signaling.PayloadProxyServerMessage{ Type: "offer", ClientId: id, Payload: map[string]interface{}{ "offer": offer, }, }, } s.sendMessage(msg) } func (s *ProxySession) OnIceCandidate(client signaling.McuClient, candidate interface{}) { id := s.proxy.GetClientId(client) if id == "" { log.Printf("Received candidate %+v from unknown %s client %s (%+v)", candidate, client.StreamType(), client.Id(), client) return } msg := &signaling.ProxyServerMessage{ Type: "payload", Payload: &signaling.PayloadProxyServerMessage{ Type: "candidate", ClientId: id, Payload: map[string]interface{}{ "candidate": candidate, }, }, } s.sendMessage(msg) } func (s *ProxySession) sendMessage(message *signaling.ProxyServerMessage) { var client *ProxyClient s.clientLock.Lock() client = s.client if client == nil { s.pendingMessages = append(s.pendingMessages, message) } s.clientLock.Unlock() if client != nil { client.SendMessage(message) } } func (s *ProxySession) OnIceCompleted(client signaling.McuClient) { id := s.proxy.GetClientId(client) if id == "" { log.Printf("Received ice completed event from unknown %s client %s (%+v)", client.StreamType(), client.Id(), client) return } msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "ice-completed", ClientId: id, }, } s.sendMessage(msg) } func (s *ProxySession) SubscriberSidUpdated(subscriber signaling.McuSubscriber) { id := s.proxy.GetClientId(subscriber) if id == "" { log.Printf("Received subscriber sid updated event from unknown %s subscriber %s (%+v)", subscriber.StreamType(), subscriber.Id(), subscriber) return } msg := &signaling.ProxyServerMessage{ 
Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "subscriber-sid-updated", ClientId: id, Sid: subscriber.Sid(), }, } s.sendMessage(msg) } func (s *ProxySession) PublisherClosed(publisher signaling.McuPublisher) { if id := s.DeletePublisher(publisher); id != "" { if s.proxy.DeleteClient(id, publisher) { statsPublishersCurrent.WithLabelValues(string(publisher.StreamType())).Dec() } msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "publisher-closed", ClientId: id, }, } s.sendMessage(msg) } } func (s *ProxySession) SubscriberClosed(subscriber signaling.McuSubscriber) { if id := s.DeleteSubscriber(subscriber); id != "" { if s.proxy.DeleteClient(id, subscriber) { statsSubscribersCurrent.WithLabelValues(string(subscriber.StreamType())).Dec() } msg := &signaling.ProxyServerMessage{ Type: "event", Event: &signaling.EventProxyServerMessage{ Type: "subscriber-closed", ClientId: id, }, } s.sendMessage(msg) } } func (s *ProxySession) StorePublisher(ctx context.Context, id string, publisher signaling.McuPublisher) { s.publishersLock.Lock() defer s.publishersLock.Unlock() s.publishers[id] = publisher s.publisherIds[publisher] = id } func (s *ProxySession) DeletePublisher(publisher signaling.McuPublisher) string { s.publishersLock.Lock() defer s.publishersLock.Unlock() id, found := s.publisherIds[publisher] if !found { return "" } delete(s.publishers, id) delete(s.publisherIds, publisher) return id } func (s *ProxySession) StoreSubscriber(ctx context.Context, id string, subscriber signaling.McuSubscriber) { s.subscribersLock.Lock() defer s.subscribersLock.Unlock() s.subscribers[id] = subscriber s.subscriberIds[subscriber] = id } func (s *ProxySession) DeleteSubscriber(subscriber signaling.McuSubscriber) string { s.subscribersLock.Lock() defer s.subscribersLock.Unlock() id, found := s.subscriberIds[subscriber] if !found { return "" } delete(s.subscribers, id) delete(s.subscriberIds, subscriber) return id } func (s 
*ProxySession) clearPublishers() { s.publishersLock.Lock() defer s.publishersLock.Unlock() go func(publishers map[string]signaling.McuPublisher) { for id, publisher := range publishers { if s.proxy.DeleteClient(id, publisher) { statsPublishersCurrent.WithLabelValues(string(publisher.StreamType())).Dec() } publisher.Close(context.Background()) } }(s.publishers) s.publishers = make(map[string]signaling.McuPublisher) s.publisherIds = make(map[signaling.McuPublisher]string) } func (s *ProxySession) clearSubscribers() { s.publishersLock.Lock() defer s.publishersLock.Unlock() go func(subscribers map[string]signaling.McuSubscriber) { for id, subscriber := range subscribers { if s.proxy.DeleteClient(id, subscriber) { statsSubscribersCurrent.WithLabelValues(string(subscriber.StreamType())).Dec() } subscriber.Close(context.Background()) } }(s.subscribers) s.subscribers = make(map[string]signaling.McuSubscriber) s.subscriberIds = make(map[signaling.McuSubscriber]string) } func (s *ProxySession) NotifyDisconnected() { s.clearPublishers() s.clearSubscribers() } nextcloud-spreed-signaling-1.2.4/proxy/proxy_stats_prometheus.go000066400000000000000000000071541460321600400253410ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. 
If not, see . */ package main import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsSessionsCurrent = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "sessions", Help: "The current number of sessions", }) statsSessionsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "sessions_total", Help: "The total number of created sessions", }) statsSessionsResumedTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "sessions_resumed_total", Help: "The total number of resumed sessions", }) statsPublishersCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "publishers", Help: "The current number of publishers", }, []string{"type"}) statsPublishersTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "publishers_total", Help: "The total number of created publishers", }, []string{"type"}) statsSubscribersCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "subscribers", Help: "The current number of subscribers", }, []string{"type"}) statsSubscribersTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "subscribers_total", Help: "The total number of created subscribers", }, []string{"type"}) statsCommandMessagesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "command_messages_total", Help: "The total number of command messages", }, []string{"type"}) statsPayloadMessagesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "proxy", Name: "payload_messages_total", Help: "The total number of payload messages", }, []string{"type"}) statsTokenErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: 
"signaling", Subsystem: "proxy", Name: "token_errors_total", Help: "The total number of token errors", }, []string{"reason"}) ) func init() { prometheus.MustRegister(statsSessionsCurrent) prometheus.MustRegister(statsSessionsTotal) prometheus.MustRegister(statsSessionsResumedTotal) prometheus.MustRegister(statsPublishersCurrent) prometheus.MustRegister(statsPublishersTotal) prometheus.MustRegister(statsSubscribersCurrent) prometheus.MustRegister(statsSubscribersTotal) prometheus.MustRegister(statsCommandMessagesTotal) prometheus.MustRegister(statsPayloadMessagesTotal) prometheus.MustRegister(statsTokenErrorsTotal) } nextcloud-spreed-signaling-1.2.4/proxy/proxy_tokens.go000066400000000000000000000023041460321600400232230ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "crypto/rsa" "github.com/dlintw/goconf" ) const ( TokenTypeEtcd = "etcd" TokenTypeStatic = "static" TokenTypeDefault = TokenTypeStatic ) type ProxyToken struct { id string key *rsa.PublicKey } type ProxyTokens interface { Get(id string) (*ProxyToken, error) Reload(config *goconf.ConfigFile) Close() } nextcloud-spreed-signaling-1.2.4/proxy/proxy_tokens_etcd.go000066400000000000000000000076421460321600400242340ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "bytes" "context" "fmt" "log" "strings" "sync/atomic" "time" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) const ( tokenCacheSize = 4096 ) type tokenCacheEntry struct { keyValue []byte token *ProxyToken } type tokensEtcd struct { client *signaling.EtcdClient tokenFormats atomic.Value tokenCache *signaling.LruCache } func NewProxyTokensEtcd(config *goconf.ConfigFile) (ProxyTokens, error) { client, err := signaling.NewEtcdClient(config, "tokens") if err != nil { return nil, err } if !client.IsConfigured() { return nil, fmt.Errorf("No etcd endpoints configured") } result := &tokensEtcd{ client: client, tokenCache: signaling.NewLruCache(tokenCacheSize), } if err := result.load(config, false); err != nil { return nil, err } return result, nil } func (t *tokensEtcd) getKeys(id string) []string { format := t.tokenFormats.Load().([]string) var result []string for _, f := range format { result = append(result, fmt.Sprintf(f, id)) } return result } func (t *tokensEtcd) getByKey(id string, key string) (*ProxyToken, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() resp, err := t.client.Get(ctx, key) if err != nil { return nil, err } if len(resp.Kvs) == 0 { return nil, nil } else if len(resp.Kvs) > 1 { log.Printf("Received multiple keys for %s, using last", key) } keyValue := resp.Kvs[len(resp.Kvs)-1].Value cached, _ := t.tokenCache.Get(key).(*tokenCacheEntry) if cached == nil || !bytes.Equal(cached.keyValue, keyValue) { // Parsed public keys are cached to avoid the parse overhead. 
publicKey, err := jwt.ParseRSAPublicKeyFromPEM(keyValue) if err != nil { return nil, err } cached = &tokenCacheEntry{ keyValue: keyValue, token: &ProxyToken{ id: id, key: publicKey, }, } t.tokenCache.Set(key, cached) } return cached.token, nil } func (t *tokensEtcd) Get(id string) (*ProxyToken, error) { for _, k := range t.getKeys(id) { token, err := t.getByKey(id, k) if err != nil { log.Printf("Could not get public key from %s for %s: %s", k, id, err) continue } else if token == nil { continue } return token, nil } return nil, nil } func (t *tokensEtcd) load(config *goconf.ConfigFile, ignoreErrors bool) error { tokenFormat, _ := config.GetString("tokens", "keyformat") formats := strings.Split(tokenFormat, ",") var tokenFormats []string for _, f := range formats { f = strings.TrimSpace(f) if f != "" { tokenFormats = append(tokenFormats, f) } } if len(tokenFormats) == 0 { tokenFormats = []string{"/%s"} } t.tokenFormats.Store(tokenFormats) log.Printf("Using %v as token formats", tokenFormats) return nil } func (t *tokensEtcd) Reload(config *goconf.ConfigFile) { if err := t.load(config, true); err != nil { log.Printf("Error reloading etcd tokens: %s", err) } } func (t *tokensEtcd) Close() { if err := t.client.Close(); err != nil { log.Printf("Error while closing etcd client: %s", err) } } nextcloud-spreed-signaling-1.2.4/proxy/proxy_tokens_etcd_test.go000066400000000000000000000106171460321600400252670ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package main import ( "crypto" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/pem" "errors" "net" "net/url" "os" "runtime" "strconv" "syscall" "testing" "github.com/dlintw/goconf" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/lease" ) var ( etcdListenUrl = "http://localhost:8080" ) func isErrorAddressAlreadyInUse(err error) bool { var eOsSyscall *os.SyscallError if !errors.As(err, &eOsSyscall) { return false } var errErrno syscall.Errno // doesn't need a "*" (ptr) because it's already a ptr (uintptr) if !errors.As(eOsSyscall, &errErrno) { return false } if errErrno == syscall.EADDRINUSE { return true } const WSAEADDRINUSE = 10048 if runtime.GOOS == "windows" && errErrno == WSAEADDRINUSE { return true } return false } func newEtcdForTesting(t *testing.T) *embed.Etcd { cfg := embed.NewConfig() cfg.Dir = t.TempDir() os.Chmod(cfg.Dir, 0700) // nolint cfg.LogLevel = "warn" u, err := url.Parse(etcdListenUrl) if err != nil { t.Fatal(err) } // Find a free port to bind the server to. 
var etcd *embed.Etcd for port := 50000; port < 50100; port++ { u.Host = net.JoinHostPort("localhost", strconv.Itoa(port)) cfg.ListenClientUrls = []url.URL{*u} httpListener := u httpListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+1)) cfg.ListenClientHttpUrls = []url.URL{*httpListener} peerListener := u peerListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+2)) cfg.ListenPeerUrls = []url.URL{*peerListener} etcd, err = embed.StartEtcd(cfg) if isErrorAddressAlreadyInUse(err) { continue } else if err != nil { t.Fatal(err) } break } if etcd == nil { t.Fatal("could not find free port") } t.Cleanup(func() { etcd.Close() }) // Wait for server to be ready. <-etcd.Server.ReadyNotify() return etcd } func newTokensEtcdForTesting(t *testing.T) (*tokensEtcd, *embed.Etcd) { etcd := newEtcdForTesting(t) cfg := goconf.NewConfigFile() cfg.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String()) cfg.AddOption("tokens", "keyformat", "/%s, /testing/%s/key") tokens, err := NewProxyTokensEtcd(cfg) if err != nil { t.Fatal(err) } t.Cleanup(func() { tokens.Close() }) return tokens.(*tokensEtcd), etcd } func storeKey(t *testing.T, etcd *embed.Etcd, key string, pubkey crypto.PublicKey) { var data []byte var err error switch pubkey := pubkey.(type) { case rsa.PublicKey: data, err = x509.MarshalPKIXPublicKey(&pubkey) if err != nil { t.Fatal(err) } default: t.Fatalf("unknown key type %T in %+v", pubkey, pubkey) } data = pem.EncodeToMemory(&pem.Block{ Type: "RSA PUBLIC KEY", Bytes: data, }) if kv := etcd.Server.KV(); kv != nil { kv.Put([]byte(key), data, lease.NoLease) kv.Commit() } } func generateAndSaveKey(t *testing.T, etcd *embed.Etcd, name string) *rsa.PrivateKey { key, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { t.Fatal(err) } storeKey(t, etcd, name, key.PublicKey) return key } func TestProxyTokensEtcd(t *testing.T) { tokens, etcd := newTokensEtcdForTesting(t) key1 := generateAndSaveKey(t, etcd, "/foo") key2 := 
generateAndSaveKey(t, etcd, "/testing/bar/key") if token, err := tokens.Get("foo"); err != nil { t.Error(err) } else if token == nil { t.Error("could not get token") } else if !key1.PublicKey.Equal(token.key) { t.Error("token keys mismatch") } if token, err := tokens.Get("bar"); err != nil { t.Error(err) } else if token == nil { t.Error("could not get token") } else if !key2.PublicKey.Equal(token.key) { t.Error("token keys mismatch") } } nextcloud-spreed-signaling-1.2.4/proxy/proxy_tokens_static.go000066400000000000000000000062611460321600400246000ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2020 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package main import ( "fmt" "log" "os" "sort" "sync/atomic" "github.com/dlintw/goconf" "github.com/golang-jwt/jwt/v4" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) type tokensStatic struct { tokenKeys atomic.Value } func NewProxyTokensStatic(config *goconf.ConfigFile) (ProxyTokens, error) { result := &tokensStatic{} if err := result.load(config, false); err != nil { return nil, err } return result, nil } func (t *tokensStatic) setTokenKeys(keys map[string]*ProxyToken) { t.tokenKeys.Store(keys) } func (t *tokensStatic) getTokenKeys() map[string]*ProxyToken { return t.tokenKeys.Load().(map[string]*ProxyToken) } func (t *tokensStatic) Get(id string) (*ProxyToken, error) { tokenKeys := t.getTokenKeys() token := tokenKeys[id] return token, nil } func (t *tokensStatic) load(config *goconf.ConfigFile, ignoreErrors bool) error { options, err := signaling.GetStringOptions(config, "tokens", ignoreErrors) if err != nil { return err } tokenKeys := make(map[string]*ProxyToken) for id, filename := range options { if filename == "" { if !ignoreErrors { return fmt.Errorf("No filename given for token %s", id) } log.Printf("No filename given for token %s, ignoring", id) continue } keyData, err := os.ReadFile(filename) if err != nil { if !ignoreErrors { return fmt.Errorf("Could not read public key from %s: %s", filename, err) } log.Printf("Could not read public key from %s, ignoring: %s", filename, err) continue } key, err := jwt.ParseRSAPublicKeyFromPEM(keyData) if err != nil { if !ignoreErrors { return fmt.Errorf("Could not parse public key from %s: %s", filename, err) } log.Printf("Could not parse public key from %s, ignoring: %s", filename, err) continue } tokenKeys[id] = &ProxyToken{ id: id, key: key, } } if len(tokenKeys) == 0 { log.Printf("No token keys loaded") } else { var keyIds []string for k := range tokenKeys { keyIds = append(keyIds, k) } sort.Strings(keyIds) log.Printf("Enabled token keys: %v", keyIds) } t.setTokenKeys(tokenKeys) return nil } func 
(t *tokensStatic) Reload(config *goconf.ConfigFile) { if err := t.load(config, true); err != nil { log.Printf("Error reloading static tokens: %s", err) } } func (t *tokensStatic) Close() { t.setTokenKeys(map[string]*ProxyToken{}) } nextcloud-spreed-signaling-1.2.4/proxy_config.go000066400000000000000000000020071460321600400220040ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "github.com/dlintw/goconf" ) type ProxyConfig interface { Start() error Stop() Reload(config *goconf.ConfigFile) error } nextcloud-spreed-signaling-1.2.4/proxy_config_etcd.go000066400000000000000000000117701460321600400230120ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "errors" "log" "sync" "time" "github.com/dlintw/goconf" clientv3 "go.etcd.io/etcd/client/v3" ) type proxyConfigEtcd struct { mu sync.Mutex proxy McuProxy client *EtcdClient keyPrefix string keyInfos map[string]*ProxyInformationEtcd urlToKey map[string]string } func NewProxyConfigEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient, proxy McuProxy) (ProxyConfig, error) { if !etcdClient.IsConfigured() { return nil, errors.New("No etcd endpoints configured") } result := &proxyConfigEtcd{ proxy: proxy, client: etcdClient, keyInfos: make(map[string]*ProxyInformationEtcd), urlToKey: make(map[string]string), } if err := result.configure(config, false); err != nil { return nil, err } return result, nil } func (p *proxyConfigEtcd) configure(config *goconf.ConfigFile, fromReload bool) error { keyPrefix, _ := config.GetString("mcu", "keyprefix") if keyPrefix == "" { keyPrefix = "/%s" } p.keyPrefix = keyPrefix return nil } func (p *proxyConfigEtcd) Start() error { p.client.AddListener(p) return nil } func (p *proxyConfigEtcd) Reload(config *goconf.ConfigFile) error { // not implemented return nil } func (p *proxyConfigEtcd) Stop() { p.client.RemoveListener(p) } func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) { go func() { if err := client.Watch(context.Background(), p.keyPrefix, p, clientv3.WithPrefix()); err != nil { log.Printf("Error processing watch for %s: %s", p.keyPrefix, err) } }() go func() { if err := client.WaitForConnection(context.Background()); err != nil { panic(err) } backoff, err := NewExponentialBackoff(initialWaitDelay, 
maxWaitDelay) if err != nil { panic(err) } for { response, err := p.getProxyUrls(client, p.keyPrefix) if err != nil { if err == context.DeadlineExceeded { log.Printf("Timeout getting initial list of proxy URLs, retry in %s", backoff.NextWait()) } else { log.Printf("Could not get initial list of proxy URLs, retry in %s: %s", backoff.NextWait(), err) } backoff.Wait(context.Background()) continue } for _, ev := range response.Kvs { p.EtcdKeyUpdated(client, string(ev.Key), ev.Value) } return } }() } func (p *proxyConfigEtcd) EtcdWatchCreated(client *EtcdClient, key string) { } func (p *proxyConfigEtcd) getProxyUrls(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() return client.Get(ctx, keyPrefix, clientv3.WithPrefix()) } func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) { var info ProxyInformationEtcd if err := json.Unmarshal(data, &info); err != nil { log.Printf("Could not decode proxy information %s: %s", string(data), err) return } if err := info.CheckValid(); err != nil { log.Printf("Received invalid proxy information %s: %s", string(data), err) return } p.mu.Lock() defer p.mu.Unlock() prev, found := p.keyInfos[key] if found && info.Address != prev.Address { // Address of a proxy has changed. 
p.removeEtcdProxyLocked(key) found = false } if otherKey, otherFound := p.urlToKey[info.Address]; otherFound && otherKey != key { log.Printf("Address %s is already registered for key %s, ignoring %s", info.Address, otherKey, key) return } if found { p.keyInfos[key] = &info p.proxy.KeepConnection(info.Address) } else { if err := p.proxy.AddConnection(false, info.Address); err != nil { log.Printf("Could not create proxy connection to %s: %s", info.Address, err) return } log.Printf("Added new connection to %s (from %s)", info.Address, key) p.keyInfos[key] = &info p.urlToKey[info.Address] = key } } func (p *proxyConfigEtcd) EtcdKeyDeleted(client *EtcdClient, key string) { p.mu.Lock() defer p.mu.Unlock() p.removeEtcdProxyLocked(key) } func (p *proxyConfigEtcd) removeEtcdProxyLocked(key string) { info, found := p.keyInfos[key] if !found { return } delete(p.keyInfos, key) delete(p.urlToKey, info.Address) log.Printf("Removing connection to %s (from %s)", info.Address, key) p.proxy.RemoveConnection(info.Address) } nextcloud-spreed-signaling-1.2.4/proxy_config_etcd_test.go000066400000000000000000000056511460321600400240520ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "encoding/json" "testing" "time" "github.com/dlintw/goconf" "go.etcd.io/etcd/server/v3/embed" ) type TestProxyInformationEtcd struct { Address string `json:"address"` OtherData string `json:"otherdata,omitempty"` } func newProxyConfigEtcd(t *testing.T, proxy McuProxy) (*embed.Etcd, ProxyConfig) { t.Helper() etcd, client := NewEtcdClientForTest(t) cfg := goconf.NewConfigFile() cfg.AddOption("mcu", "keyprefix", "proxies/") p, err := NewProxyConfigEtcd(cfg, client, proxy) if err != nil { t.Fatal(err) } t.Cleanup(func() { p.Stop() }) return etcd, p } func SetEtcdProxy(t *testing.T, etcd *embed.Etcd, path string, proxy *TestProxyInformationEtcd) { t.Helper() data, err := json.Marshal(proxy) if err != nil { t.Fatal(err) } SetEtcdValue(etcd, path, data) } func TestProxyConfigEtcd(t *testing.T) { proxy := newMcuProxyForConfig(t) etcd, config := newProxyConfigEtcd(t, proxy) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() SetEtcdProxy(t, etcd, "proxies/a", &TestProxyInformationEtcd{ Address: "https://foo/", }) proxy.Expect("add", "https://foo/") if err := config.Start(); err != nil { t.Fatal(err) } proxy.WaitForEvents(ctx) proxy.Expect("add", "https://bar/") SetEtcdProxy(t, etcd, "proxies/b", &TestProxyInformationEtcd{ Address: "https://bar/", }) proxy.WaitForEvents(ctx) proxy.Expect("keep", "https://bar/") SetEtcdProxy(t, etcd, "proxies/b", &TestProxyInformationEtcd{ Address: "https://bar/", OtherData: "ignore-me", }) proxy.WaitForEvents(ctx) proxy.Expect("remove", "https://foo/") DeleteEtcdValue(etcd, "proxies/a") proxy.WaitForEvents(ctx) proxy.Expect("remove", "https://bar/") proxy.Expect("add", "https://baz/") SetEtcdProxy(t, etcd, "proxies/b", &TestProxyInformationEtcd{ Address: "https://baz/", }) proxy.WaitForEvents(ctx) // Adding the same hostname multiple times should not trigger an event. 
SetEtcdProxy(t, etcd, "proxies/c", &TestProxyInformationEtcd{ Address: "https://baz/", }) time.Sleep(100 * time.Millisecond) } nextcloud-spreed-signaling-1.2.4/proxy_config_static.go000066400000000000000000000111031460321600400233500ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "errors" "log" "net" "net/url" "strings" "sync" "github.com/dlintw/goconf" ) type ipList struct { hostname string entry *DnsMonitorEntry ips []net.IP } type proxyConfigStatic struct { mu sync.Mutex proxy McuProxy dnsMonitor *DnsMonitor dnsDiscovery bool connectionsMap map[string]*ipList } func NewProxyConfigStatic(config *goconf.ConfigFile, proxy McuProxy, dnsMonitor *DnsMonitor) (ProxyConfig, error) { result := &proxyConfigStatic{ proxy: proxy, dnsMonitor: dnsMonitor, connectionsMap: make(map[string]*ipList), } if err := result.configure(config, false); err != nil { return nil, err } if len(result.connectionsMap) == 0 { return nil, errors.New("No MCU proxy connections configured") } return result, nil } func (p *proxyConfigStatic) configure(config *goconf.ConfigFile, fromReload bool) error { p.mu.Lock() defer p.mu.Unlock() dnsDiscovery, _ := config.GetBool("mcu", "dnsdiscovery") if dnsDiscovery != p.dnsDiscovery { if !dnsDiscovery { for _, ips := range p.connectionsMap { if ips.entry != nil { p.dnsMonitor.Remove(ips.entry) ips.entry = nil } } } p.dnsDiscovery = dnsDiscovery } remove := make(map[string]*ipList) for u, ips := range p.connectionsMap { remove[u] = ips } mcuUrl, _ := config.GetString("mcu", "url") for _, u := range strings.Split(mcuUrl, " ") { u = strings.TrimSpace(u) if u == "" { continue } if existing, found := remove[u]; found { // Proxy connection still exists in new configuration delete(remove, u) p.proxy.KeepConnection(u, existing.ips...) 
continue } parsed, err := url.Parse(u) if err != nil { if !fromReload { return err } log.Printf("Could not parse URL %s: %s", u, err) continue } if host, _, err := net.SplitHostPort(parsed.Host); err == nil { parsed.Host = host } if dnsDiscovery { p.connectionsMap[u] = &ipList{ hostname: parsed.Host, } continue } if fromReload { if err := p.proxy.AddConnection(fromReload, u); err != nil { if !fromReload { return err } log.Printf("Could not create proxy connection to %s: %s", u, err) continue } } p.connectionsMap[u] = &ipList{ hostname: parsed.Host, } } for u, entry := range remove { p.proxy.RemoveConnection(u, entry.ips...) delete(p.connectionsMap, u) } return nil } func (p *proxyConfigStatic) Start() error { p.mu.Lock() defer p.mu.Unlock() if p.dnsDiscovery { for u, ips := range p.connectionsMap { if ips.entry != nil { continue } entry, err := p.dnsMonitor.Add(u, p.onLookup) if err != nil { return err } ips.entry = entry } } else { for u, ipList := range p.connectionsMap { if err := p.proxy.AddConnection(false, u, ipList.ips...); err != nil { return err } } } return nil } func (p *proxyConfigStatic) Stop() { p.mu.Lock() defer p.mu.Unlock() if p.dnsDiscovery { for _, ips := range p.connectionsMap { if ips.entry == nil { continue } p.dnsMonitor.Remove(ips.entry) ips.entry = nil } } } func (p *proxyConfigStatic) Reload(config *goconf.ConfigFile) error { return p.configure(config, true) } func (p *proxyConfigStatic) onLookup(entry *DnsMonitorEntry, all []net.IP, added []net.IP, keep []net.IP, removed []net.IP) { p.mu.Lock() defer p.mu.Unlock() u := entry.URL() for _, ip := range keep { p.proxy.KeepConnection(u, ip) } if len(added) > 0 { if err := p.proxy.AddConnection(true, u, added...); err != nil { log.Printf("Could not add proxy connection to %s with %+v: %s", u, added, err) } } if len(removed) > 0 { p.proxy.RemoveConnection(u, removed...) 
} if ipList, found := p.connectionsMap[u]; found { ipList.ips = all } } nextcloud-spreed-signaling-1.2.4/proxy_config_static_test.go000066400000000000000000000064261460321600400244230ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "net" "strings" "testing" "time" "github.com/dlintw/goconf" ) func newProxyConfigStatic(t *testing.T, proxy McuProxy, dns bool, urls ...string) (ProxyConfig, *DnsMonitor) { cfg := goconf.NewConfigFile() cfg.AddOption("mcu", "url", strings.Join(urls, " ")) if dns { cfg.AddOption("mcu", "dnsdiscovery", "true") } dnsMonitor := newDnsMonitorForTest(t, time.Hour) // will be updated manually p, err := NewProxyConfigStatic(cfg, proxy, dnsMonitor) if err != nil { t.Fatal(err) } t.Cleanup(func() { p.Stop() }) return p, dnsMonitor } func updateProxyConfigStatic(t *testing.T, config ProxyConfig, dns bool, urls ...string) { cfg := goconf.NewConfigFile() cfg.AddOption("mcu", "url", strings.Join(urls, " ")) if dns { cfg.AddOption("mcu", "dnsdiscovery", "true") } if err := config.Reload(cfg); err != nil { t.Fatal(err) } } func TestProxyConfigStaticSimple(t *testing.T) { proxy := newMcuProxyForConfig(t) config, _ := newProxyConfigStatic(t, proxy, false, "https://foo/") proxy.Expect("add", "https://foo/") if err := config.Start(); err != nil { t.Fatal(err) } proxy.Expect("keep", "https://foo/") proxy.Expect("add", "https://bar/") updateProxyConfigStatic(t, config, false, "https://foo/", "https://bar/") proxy.Expect("keep", "https://bar/") proxy.Expect("add", "https://baz/") proxy.Expect("remove", "https://foo/") updateProxyConfigStatic(t, config, false, "https://bar/", "https://baz/") } func TestProxyConfigStaticDNS(t *testing.T) { lookup := newMockDnsLookupForTest(t) proxy := newMcuProxyForConfig(t) config, dnsMonitor := newProxyConfigStatic(t, proxy, true, "https://foo/") if err := config.Start(); err != nil { t.Fatal(err) } time.Sleep(time.Millisecond) lookup.Set("foo", []net.IP{ net.ParseIP("192.168.0.1"), net.ParseIP("10.1.2.3"), }) proxy.Expect("add", "https://foo/", lookup.Get("foo")...) 
dnsMonitor.checkHostnames() lookup.Set("foo", []net.IP{ net.ParseIP("192.168.0.1"), net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2"), }) proxy.Expect("keep", "https://foo/", net.ParseIP("192.168.0.1")) proxy.Expect("add", "https://foo/", net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")) proxy.Expect("remove", "https://foo/", net.ParseIP("10.1.2.3")) dnsMonitor.checkHostnames() proxy.Expect("add", "https://bar/") proxy.Expect("remove", "https://foo/", lookup.Get("foo")...) updateProxyConfigStatic(t, config, false, "https://bar/") } nextcloud-spreed-signaling-1.2.4/proxy_config_test.go000066400000000000000000000074001460321600400230450ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "net" "reflect" "runtime" "strings" "sync" "testing" ) var ( thisFilename string ) func init() { pc := make([]uintptr, 1) count := runtime.Callers(1, pc) frames := runtime.CallersFrames(pc[:count]) frame, _ := frames.Next() thisFilename = frame.File } type proxyConfigEvent struct { action string url string ips []net.IP } type mcuProxyForConfig struct { t *testing.T expected []proxyConfigEvent mu sync.Mutex waiters []chan struct{} } func newMcuProxyForConfig(t *testing.T) *mcuProxyForConfig { proxy := &mcuProxyForConfig{ t: t, } t.Cleanup(func() { if len(proxy.expected) > 0 { t.Errorf("expected events %+v were not triggered", proxy.expected) } }) return proxy } func (p *mcuProxyForConfig) Expect(action string, url string, ips ...net.IP) { if len(ips) == 0 { ips = nil } p.mu.Lock() defer p.mu.Unlock() p.expected = append(p.expected, proxyConfigEvent{ action: action, url: url, ips: ips, }) } func (p *mcuProxyForConfig) WaitForEvents(ctx context.Context) { p.t.Helper() p.mu.Lock() defer p.mu.Unlock() if len(p.expected) == 0 { return } waiter := make(chan struct{}) p.waiters = append(p.waiters, waiter) p.mu.Unlock() defer p.mu.Lock() select { case <-ctx.Done(): p.t.Error(ctx.Err()) case <-waiter: } } func (p *mcuProxyForConfig) checkEvent(event *proxyConfigEvent) { p.t.Helper() pc := make([]uintptr, 32) count := runtime.Callers(2, pc) frames := runtime.CallersFrames(pc[:count]) var caller runtime.Frame for { frame, more := frames.Next() if frame.File != thisFilename && strings.HasSuffix(frame.File, "_test.go") { caller = frame break } if !more { break } } p.mu.Lock() defer p.mu.Unlock() if len(p.expected) == 0 { p.t.Errorf("no event expected, got %+v from %s:%d", event, caller.File, caller.Line) return } defer func() { if len(p.expected) == 0 { waiters := p.waiters p.waiters = nil p.mu.Unlock() defer p.mu.Lock() for _, ch := range waiters { ch <- struct{}{} } } }() expected := p.expected[0] p.expected = p.expected[1:] if 
!reflect.DeepEqual(expected, *event) { p.t.Errorf("expected %+v, got %+v from %s:%d", expected, event, caller.File, caller.Line) } } func (p *mcuProxyForConfig) AddConnection(ignoreErrors bool, url string, ips ...net.IP) error { p.t.Helper() if len(ips) == 0 { ips = nil } p.checkEvent(&proxyConfigEvent{ action: "add", url: url, ips: ips, }) return nil } func (p *mcuProxyForConfig) KeepConnection(url string, ips ...net.IP) { p.t.Helper() if len(ips) == 0 { ips = nil } p.checkEvent(&proxyConfigEvent{ action: "keep", url: url, ips: ips, }) } func (p *mcuProxyForConfig) RemoveConnection(url string, ips ...net.IP) { p.t.Helper() if len(ips) == 0 { ips = nil } p.checkEvent(&proxyConfigEvent{ action: "remove", url: url, ips: ips, }) } nextcloud-spreed-signaling-1.2.4/room.go000066400000000000000000000633361460321600400202660ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "bytes" "context" "encoding/json" "fmt" "log" "net/url" "strconv" "sync" "time" "github.com/prometheus/client_golang/prometheus" ) const ( // Must match values in "Participant.php" from Nextcloud Talk. 
FlagDisconnected = 0 FlagInCall = 1 FlagWithAudio = 2 FlagWithVideo = 4 FlagWithPhone = 8 ) type SessionChangeFlag int const ( SessionChangeFlags SessionChangeFlag = 1 SessionChangeInCall SessionChangeFlag = 2 ) var ( updateActiveSessionsInterval = 10 * time.Second ) func init() { RegisterRoomStats() } type Room struct { id string hub *Hub events AsyncEvents backend *Backend properties *json.RawMessage closer *Closer mu *sync.RWMutex sessions map[string]Session internalSessions map[Session]bool virtualSessions map[*VirtualSession]bool inCallSessions map[Session]bool roomSessionData map[string]*RoomSessionData statsRoomSessionsCurrent *prometheus.GaugeVec // Users currently in the room users []map[string]interface{} // Timestamps of last backend requests for the different types. lastRoomRequests map[string]int64 transientData *TransientData } func getRoomIdForBackend(id string, backend *Backend) string { if id == "" { return "" } return backend.Id() + "|" + id } func NewRoom(roomId string, properties *json.RawMessage, hub *Hub, events AsyncEvents, backend *Backend) (*Room, error) { room := &Room{ id: roomId, hub: hub, events: events, backend: backend, properties: properties, closer: NewCloser(), mu: &sync.RWMutex{}, sessions: make(map[string]Session), internalSessions: make(map[Session]bool), virtualSessions: make(map[*VirtualSession]bool), inCallSessions: make(map[Session]bool), roomSessionData: make(map[string]*RoomSessionData), statsRoomSessionsCurrent: statsRoomSessionsCurrent.MustCurryWith(prometheus.Labels{ "backend": backend.Id(), "room": roomId, }), lastRoomRequests: make(map[string]int64), transientData: NewTransientData(), } if err := events.RegisterBackendRoomListener(roomId, backend, room); err != nil { return nil, err } go room.run() return room, nil } func (r *Room) Id() string { return r.id } func (r *Room) Properties() *json.RawMessage { r.mu.RLock() defer r.mu.RUnlock() return r.properties } func (r *Room) Backend() *Backend { return r.backend } 
func (r *Room) IsEqual(other *Room) bool { if r == other { return true } else if other == nil { return false } else if r.Id() != other.Id() { return false } b1 := r.Backend() b2 := other.Backend() if b1 == b2 { return true } else if b1 == nil && b2 != nil { return false } else if b1 != nil && b2 == nil { return false } return b1.Id() == b2.Id() } func (r *Room) run() { ticker := time.NewTicker(updateActiveSessionsInterval) loop: for { select { case <-r.closer.C: break loop case <-ticker.C: r.publishActiveSessions() } } } func (r *Room) doClose() { r.closer.Close() } func (r *Room) unsubscribeBackend() { r.events.UnregisterBackendRoomListener(r.id, r.backend, r) } func (r *Room) Close() []Session { r.hub.removeRoom(r) r.doClose() r.mu.Lock() r.unsubscribeBackend() result := make([]Session, 0, len(r.sessions)) for _, s := range r.sessions { result = append(result, s) } r.sessions = nil r.statsRoomSessionsCurrent.Delete(prometheus.Labels{"clienttype": HelloClientTypeClient}) r.statsRoomSessionsCurrent.Delete(prometheus.Labels{"clienttype": HelloClientTypeInternal}) r.statsRoomSessionsCurrent.Delete(prometheus.Labels{"clienttype": HelloClientTypeVirtual}) r.mu.Unlock() return result } func (r *Room) ProcessBackendRoomRequest(message *AsyncMessage) { switch message.Type { case "room": r.processBackendRoomRequestRoom(message.Room) case "asyncroom": r.processBackendRoomRequestAsyncRoom(message.AsyncRoom) default: log.Printf("Unsupported backend room request with type %s in %s: %+v", message.Type, r.id, message) } } func (r *Room) processBackendRoomRequestRoom(message *BackendServerRoomRequest) { received := message.ReceivedTime if last, found := r.lastRoomRequests[message.Type]; found && last > received { if msg, err := json.Marshal(message); err == nil { log.Printf("Ignore old backend room request for %s: %s", r.Id(), string(msg)) } else { log.Printf("Ignore old backend room request for %s: %+v", r.Id(), message) } return } r.lastRoomRequests[message.Type] = received 
message.room = r switch message.Type { case "update": r.hub.roomUpdated <- message case "delete": r.notifyInternalRoomDeleted() r.hub.roomDeleted <- message case "incall": r.hub.roomInCall <- message case "participants": r.hub.roomParticipants <- message case "message": r.publishRoomMessage(message.Message) case "switchto": r.publishSwitchTo(message.SwitchTo) case "transient": switch message.Transient.Action { case TransientActionSet: r.SetTransientDataTTL(message.Transient.Key, message.Transient.Value, message.Transient.TTL) case TransientActionDelete: r.RemoveTransientData(message.Transient.Key) default: log.Printf("Unsupported transient action in room %s: %+v", r.Id(), message.Transient) } default: log.Printf("Unsupported backend room request with type %s in %s: %+v", message.Type, r.Id(), message) } } func (r *Room) processBackendRoomRequestAsyncRoom(message *AsyncRoomMessage) { switch message.Type { case "sessionjoined": r.notifySessionJoined(message.SessionId) if message.ClientType == HelloClientTypeInternal { r.publishUsersChangedWithInternal() } default: log.Printf("Unsupported async room request with type %s in %s: %+v", message.Type, r.Id(), message) } } func (r *Room) AddSession(session Session, sessionData *json.RawMessage) { var roomSessionData *RoomSessionData if sessionData != nil && len(*sessionData) > 0 { roomSessionData = &RoomSessionData{} if err := json.Unmarshal(*sessionData, roomSessionData); err != nil { log.Printf("Error decoding room session data \"%s\": %s", string(*sessionData), err) roomSessionData = nil } } sid := session.PublicId() r.mu.Lock() _, found := r.sessions[sid] r.sessions[sid] = session if !found { r.statsRoomSessionsCurrent.With(prometheus.Labels{"clienttype": session.ClientType()}).Inc() } var publishUsersChanged bool switch session.ClientType() { case HelloClientTypeInternal: r.internalSessions[session] = true case HelloClientTypeVirtual: virtualSession, ok := session.(*VirtualSession) if !ok { delete(r.sessions, sid) 
r.mu.Unlock() panic(fmt.Sprintf("Expected a virtual session, got %v", session)) } r.virtualSessions[virtualSession] = true publishUsersChanged = true } if roomSessionData != nil { r.roomSessionData[sid] = roomSessionData log.Printf("Session %s sent room session data %+v", session.PublicId(), roomSessionData) } r.mu.Unlock() if !found { r.PublishSessionJoined(session, roomSessionData) if publishUsersChanged { r.publishUsersChangedWithInternal() if session, ok := session.(*VirtualSession); ok && session.Flags() != 0 { r.publishSessionFlagsChanged(session) } } if clientSession, ok := session.(*ClientSession); ok { r.transientData.AddListener(clientSession) } } // Trigger notifications that the session joined. if err := r.events.PublishBackendRoomMessage(r.id, r.backend, &AsyncMessage{ Type: "asyncroom", AsyncRoom: &AsyncRoomMessage{ Type: "sessionjoined", SessionId: sid, ClientType: session.ClientType(), }, }); err != nil { log.Printf("Error publishing joined event for session %s: %s", sid, err) } } func (r *Room) getOtherSessions(ignoreSessionId string) (Session, []Session) { r.mu.Lock() defer r.mu.Unlock() sessions := make([]Session, 0, len(r.sessions)) for _, s := range r.sessions { if s.PublicId() == ignoreSessionId { continue } sessions = append(sessions, s) } return r.sessions[ignoreSessionId], sessions } func (r *Room) notifySessionJoined(sessionId string) { session, sessions := r.getOtherSessions(sessionId) if len(sessions) == 0 { return } if session != nil && session.ClientType() != HelloClientTypeClient { session = nil } events := make([]*EventServerMessageSessionEntry, 0, len(sessions)) for _, s := range sessions { entry := &EventServerMessageSessionEntry{ SessionId: s.PublicId(), UserId: s.UserId(), User: s.UserData(), } if s, ok := s.(*ClientSession); ok { entry.RoomSessionId = s.RoomSessionId() } events = append(events, entry) } msg := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "join", Join: events, }, } if err := 
r.events.PublishSessionMessage(sessionId, r.backend, &AsyncMessage{ Type: "message", Message: msg, }); err != nil { log.Printf("Error publishing joined events to session %s: %s", sessionId, err) } // Notify about initial flags of virtual sessions. for _, s := range sessions { vsess, ok := s.(*VirtualSession) if !ok { continue } flags := vsess.Flags() if flags == 0 { continue } msg := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "participants", Type: "flags", Flags: &RoomFlagsServerMessage{ RoomId: r.id, SessionId: vsess.PublicId(), Flags: vsess.Flags(), }, }, } if err := r.events.PublishSessionMessage(sessionId, r.backend, &AsyncMessage{ Type: "message", Message: msg, }); err != nil { log.Printf("Error publishing initial flags to session %s: %s", sessionId, err) } } } func (r *Room) HasSession(session Session) bool { r.mu.RLock() _, result := r.sessions[session.PublicId()] r.mu.RUnlock() return result } func (r *Room) IsSessionInCall(session Session) bool { r.mu.RLock() _, result := r.inCallSessions[session] r.mu.RUnlock() return result } // Returns "true" if there are still clients in the room. 
func (r *Room) RemoveSession(session Session) bool { r.mu.Lock() if _, found := r.sessions[session.PublicId()]; !found { r.mu.Unlock() return true } sid := session.PublicId() r.statsRoomSessionsCurrent.With(prometheus.Labels{"clienttype": session.ClientType()}).Dec() delete(r.sessions, sid) delete(r.internalSessions, session) if virtualSession, ok := session.(*VirtualSession); ok { delete(r.virtualSessions, virtualSession) } if clientSession, ok := session.(*ClientSession); ok { r.transientData.RemoveListener(clientSession) } delete(r.inCallSessions, session) delete(r.roomSessionData, sid) if len(r.sessions) > 0 { r.mu.Unlock() r.PublishSessionLeft(session) return true } r.hub.removeRoom(r) r.statsRoomSessionsCurrent.Delete(prometheus.Labels{"clienttype": HelloClientTypeClient}) r.statsRoomSessionsCurrent.Delete(prometheus.Labels{"clienttype": HelloClientTypeInternal}) r.statsRoomSessionsCurrent.Delete(prometheus.Labels{"clienttype": HelloClientTypeVirtual}) r.unsubscribeBackend() r.doClose() r.mu.Unlock() // Still need to publish an event so sessions on other servers get notified. r.PublishSessionLeft(session) return false } func (r *Room) publish(message *ServerMessage) error { return r.events.PublishRoomMessage(r.id, r.backend, &AsyncMessage{ Type: "message", Message: message, }) } func (r *Room) UpdateProperties(properties *json.RawMessage) { r.mu.Lock() defer r.mu.Unlock() if (r.properties == nil && properties == nil) || (r.properties != nil && properties != nil && bytes.Equal(*r.properties, *properties)) { // Don't notify if properties didn't change. 
return } r.properties = properties message := &ServerMessage{ Type: "room", Room: &RoomServerMessage{ RoomId: r.id, Properties: r.properties, }, } if err := r.publish(message); err != nil { log.Printf("Could not publish update properties message in room %s: %s", r.Id(), err) } } func (r *Room) GetRoomSessionData(session Session) *RoomSessionData { r.mu.RLock() defer r.mu.RUnlock() return r.roomSessionData[session.PublicId()] } func (r *Room) PublishSessionJoined(session Session, sessionData *RoomSessionData) { sessionId := session.PublicId() if sessionId == "" { return } userid := session.UserId() if userid == "" && sessionData != nil { userid = sessionData.UserId } message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "join", Join: []*EventServerMessageSessionEntry{ { SessionId: sessionId, UserId: userid, User: session.UserData(), }, }, }, } if session, ok := session.(*ClientSession); ok { message.Event.Join[0].RoomSessionId = session.RoomSessionId() } if err := r.publish(message); err != nil { log.Printf("Could not publish session joined message in room %s: %s", r.Id(), err) } } func (r *Room) PublishSessionLeft(session Session) { sessionId := session.PublicId() if sessionId == "" { return } message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "leave", Leave: []string{ sessionId, }, }, } if err := r.publish(message); err != nil { log.Printf("Could not publish session left message in room %s: %s", r.Id(), err) } if session.ClientType() == HelloClientTypeInternal { r.publishUsersChangedWithInternal() } } func (r *Room) addInternalSessions(users []map[string]interface{}) []map[string]interface{} { now := time.Now().Unix() r.mu.Lock() defer r.mu.Unlock() for _, user := range users { sessionid, found := user["sessionId"] if !found || sessionid == "" { continue } if userid, found := user["userId"]; !found || userid == "" { if roomSessionData, found := r.roomSessionData[sessionid.(string)]; 
found { user["userId"] = roomSessionData.UserId } } } for session := range r.internalSessions { users = append(users, map[string]interface{}{ "inCall": session.(*ClientSession).GetInCall(), "sessionId": session.PublicId(), "lastPing": now, "internal": true, }) } for session := range r.virtualSessions { users = append(users, map[string]interface{}{ "inCall": session.GetInCall(), "sessionId": session.PublicId(), "lastPing": now, "virtual": true, }) } return users } func (r *Room) filterPermissions(users []map[string]interface{}) []map[string]interface{} { for _, user := range users { delete(user, "permissions") } return users } func IsInCall(value interface{}) (bool, bool) { switch value := value.(type) { case bool: return value, true case float64: // Default JSON decoder unmarshals numbers to float64. return (int(value) & FlagInCall) == FlagInCall, true case int: return (value & FlagInCall) == FlagInCall, true case json.Number: // Expect integer when using numeric JSON decoder. if flags, err := value.Int64(); err == nil { return (flags & FlagInCall) == FlagInCall, true } return false, false default: return false, false } } func (r *Room) PublishUsersInCallChanged(changed []map[string]interface{}, users []map[string]interface{}) { r.users = users for _, user := range changed { inCallInterface, found := user["inCall"] if !found { continue } inCall, ok := IsInCall(inCallInterface) if !ok { continue } sessionIdInterface, found := user["sessionId"] if !found { sessionIdInterface, found = user["sessionid"] if !found { continue } } sessionId, ok := sessionIdInterface.(string) if !ok { continue } session := r.hub.GetSessionByPublicId(sessionId) if session == nil { continue } if inCall { r.mu.Lock() if !r.inCallSessions[session] { r.inCallSessions[session] = true log.Printf("Session %s joined call %s", session.PublicId(), r.id) } r.mu.Unlock() } else { r.mu.Lock() delete(r.inCallSessions, session) r.mu.Unlock() if clientSession, ok := session.(*ClientSession); ok { 
clientSession.LeaveCall() } } } changed = r.filterPermissions(changed) users = r.filterPermissions(users) message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "participants", Type: "update", Update: &RoomEventServerMessage{ RoomId: r.id, Changed: changed, Users: r.addInternalSessions(users), }, }, } if err := r.publish(message); err != nil { log.Printf("Could not publish incall message in room %s: %s", r.Id(), err) } } func (r *Room) PublishUsersInCallChangedAll(inCall int) { r.mu.Lock() defer r.mu.Unlock() var notify []*ClientSession if inCall&FlagInCall != 0 { // All connected sessions join the call. var joined []string for _, session := range r.sessions { clientSession, ok := session.(*ClientSession) if !ok { continue } if session.ClientType() == HelloClientTypeInternal { continue } if !r.inCallSessions[session] { r.inCallSessions[session] = true joined = append(joined, session.PublicId()) } notify = append(notify, clientSession) } if len(joined) == 0 { return } log.Printf("Sessions %v joined call %s", joined, r.id) } else if len(r.inCallSessions) > 0 { // Perform actual leaving asynchronously. ch := make(chan *ClientSession, 1) go func() { for { session := <-ch if session == nil { break } session.LeaveCall() } }() for _, session := range r.sessions { clientSession, ok := session.(*ClientSession) if !ok { continue } notify = append(notify, clientSession) } for session := range r.inCallSessions { if clientSession, ok := session.(*ClientSession); ok { ch <- clientSession } } close(ch) r.inCallSessions = make(map[Session]bool) } else { // All sessions already left the call, no need to notify. 
return } inCallMsg := json.RawMessage(strconv.FormatInt(int64(inCall), 10)) message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "participants", Type: "update", Update: &RoomEventServerMessage{ RoomId: r.id, InCall: &inCallMsg, All: true, }, }, } for _, session := range notify { if !session.SendMessage(message) { log.Printf("Could not send incall message from room %s to %s", r.Id(), session.PublicId()) } } } func (r *Room) PublishUsersChanged(changed []map[string]interface{}, users []map[string]interface{}) { changed = r.filterPermissions(changed) users = r.filterPermissions(users) message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "participants", Type: "update", Update: &RoomEventServerMessage{ RoomId: r.id, Changed: changed, Users: r.addInternalSessions(users), }, }, } if err := r.publish(message); err != nil { log.Printf("Could not publish users changed message in room %s: %s", r.Id(), err) } } func (r *Room) getParticipantsUpdateMessage(users []map[string]interface{}) *ServerMessage { users = r.filterPermissions(users) message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "participants", Type: "update", Update: &RoomEventServerMessage{ RoomId: r.id, Users: r.addInternalSessions(users), }, }, } return message } func (r *Room) NotifySessionResumed(session *ClientSession) { message := r.getParticipantsUpdateMessage(r.users) if len(message.Event.Update.Users) == 0 { return } session.SendMessage(message) } func (r *Room) NotifySessionChanged(session Session, flags SessionChangeFlag) { if flags&SessionChangeFlags != 0 && session.ClientType() == HelloClientTypeVirtual { // Only notify if a virtual session has changed. 
if virtual, ok := session.(*VirtualSession); ok { r.publishSessionFlagsChanged(virtual) } } if flags&SessionChangeInCall != 0 { joinLeave := 0 if clientSession, ok := session.(*ClientSession); ok { if clientSession.GetInCall()&FlagInCall != 0 { joinLeave = 1 } else { joinLeave = 2 } } else if virtual, ok := session.(*VirtualSession); ok { if virtual.GetInCall()&FlagInCall != 0 { joinLeave = 1 } else { joinLeave = 2 } } if joinLeave != 0 { if joinLeave == 1 { r.mu.Lock() if !r.inCallSessions[session] { r.inCallSessions[session] = true log.Printf("Session %s joined call %s", session.PublicId(), r.id) } r.mu.Unlock() } else if joinLeave == 2 { r.mu.Lock() delete(r.inCallSessions, session) r.mu.Unlock() if clientSession, ok := session.(*ClientSession); ok { clientSession.LeaveCall() } } // TODO: Check if we could send a smaller update message with only the changed session. r.publishUsersChangedWithInternal() } } } func (r *Room) publishUsersChangedWithInternal() { message := r.getParticipantsUpdateMessage(r.users) if len(message.Event.Update.Users) == 0 { return } if err := r.publish(message); err != nil { log.Printf("Could not publish users changed message in room %s: %s", r.Id(), err) } } func (r *Room) publishSessionFlagsChanged(session *VirtualSession) { message := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "participants", Type: "flags", Flags: &RoomFlagsServerMessage{ RoomId: r.id, SessionId: session.PublicId(), Flags: session.Flags(), }, }, } if err := r.publish(message); err != nil { log.Printf("Could not publish flags changed message in room %s: %s", r.Id(), err) } } func (r *Room) publishActiveSessions() (int, *sync.WaitGroup) { r.mu.RLock() defer r.mu.RUnlock() entries := make(map[string][]BackendPingEntry) urls := make(map[string]*url.URL) for _, session := range r.sessions { u := session.BackendUrl() if u == "" { continue } var sid string var uid string switch sess := session.(type) { case *ClientSession: // Use Nextcloud session id 
and user id sid = sess.RoomSessionId() uid = sess.AuthUserId() case *VirtualSession: // Use our internal generated session id (will be added to Nextcloud). sid = sess.PublicId() uid = sess.UserId() default: continue } if sid == "" { continue } e, found := entries[u] if !found { p := session.ParsedBackendUrl() if p == nil { // Should not happen, invalid URLs should get rejected earlier. continue } urls[u] = p } entries[u] = append(e, BackendPingEntry{ SessionId: sid, UserId: uid, }) } var wg sync.WaitGroup if len(urls) == 0 { return 0, &wg } for u, e := range entries { wg.Add(1) go func(url *url.URL, entries []BackendPingEntry) { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), r.hub.backendTimeout) defer cancel() if err := r.hub.roomPing.SendPings(ctx, r, url, entries); err != nil { log.Printf("Error pinging room %s for active entries %+v: %s", r.id, entries, err) } }(urls[u], e) } return len(entries), &wg } func (r *Room) publishRoomMessage(message *BackendRoomMessageRequest) { if message == nil || message.Data == nil { return } msg := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "message", Message: &RoomEventMessage{ RoomId: r.id, Data: message.Data, }, }, } if err := r.publish(msg); err != nil { log.Printf("Could not publish room message in room %s: %s", r.Id(), err) } } func (r *Room) publishSwitchTo(message *BackendRoomSwitchToMessageRequest) { var wg sync.WaitGroup if len(message.SessionsList) > 0 { msg := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "switchto", SwitchTo: &EventServerMessageSwitchTo{ RoomId: message.RoomId, }, }, } for _, sessionId := range message.SessionsList { wg.Add(1) go func(sessionId string) { defer wg.Done() if err := r.events.PublishSessionMessage(sessionId, r.backend, &AsyncMessage{ Type: "message", Message: msg, }); err != nil { log.Printf("Error publishing switchto event to session %s: %s", sessionId, err) } }(sessionId) } } if 
len(message.SessionsMap) > 0 { for sessionId, details := range message.SessionsMap { wg.Add(1) go func(sessionId string, details json.RawMessage) { defer wg.Done() msg := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "switchto", SwitchTo: &EventServerMessageSwitchTo{ RoomId: message.RoomId, Details: details, }, }, } if err := r.events.PublishSessionMessage(sessionId, r.backend, &AsyncMessage{ Type: "message", Message: msg, }); err != nil { log.Printf("Error publishing switchto event to session %s: %s", sessionId, err) } }(sessionId, details) } } wg.Wait() } func (r *Room) notifyInternalRoomDeleted() { msg := &ServerMessage{ Type: "event", Event: &EventServerMessage{ Target: "room", Type: "delete", }, } r.mu.Lock() defer r.mu.Unlock() for s := range r.internalSessions { s.(*ClientSession).SendMessage(msg) } } func (r *Room) SetTransientData(key string, value interface{}) { r.transientData.Set(key, value) } func (r *Room) SetTransientDataTTL(key string, value interface{}, ttl time.Duration) { r.transientData.SetTTL(key, value, ttl) } func (r *Room) RemoveTransientData(key string) { r.transientData.Remove(key) } nextcloud-spreed-signaling-1.2.4/room_ping.go000066400000000000000000000134561460321600400213010ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. 
* * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "log" "net/url" "sync" "time" ) type pingEntries struct { url *url.URL entries map[*Room][]BackendPingEntry } func newPingEntries(url *url.URL, room *Room, entries []BackendPingEntry) *pingEntries { return &pingEntries{ url: url, entries: map[*Room][]BackendPingEntry{ room: entries, }, } } func (e *pingEntries) Add(room *Room, entries []BackendPingEntry) { if existing, found := e.entries[room]; found { e.entries[room] = append(existing, entries...) } else { e.entries[room] = entries } } func (e *pingEntries) RemoveRoom(room *Room) { delete(e.entries, room) } // RoomPing sends ping requests for active sessions in rooms. It evaluates the // capabilities of the Nextcloud server to determine if sessions from different // rooms can be grouped together. // // For that, all ping requests across rooms of enabled instances are combined // and sent out batched every "updateActiveSessionsInterval" seconds. 
type RoomPing struct { mu sync.Mutex closer *Closer backend *BackendClient capabilities *Capabilities entries map[string]*pingEntries } func NewRoomPing(backend *BackendClient, capabilities *Capabilities) (*RoomPing, error) { result := &RoomPing{ closer: NewCloser(), backend: backend, capabilities: capabilities, } return result, nil } func (p *RoomPing) Start() { go p.run() } func (p *RoomPing) Stop() { p.closer.Close() } func (p *RoomPing) run() { ticker := time.NewTicker(updateActiveSessionsInterval) loop: for { select { case <-p.closer.C: break loop case <-ticker.C: p.publishActiveSessions() } } } func (p *RoomPing) getAndClearEntries() map[string]*pingEntries { p.mu.Lock() defer p.mu.Unlock() entries := p.entries p.entries = nil return entries } func (p *RoomPing) publishEntries(entries *pingEntries, timeout time.Duration) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() limit, _, found := p.capabilities.GetIntegerConfig(ctx, entries.url, ConfigGroupSignaling, ConfigKeySessionPingLimit) if !found || limit <= 0 { // Limit disabled while waiting for the next iteration, fallback to sending // one request per room. for room, e := range entries.entries { ctx2, cancel2 := context.WithTimeout(context.Background(), timeout) defer cancel2() if err := p.sendPingsDirect(ctx2, room, entries.url, e); err != nil { log.Printf("Error pinging room %s for active entries %+v: %s", room.Id(), e, err) } } return } var allEntries []BackendPingEntry for _, e := range entries.entries { allEntries = append(allEntries, e...) } p.sendPingsCombined(entries.url, allEntries, limit, timeout) } func (p *RoomPing) publishActiveSessions() { var timeout time.Duration if p.backend.hub != nil { timeout = p.backend.hub.backendTimeout } else { // Running from tests. 
timeout = time.Second * time.Duration(defaultBackendTimeoutSeconds) } entries := p.getAndClearEntries() var wg sync.WaitGroup wg.Add(len(entries)) for _, e := range entries { go func(e *pingEntries) { defer wg.Done() p.publishEntries(e, timeout) }(e) } wg.Wait() } func (p *RoomPing) sendPingsDirect(ctx context.Context, room *Room, url *url.URL, entries []BackendPingEntry) error { request := NewBackendClientPingRequest(room.Id(), entries) var response BackendClientResponse return p.backend.PerformJSONRequest(ctx, url, request, &response) } func (p *RoomPing) sendPingsCombined(url *url.URL, entries []BackendPingEntry, limit int, timeout time.Duration) { total := len(entries) for idx := 0; idx < total; idx += limit { end := idx + limit if end > total { end = total } tosend := entries[idx:end] ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() request := NewBackendClientPingRequest("", tosend) var response BackendClientResponse if err := p.backend.PerformJSONRequest(ctx, url, request, &response); err != nil { log.Printf("Error sending combined ping session entries %+v to %s: %s", tosend, url, err) } } } func (p *RoomPing) SendPings(ctx context.Context, room *Room, url *url.URL, entries []BackendPingEntry) error { limit, _, found := p.capabilities.GetIntegerConfig(ctx, url, ConfigGroupSignaling, ConfigKeySessionPingLimit) if !found || limit <= 0 { // Old-style Nextcloud or session limit not configured. Perform one request // per room. Don't queue to avoid sending all ping requests to old-style // instances at the same time but distribute across the interval. 
return p.sendPingsDirect(ctx, room, url, entries) } key := url.String() p.mu.Lock() defer p.mu.Unlock() if existing, found := p.entries[key]; found { existing.Add(room, entries) return nil } if p.entries == nil { p.entries = make(map[string]*pingEntries) } p.entries[key] = newPingEntries(url, room, entries) return nil } func (p *RoomPing) DeleteRoom(room *Room) { p.mu.Lock() defer p.mu.Unlock() for _, entries := range p.entries { entries.RemoveRoom(room) } } nextcloud-spreed-signaling-1.2.4/room_ping_test.go000066400000000000000000000136671460321600400223440ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "net/http/httptest" "net/url" "testing" "github.com/gorilla/mux" ) func NewRoomPingForTest(t *testing.T) (*url.URL, *RoomPing) { r := mux.NewRouter() registerBackendHandler(t, r) server := httptest.NewServer(r) t.Cleanup(func() { server.Close() }) config, err := getTestConfig(server) if err != nil { t.Fatal(err) } backend, err := NewBackendClient(config, 1, "0.0", nil) if err != nil { t.Fatal(err) } p, err := NewRoomPing(backend, backend.capabilities) if err != nil { t.Fatal(err) } u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } return u, p } func TestSingleRoomPing(t *testing.T) { u, ping := NewRoomPingForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() room1 := &Room{ id: "sample-room-1", } entries1 := []BackendPingEntry{ { UserId: "foo", SessionId: "123", }, } if err := ping.SendPings(ctx, room1, u, entries1); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 1 { t.Errorf("expected one ping request, got %+v", requests) } else if len(requests[0].Ping.Entries) != 1 { t.Errorf("expected one entry, got %+v", requests[0].Ping.Entries) } clearPingRequests(t) room2 := &Room{ id: "sample-room-2", } entries2 := []BackendPingEntry{ { UserId: "bar", SessionId: "456", }, } if err := ping.SendPings(ctx, room2, u, entries2); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 1 { t.Errorf("expected one ping request, got %+v", requests) } else if len(requests[0].Ping.Entries) != 1 { t.Errorf("expected one entry, got %+v", requests[0].Ping.Entries) } clearPingRequests(t) ping.publishActiveSessions() if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } } func TestMultiRoomPing(t *testing.T) { u, ping := NewRoomPingForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() room1 := &Room{ id: "sample-room-1", } entries1 := 
[]BackendPingEntry{ { UserId: "foo", SessionId: "123", }, } if err := ping.SendPings(ctx, room1, u, entries1); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } room2 := &Room{ id: "sample-room-2", } entries2 := []BackendPingEntry{ { UserId: "bar", SessionId: "456", }, } if err := ping.SendPings(ctx, room2, u, entries2); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } ping.publishActiveSessions() if requests := getPingRequests(t); len(requests) != 1 { t.Errorf("expected one ping request, got %+v", requests) } else if len(requests[0].Ping.Entries) != 2 { t.Errorf("expected two entries, got %+v", requests[0].Ping.Entries) } } func TestMultiRoomPing_Separate(t *testing.T) { u, ping := NewRoomPingForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() room1 := &Room{ id: "sample-room-1", } entries1 := []BackendPingEntry{ { UserId: "foo", SessionId: "123", }, } if err := ping.SendPings(ctx, room1, u, entries1); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } entries2 := []BackendPingEntry{ { UserId: "bar", SessionId: "456", }, } if err := ping.SendPings(ctx, room1, u, entries2); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } ping.publishActiveSessions() if requests := getPingRequests(t); len(requests) != 1 { t.Errorf("expected one ping request, got %+v", requests) } else if len(requests[0].Ping.Entries) != 2 { t.Errorf("expected two entries, got %+v", requests[0].Ping.Entries) } } func TestMultiRoomPing_DeleteRoom(t *testing.T) { u, ping := NewRoomPingForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() room1 := 
&Room{ id: "sample-room-1", } entries1 := []BackendPingEntry{ { UserId: "foo", SessionId: "123", }, } if err := ping.SendPings(ctx, room1, u, entries1); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } room2 := &Room{ id: "sample-room-2", } entries2 := []BackendPingEntry{ { UserId: "bar", SessionId: "456", }, } if err := ping.SendPings(ctx, room2, u, entries2); err != nil { t.Error(err) } if requests := getPingRequests(t); len(requests) != 0 { t.Errorf("expected no ping requests, got %+v", requests) } ping.DeleteRoom(room2) ping.publishActiveSessions() if requests := getPingRequests(t); len(requests) != 1 { t.Errorf("expected one ping request, got %+v", requests) } else if len(requests[0].Ping.Entries) != 1 { t.Errorf("expected two entries, got %+v", requests[0].Ping.Entries) } } nextcloud-spreed-signaling-1.2.4/room_stats_prometheus.go000066400000000000000000000024741460321600400237530ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsRoomSessionsCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "signaling", Subsystem: "room", Name: "sessions", Help: "The current number of sessions in a room", }, []string{"backend", "room", "clienttype"}) roomStats = []prometheus.Collector{ statsRoomSessionsCurrent, } ) func RegisterRoomStats() { registerAll(roomStats...) } nextcloud-spreed-signaling-1.2.4/room_test.go000066400000000000000000000337051460321600400213220ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "bytes" "context" "encoding/json" "fmt" "io" "strconv" "testing" "time" "github.com/gorilla/websocket" ) func TestRoom_InCall(t *testing.T) { type Testcase struct { Value interface{} InCall bool Valid bool } tests := []Testcase{ {nil, false, false}, {"a", false, false}, {true, true, true}, {false, false, true}, {0, false, true}, {FlagDisconnected, false, true}, {1, true, true}, {FlagInCall, true, true}, {2, false, true}, {FlagWithAudio, false, true}, {3, true, true}, {FlagInCall | FlagWithAudio, true, true}, {4, false, true}, {FlagWithVideo, false, true}, {5, true, true}, {FlagInCall | FlagWithVideo, true, true}, {1.1, true, true}, {json.Number("1"), true, true}, {json.Number("1.1"), false, false}, } for _, test := range tests { inCall, ok := IsInCall(test.Value) if ok != test.Valid { t.Errorf("%+v should be valid %v, got %v", test.Value, test.Valid, ok) } if inCall != test.InCall { t.Errorf("%+v should convert to %v, got %v", test.Value, test.InCall, inCall) } } } func TestRoom_Update(t *testing.T) { hub, _, router, server := CreateHubForTest(t) config, err := getTestConfig(server) if err != nil { t.Fatal(err) } b, err := NewBackendServer(config, hub, "no-version") if err != nil { t.Fatal(err) } if err := b.Start(router); err != nil { t.Fatal(err) } client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Simulate backend request from Nextcloud to update the room. 
roomProperties := json.RawMessage("{\"foo\":\"bar\"}") msg := &BackendServerRoomRequest{ Type: "update", Update: &BackendRoomUpdateRequest{ UserIds: []string{ testDefaultUserId, }, Properties: &roomProperties, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } // The client receives a roomlist update and a changed room event. The // ordering is not defined because messages are sent by asynchronous event // handlers. message1, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) } message2, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) } if msg, err := checkMessageRoomlistUpdate(message1); err != nil { if err := checkMessageRoomId(message1, roomId); err != nil { t.Error(err) } if msg, err := checkMessageRoomlistUpdate(message2); err != nil { t.Error(err) } else if msg.RoomId != roomId { t.Errorf("Expected room id %s, got %+v", roomId, msg) } else if msg.Properties == nil || !bytes.Equal(*msg.Properties, roomProperties) { t.Errorf("Expected room properties %s, got %+v", string(roomProperties), msg) } } else { if msg.RoomId != roomId { t.Errorf("Expected room id %s, got %+v", roomId, msg) } else if msg.Properties == nil || !bytes.Equal(*msg.Properties, roomProperties) { t.Errorf("Expected room properties %s, got %+v", string(roomProperties), msg) } if err := checkMessageRoomId(message2, roomId); err != nil { t.Error(err) } } // Allow up to 100 milliseconds for asynchronous event processing. ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel2() loop: for { select { case <-ctx2.Done(): break loop default: // The internal room has been updated with the new properties. 
if room := hub.getRoom(roomId); room == nil { err = fmt.Errorf("Room %s not found in hub", roomId) } else if room.Properties() == nil || !bytes.Equal(*room.Properties(), roomProperties) { err = fmt.Errorf("Expected room properties %s, got %+v", string(roomProperties), room.Properties()) } else { err = nil } } if err == nil { break } time.Sleep(time.Millisecond) } if err != nil { t.Error(err) } } func TestRoom_Delete(t *testing.T) { hub, _, router, server := CreateHubForTest(t) config, err := getTestConfig(server) if err != nil { t.Fatal(err) } b, err := NewBackendServer(config, hub, "no-version") if err != nil { t.Fatal(err) } if err := b.Start(router); err != nil { t.Fatal(err) } client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event. if err := client.RunUntilJoined(ctx, hello.Hello); err != nil { t.Error(err) } // Simulate backend request from Nextcloud to update the room. msg := &BackendServerRoomRequest{ Type: "delete", Delete: &BackendRoomDeleteRequest{ UserIds: []string{ testDefaultUserId, }, }, } data, err := json.Marshal(msg) if err != nil { t.Fatal(err) } res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data) if err != nil { t.Fatal(err) } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { t.Error(err) } if res.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res.Status, string(body)) } // The client is no longer invited to the room and leaves it. 
The ordering // of messages is not defined as they get published through events and handled // by asynchronous channels. message1, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) } if err := checkMessageType(message1, "event"); err != nil { // Ordering should be "leave room", "disinvited". if err := checkMessageRoomId(message1, ""); err != nil { t.Error(err) } message2, err := client.RunUntilMessage(ctx) if err != nil { t.Error(err) } if _, err := checkMessageRoomlistDisinvite(message2); err != nil { t.Error(err) } } else { // Ordering should be "disinvited", "leave room". if _, err := checkMessageRoomlistDisinvite(message1); err != nil { t.Error(err) } message2, err := client.RunUntilMessage(ctx) if err != nil { // The connection should get closed after the "disinvited". if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { t.Error(err) } } else if err := checkMessageRoomId(message2, ""); err != nil { t.Error(err) } } // Allow up to 100 milliseconds for asynchronous event processing. ctx2, cancel2 := context.WithTimeout(ctx, 100*time.Millisecond) defer cancel2() loop: for { select { case <-ctx2.Done(): break loop default: // The internal room has been updated with the new properties. 
hub.ru.Lock() _, found := hub.rooms[roomId] hub.ru.Unlock() if found { err = fmt.Errorf("Room %s still found in hub", roomId) } else { err = nil } } if err == nil { break } time.Sleep(time.Millisecond) } if err != nil { t.Error(err) } } func TestRoom_RoomSessionData(t *testing.T) { hub, _, router, server := CreateHubForTest(t) config, err := getTestConfig(server) if err != nil { t.Fatal(err) } b, err := NewBackendServer(config, hub, "no-version") if err != nil { t.Fatal(err) } if err := b.Start(router); err != nil { t.Fatal(err) } client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(authAnonymousUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello, err := client.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room-with-sessiondata" if room, err := client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // We will receive a "joined" event with the userid from the room session data. 
expected := "userid-from-sessiondata" if message, err := client.RunUntilMessage(ctx); err != nil { t.Error(err) } else if err := client.checkMessageJoinedSession(message, hello.Hello.SessionId, expected); err != nil { t.Error(err) } else if message.Event.Join[0].RoomSessionId != roomId+"-"+hello.Hello.SessionId { t.Errorf("Expected join room session id %s, got %+v", roomId+"-"+hello.Hello.SessionId, message.Event.Join[0]) } session := hub.GetSessionByPublicId(hello.Hello.SessionId) if session == nil { t.Fatalf("Could not find session %s", hello.Hello.SessionId) } if userid := session.UserId(); userid != expected { t.Errorf("Expected userid %s, got %s", expected, userid) } room := hub.getRoom(roomId) if room == nil { t.Fatalf("Room not found") } entries, wg := room.publishActiveSessions() if entries != 1 { t.Errorf("expected 1 entries, got %d", entries) } wg.Wait() } func TestRoom_InCallAll(t *testing.T) { hub, _, router, server := CreateHubForTest(t) config, err := getTestConfig(server) if err != nil { t.Fatal(err) } b, err := NewBackendServer(config, hub, "no-version") if err != nil { t.Fatal(err) } if err := b.Start(router); err != nil { t.Fatal(err) } client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. 
roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil { t.Error(err) } if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } if err := client2.RunUntilJoined(ctx, hello1.Hello, hello2.Hello); err != nil { t.Error(err) } if err := client1.RunUntilJoined(ctx, hello2.Hello); err != nil { t.Error(err) } // Simulate backend request from Nextcloud to update the "inCall" flag of all participants. msg1 := &BackendServerRoomRequest{ Type: "incall", InCall: &BackendRoomInCallRequest{ All: true, InCall: json.RawMessage(strconv.FormatInt(FlagInCall, 10)), }, } data1, err := json.Marshal(msg1) if err != nil { t.Fatal(err) } res1, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data1) if err != nil { t.Fatal(err) } defer res1.Body.Close() body1, err := io.ReadAll(res1.Body) if err != nil { t.Error(err) } if res1.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res1.Status, string(body1)) } if msg, err := client1.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else if err := checkMessageInCallAll(msg, roomId, FlagInCall); err != nil { t.Fatal(err) } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else if err := checkMessageInCallAll(msg, roomId, FlagInCall); err != nil { t.Fatal(err) } // Simulate backend request from Nextcloud to update the "inCall" flag of all participants. 
msg2 := &BackendServerRoomRequest{ Type: "incall", InCall: &BackendRoomInCallRequest{ All: true, InCall: json.RawMessage(strconv.FormatInt(0, 10)), }, } data2, err := json.Marshal(msg2) if err != nil { t.Fatal(err) } res2, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data2) if err != nil { t.Fatal(err) } defer res2.Body.Close() body2, err := io.ReadAll(res2.Body) if err != nil { t.Error(err) } if res2.StatusCode != 200 { t.Errorf("Expected successful request, got %s: %s", res2.Status, string(body2)) } if msg, err := client1.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else if err := checkMessageInCallAll(msg, roomId, 0); err != nil { t.Fatal(err) } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else if err := checkMessageInCallAll(msg, roomId, 0); err != nil { t.Fatal(err) } } nextcloud-spreed-signaling-1.2.4/roomsessions.go000066400000000000000000000023771460321600400220530ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "fmt" ) var ( ErrNoSuchRoomSession = fmt.Errorf("unknown room session id") ) type RoomSessions interface { SetRoomSession(session Session, roomSessionId string) error DeleteRoomSession(session Session) GetSessionId(roomSessionId string) (string, error) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) } nextcloud-spreed-signaling-1.2.4/roomsessions_builtin.go000066400000000000000000000072301460321600400235720ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "errors" "log" "sync" "sync/atomic" ) type BuiltinRoomSessions struct { sessionIdToRoomSession map[string]string roomSessionToSessionid map[string]string mu sync.RWMutex clients *GrpcClients } func NewBuiltinRoomSessions(clients *GrpcClients) (RoomSessions, error) { return &BuiltinRoomSessions{ sessionIdToRoomSession: make(map[string]string), roomSessionToSessionid: make(map[string]string), clients: clients, }, nil } func (r *BuiltinRoomSessions) SetRoomSession(session Session, roomSessionId string) error { if roomSessionId == "" { r.DeleteRoomSession(session) return nil } if sid := session.PublicId(); sid != "" { r.mu.Lock() defer r.mu.Unlock() if prev, found := r.sessionIdToRoomSession[sid]; found { if prev == roomSessionId { return nil } delete(r.roomSessionToSessionid, prev) } r.sessionIdToRoomSession[sid] = roomSessionId r.roomSessionToSessionid[roomSessionId] = sid } return nil } func (r *BuiltinRoomSessions) DeleteRoomSession(session Session) { if sid := session.PublicId(); sid != "" { r.mu.Lock() defer r.mu.Unlock() if roomSessionId, found := r.sessionIdToRoomSession[sid]; found { delete(r.sessionIdToRoomSession, sid) if r.roomSessionToSessionid[roomSessionId] == sid { delete(r.roomSessionToSessionid, roomSessionId) } } } } func (r *BuiltinRoomSessions) GetSessionId(roomSessionId string) (string, error) { r.mu.RLock() defer r.mu.RUnlock() sid, found := r.roomSessionToSessionid[roomSessionId] if !found { return "", ErrNoSuchRoomSession } return sid, nil } func (r *BuiltinRoomSessions) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) { sid, err := r.GetSessionId(roomSessionId) if err == nil { return sid, nil } if r.clients == nil { return "", ErrNoSuchRoomSession } clients := r.clients.GetClients() if len(clients) == 0 { return "", ErrNoSuchRoomSession } lookupctx, cancel := context.WithCancel(ctx) defer cancel() var wg sync.WaitGroup var result atomic.Value for _, 
client := range clients { wg.Add(1) go func(client *GrpcClient) { defer wg.Done() sid, err := client.LookupSessionId(lookupctx, roomSessionId, disconnectReason) if errors.Is(err, context.Canceled) { return } else if err != nil { log.Printf("Received error while checking for room session id %s on %s: %s", roomSessionId, client.Target(), err) return } else if sid == "" { log.Printf("Received empty session id for room session id %s from %s", roomSessionId, client.Target()) return } cancel() // Cancel pending RPC calls. result.Store(sid) }(client) } wg.Wait() value := result.Load() if value == nil { return "", ErrNoSuchRoomSession } return value.(string), nil } nextcloud-spreed-signaling-1.2.4/roomsessions_builtin_test.go000066400000000000000000000020661460321600400246330ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "testing" ) func TestBuiltinRoomSessions(t *testing.T) { sessions, err := NewBuiltinRoomSessions(nil) if err != nil { t.Fatal(err) } testRoomSessions(t, sessions) } nextcloud-spreed-signaling-1.2.4/roomsessions_test.go000066400000000000000000000111351460321600400231020ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "encoding/json" "errors" "net/url" "testing" "time" ) type DummySession struct { publicId string } func (s *DummySession) PrivateId() string { return "" } func (s *DummySession) PublicId() string { return s.publicId } func (s *DummySession) ClientType() string { return "" } func (s *DummySession) Data() *SessionIdData { return nil } func (s *DummySession) UserId() string { return "" } func (s *DummySession) UserData() *json.RawMessage { return nil } func (s *DummySession) Backend() *Backend { return nil } func (s *DummySession) BackendUrl() string { return "" } func (s *DummySession) ParsedBackendUrl() *url.URL { return nil } func (s *DummySession) SetRoom(room *Room) { } func (s *DummySession) GetRoom() *Room { return nil } func (s *DummySession) LeaveRoom(notify bool) *Room { return nil } func (s *DummySession) IsExpired(now time.Time) bool { return false } func (s *DummySession) Close() { } func (s *DummySession) HasPermission(permission Permission) bool { return false } func checkSession(t *testing.T, sessions RoomSessions, sessionId string, roomSessionId string) Session { session := &DummySession{ publicId: sessionId, } if err := sessions.SetRoomSession(session, roomSessionId); err != nil { 
t.Fatalf("Expected no error, got %s", err) } if sid, err := sessions.GetSessionId(roomSessionId); err != nil { t.Errorf("Expected session id %s, got error %s", sessionId, err) } else if sid != sessionId { t.Errorf("Expected session id %s, got %s", sessionId, sid) } return session } func testRoomSessions(t *testing.T, sessions RoomSessions) { if sid, err := sessions.GetSessionId("unknown"); err != nil && err != ErrNoSuchRoomSession { t.Errorf("Expected error about invalid room session, got %s", err) } else if err == nil { t.Errorf("Expected error about invalid room session, got session id %s", sid) } s1 := checkSession(t, sessions, "session1", "room1") s2 := checkSession(t, sessions, "session2", "room2") if sid, err := sessions.GetSessionId("room1"); err != nil { t.Errorf("Expected session id %s, got error %s", s1.PublicId(), err) } else if sid != s1.PublicId() { t.Errorf("Expected session id %s, got %s", s1.PublicId(), sid) } sessions.DeleteRoomSession(s1) if sid, err := sessions.GetSessionId("room1"); err != nil && err != ErrNoSuchRoomSession { t.Errorf("Expected error about invalid room session, got %s", err) } else if err == nil { t.Errorf("Expected error about invalid room session, got session id %s", sid) } if sid, err := sessions.GetSessionId("room2"); err != nil { t.Errorf("Expected session id %s, got error %s", s2.PublicId(), err) } else if sid != s2.PublicId() { t.Errorf("Expected session id %s, got %s", s2.PublicId(), sid) } if err := sessions.SetRoomSession(s1, "room-session"); err != nil { t.Error(err) } if err := sessions.SetRoomSession(s2, "room-session"); err != nil { t.Error(err) } sessions.DeleteRoomSession(s1) if sid, err := sessions.GetSessionId("room-session"); err != nil { t.Errorf("Expected session id %s, got error %s", s2.PublicId(), err) } else if sid != s2.PublicId() { t.Errorf("Expected session id %s, got %s", s2.PublicId(), sid) } if err := sessions.SetRoomSession(s2, "room-session2"); err != nil { t.Error(err) } if sid, err := 
sessions.GetSessionId("room-session"); err == nil { t.Errorf("expected error %s, got sid %s", ErrNoSuchRoomSession, sid) } else if !errors.Is(err, ErrNoSuchRoomSession) { t.Errorf("expected %s, got %s", ErrNoSuchRoomSession, err) } if sid, err := sessions.GetSessionId("room-session2"); err != nil { t.Errorf("Expected session id %s, got error %s", s2.PublicId(), err) } else if sid != s2.PublicId() { t.Errorf("Expected session id %s, got %s", s2.PublicId(), sid) } } nextcloud-spreed-signaling-1.2.4/scripts/000077500000000000000000000000001460321600400204375ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/scripts/get-version.sh000077500000000000000000000013531460321600400232420ustar00rootroot00000000000000#!/usr/bin/env bash set -e ROOT="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" VERSION= if [ -s "$ROOT/../version.txt" ]; then VERSION=$(tr -d '[:space:]' < "$ROOT/../version.txt") fi if [ -z "$VERSION" ] && [ -d "$ROOT/../.git" ]; then TAG=$(git tag --points-at HEAD | sed 's/v//') if [ "$1" == "--tar" ]; then VERSION=$(git describe --dirty --tags --always | sed 's/debian\///g') elif [ -n "$TAG" ]; then VERSION="$TAG" else VERSION=$(git log -1 --pretty=%H) fi if [ -f "/.dockerenv" ]; then VERSION="$VERSION~docker" elif grep -sq 'docker\|lxc' /proc/1/cgroup; then VERSION="$VERSION~docker" fi fi if [ -z "$VERSION" ]; then VERSION=unknown fi echo $VERSION nextcloud-spreed-signaling-1.2.4/scripts/get_continent_map.py000077500000000000000000000052451460321600400245170ustar00rootroot00000000000000#!/usr/bin/env python3 # # Standalone signaling server for the Nextcloud Spreed app. # Copyright (C) 2019 struktur AG # # @author Joachim Bauch # # @license GNU AGPL version 3 or any later version # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # try: # Fallback for Python2 from cStringIO import StringIO except ImportError: from io import StringIO import csv import subprocess import sys URL = 'https://github.com/datasets/country-codes/raw/master/data/country-codes.csv' def tostr(s): if isinstance(s, bytes) and not isinstance(s, str): s = s.decode('utf-8') return s try: unicode except NameError: # Python 3 files are returning bytes by default. def opentextfile(filename, mode): if 'b' in mode: mode = mode.replace('b', '') return open(filename, mode, encoding='utf-8') else: def opentextfile(filename, mode): return open(filename, mode) def generate_map(filename): data = subprocess.check_output([ 'curl', '-L', URL, ]) reader = csv.DictReader(StringIO(tostr(data)), delimiter=',') continents = {} for entry in reader: country = entry['ISO3166-1-Alpha-2'] continent = entry['Continent'] if not country and not continent: continue continents.setdefault(country, []).append(continent) out = StringIO() out.write('package signaling\n') out.write('\n') out.write('// This file has been automatically generated, do not modify.\n') out.write('// Source: %s\n' % (URL)) out.write('\n') out.write('var (\n') out.write('\tContinentMap = map[string][]string{\n') for country, continents in sorted(continents.items()): value = [] for continent in continents: value.append('"%s"' % (continent)) out.write('\t\t"%s": {%s},\n' % (country, ', '.join(value))) out.write('\t}\n') out.write(')\n') with opentextfile(filename, 'wb') as fp: fp.write(out.getvalue()) def main(): if len(sys.argv) != 2: sys.stderr.write('USAGE: %s \n' % (sys.argv[0])) sys.exit(1) filename = sys.argv[1] 
generate_map(filename) if __name__ == '__main__': main() nextcloud-spreed-signaling-1.2.4/scripts/log-simplifier.sh000066400000000000000000000055311460321600400237210ustar00rootroot00000000000000#!/usr/bin/env bash # @copyright Copyright (c) 2023, Joas Schilling # # @license GNU AGPL version 3 or any later version # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # # Creates a neutralized and simplified log from the HPB logs by: # 1. Replacing "user sessions" with "userX" # 2. Replacing "room sessions" with "sessionX" # # E.g. 
the following line: # May 26 13:31:36 server nextcloud-spreed-signaling[726]: clientsession.go:425: Session tooJzGsaUllvfGXdh3-74-yjAt-L9gCQKrs_U-DlLkZ8PTBabmJub3J6bEFGNEV6UnRZdEl2ZHpvUnJKMDNtSkFxZHBxZWR1X3VWUkJuZWllc2VUNko0RTFxLURaRXJyYjJ6ckc4dFBJRVVhT0lOLWR5RGtCM1R2MFpRemd3ZUFSLU9qWDZkR3FEQjR6MGt6c3p0VG92NmhqeEFEQkotN1JHM0lnbHl2ODRlbVRDaUlnQlhFQ3M1U1U2MDl4eWc4SU5MR0xjdTlUa0xaSmJqWGJ8Njk4NzAxNTg2MQ== joined room token123 with room session id P9roBo5O0EnRR8N4r+64MMdSHO2tu2ffNqjtICwSG43AHWL3XKn6XYv9xdYCgUYufxiCzIvg/QQk7cv8Uda1uhyDgh1FLPLCdjUe4uHJWKXb31rHig3gm+FdvOEO3GHEcKlJyPtSZzTupiatpanalRvMi6xR3jIXYoGcuvc//R2gzKFYNZQKwGdXXLMHNNHTlHPSAqIoYyj3vo5B+BeG9G1zo9Pq1WC3Akr2dghASkc+KJTHtpT3NbFBCAAH7jH # is converted to: # May 26 13:31:36 server nextcloud-spreed-signaling[726]: clientsession.go:425: Session User151 joined room token123 with room session id Session120 # # Afterwards the script also creates a file per user and session # LOG_CONTENT="`cat $1`" USER_SESSIONS=$(echo "$LOG_CONTENT" | egrep -o '[-a-zA-Z0-9_]{294,}==' | sort | uniq) NUM_USER_SESSIONS=$(echo "$USER_SESSIONS" | wc -l) echo "User sessions found: $NUM_USER_SESSIONS" for i in $(seq 1 $NUM_USER_SESSIONS); do SESSION_NAME=$(echo "$USER_SESSIONS" | head -n $i | tail -n 1) LOG_CONTENT=$(echo "${LOG_CONTENT//$SESSION_NAME/user$i}") done ROOM_SESSIONS=$(echo "$LOG_CONTENT" | egrep -o '[-a-zA-Z0-9_+\/]{255}( |$)' | sort | uniq) NUM_ROOM_SESSIONS=$(echo "$ROOM_SESSIONS" | wc -l) echo "Room sessions found: $NUM_ROOM_SESSIONS" for i in $(seq 1 $NUM_ROOM_SESSIONS); do SESSION_NAME=$(echo "$ROOM_SESSIONS" | head -n $i | tail -n 1) LOG_CONTENT=$(echo "${LOG_CONTENT//$SESSION_NAME/session$i}") done echo "$LOG_CONTENT" > simple.log for i in $(seq 1 $NUM_USER_SESSIONS); do echo "$LOG_CONTENT" | egrep "user$i( |$)" > user$i.log done for i in $(seq 1 $NUM_ROOM_SESSIONS); do echo "$LOG_CONTENT" | egrep "session$i( |$)" > session$i.log done 
nextcloud-spreed-signaling-1.2.4/scripts/pre-commit.hook000077500000000000000000000027261460321600400234070ustar00rootroot00000000000000#!/bin/sh # # Check that Go files have been formatted # for file in `git diff-index --cached --name-only HEAD --diff-filter=ACMR| grep "\.go$"` ; do echo "Checking ${file} ..." # nf is the temporary checkout. This makes sure we check against the # revision in the index (and not the checked out version). nf=`git checkout-index --temp "${file}" | cut -f 1` newfile=`mktemp "/tmp/${nf}.XXXXXX"` || exit 1 gofmt ${nf} > "${newfile}" 2>> /dev/null diff -u -p "${nf}" "${newfile}" r=$? rm "${newfile}" rm "${nf}" if [ $r != 0 ] ; then echo "=================================================================================================" echo " Code format error in: $file " echo " " echo " Please fix before committing. Don't forget to run git add before trying to commit again. " echo " If the whole file is to be committed, this should work (run from the top-level directory): " echo " " echo " go fmt $file; git add $file; git commit" echo " " echo "=================================================================================================" exit 1 fi done nextcloud-spreed-signaling-1.2.4/server.conf.in000066400000000000000000000271151460321600400215400ustar00rootroot00000000000000[http] # IP and port to listen on for HTTP requests. # Comment line to disable the listener. #listen = 127.0.0.1:8080 # HTTP socket read timeout in seconds. #readtimeout = 15 # HTTP socket write timeout in seconds. #writetimeout = 15 [https] # IP and port to listen on for HTTPS requests. # Comment line to disable the listener. #listen = 127.0.0.1:8443 # HTTPS socket read timeout in seconds. #readtimeout = 15 # HTTPS socket write timeout in seconds. #writetimeout = 15 # Certificate / private key to use for the HTTPS server. certificate = /etc/nginx/ssl/server.crt key = /etc/nginx/ssl/server.key [app] # Set to "true" to install pprof debug handlers. 
# See "https://golang.org/pkg/net/http/pprof/" for further information. debug = false # Set to "true" to allow subscribing any streams. This is insecure and should # only be enabled for testing. By default only streams of users in the same # room and call can be subscribed. #allowsubscribeany = false [sessions] # Secret value used to generate checksums of sessions. This should be a random # string of 32 or 64 bytes. hashkey = the-secret-for-session-checksums # Optional key for encrypting data in the sessions. Must be either 16, 24 or # 32 bytes. # If no key is specified, data will not be encrypted (not recommended). blockkey = -encryption-key- [clients] # Shared secret for connections from internal clients. This must be the same # value as configured in the respective internal services. internalsecret = the-shared-secret-for-internal-clients [backend] # Type of backend configuration. # Defaults to "static". # # Possible values: # - static: A comma-separated list of backends is given in the "backends" option. # - etcd: Backends are retrieved from an etcd cluster. #backendtype = static # For backend type "static": # Comma-separated list of backend ids from which clients are allowed to connect # from. Each backend will have isolated rooms, i.e. clients connecting to room # "abc12345" on backend 1 will be in a different room than clients connected to # a room with the same name on backend 2. Also sessions connected from different # backends will not be able to communicate with each other. #backends = backend-id, another-backend # For backend type "etcd": # Key prefix of backend entries. All keys below will be watched and assumed to # contain a JSON document with the following entries: # - "url": Url of the Nextcloud instance. # - "secret": Shared secret for requests from and to the backend servers. # # Additional optional entries: # - "maxstreambitrate": Maximum bitrate per publishing stream (in bits per second). 
# - "maxscreenbitrate": Maximum bitrate per screensharing stream (in bits per second). # - "sessionlimit": Number of sessions that are allowed to connect. # # Example: # "/signaling/backend/one" -> {"url": "https://nextcloud.domain1.invalid", ...} # "/signaling/backend/two" -> {"url": "https://domain2.invalid/nextcloud", ...} #backendprefix = /signaling/backend # Allow any hostname as backend endpoint. This is extremely insecure and should # only be used while running the benchmark client against the server. allowall = false # Common shared secret for requests from and to the backend servers. Used if # "allowall" is enabled or as fallback for individual backends that don't have # their own secret set. # This must be the same value as configured in the Nextcloud admin ui. #secret = the-shared-secret-for-allowall # Timeout in seconds for requests to the backend. timeout = 10 # Maximum number of concurrent backend connections per host. connectionsperhost = 8 # If set to "true", certificate validation of backend endpoints will be skipped. # This should only be enabled during development, e.g. to work with self-signed # certificates. #skipverify = false # For backendtype "static": # Backend configurations as defined in the "[backend]" section above. The # section names must match the ids used in "backends" above. #[backend-id] # URL of the Nextcloud instance #url = https://cloud.domain.invalid # Shared secret for requests from and to the backend servers. Leave empty to use # the common shared secret from above. # This must be the same value as configured in the Nextcloud admin ui. #secret = the-shared-secret # Limit the number of sessions that are allowed to connect to this backend. # Omit or set to 0 to not limit the number of sessions. #sessionlimit = 10 # The maximum bitrate per publishing stream (in bits per second). # Defaults to the maximum bitrate configured for the proxy / MCU. 
#maxstreambitrate = 1048576 # The maximum bitrate per screensharing stream (in bits per second). # Defaults to the maximum bitrate configured for the proxy / MCU. #maxscreenbitrate = 2097152 #[another-backend] # URL of the Nextcloud instance #url = https://cloud.otherdomain.invalid # Shared secret for requests from and to the backend servers. Leave empty to use # the common shared secret from above. # This must be the same value as configured in the Nextcloud admin ui. #secret = the-shared-secret [nats] # Url of NATS backend to use. This can also be a list of URLs to connect to # multiple backends. For local development, this can be set to "nats://loopback" # to process NATS messages internally instead of sending them through an # external NATS backend. #url = nats://localhost:4222 [mcu] # The type of the MCU to use. Currently only "janus" and "proxy" are supported. # Leave empty to disable MCU functionality. #type = # For type "janus": the URL to the websocket endpoint of the MCU server. # For type "proxy": a space-separated list of proxy URLs to connect to. #url = # The maximum bitrate per publishing stream (in bits per second). # Defaults to 1 mbit/sec. # For type "proxy": will be capped to the maximum bitrate configured at the # proxy server that is used. #maxstreambitrate = 1048576 # The maximum bitrate per screensharing stream (in bits per second). # Default is 2 mbit/sec. # For type "proxy": will be capped to the maximum bitrate configured at the # proxy server that is used. #maxscreenbitrate = 2097152 # For type "proxy": timeout in seconds for requests to the proxy server. #proxytimeout = 2 # For type "proxy": type of URL configuration for proxy servers. # Defaults to "static". # # Possible values: # - static: A space-separated list of proxy URLs is given in the "url" option. # - etcd: Proxy URLs are retrieved from an etcd cluster (see below). #urltype = static # If set to "true", certificate validation of proxy servers will be skipped. 
# This should only be enabled during development, e.g. to work with self-signed # certificates. #skipverify = false # For type "proxy": the id of the token to use when connecting to proxy servers. #token_id = server1 # For type "proxy": the private key for the configured token id to use when # connecting to proxy servers. #token_key = privkey.pem # For url type "static": Enable DNS discovery on hostname of configured URL. # If the hostname resolves to multiple IP addresses, a connection is established # to each of them. # Changes to the DNS are monitored regularly and proxy connections are created # or deleted as necessary. #dnsdiscovery = true # For url type "etcd": Key prefix of MCU proxy entries. All keys below will be # watched and assumed to contain a JSON document. The entry "address" from this # document will be used as proxy URL, other contents in the document will be # ignored. # # Example: # "/signaling/proxy/server/one" -> {"address": "https://proxy1.domain.invalid"} # "/signaling/proxy/server/two" -> {"address": "https://proxy2.domain.invalid"} #keyprefix = /signaling/proxy/server [turn] # API key that the MCU will need to send when requesting TURN credentials. #apikey = the-api-key-for-the-rest-service # The shared secret to use for generating TURN credentials. This must be the # same as on the TURN server. #secret = 6d1c17a7-c736-4e22-b02c-e2955b7ecc64 # A comma-separated list of TURN servers to use. Leave empty to disable the # TURN REST API. #servers = turn:1.2.3.4:9991?transport=udp,turn:1.2.3.4:9991?transport=tcp [geoip] # License key to use when downloading the MaxMind GeoIP database. You can # register an account at "https://www.maxmind.com/en/geolite2/signup" for # free. See "https://dev.maxmind.com/geoip/geoip2/geolite2/" for further # information. # You can also get a free GeoIP database from https://db-ip.com/ without # registration. Provide the URL below in this case. # Leave empty to disable GeoIP lookups. 
#license = # Optional URL to download a MaxMind GeoIP database from. Will be generated if # "license" is provided above. Can be a "file://" url if a local file should # be used. Please note that the database must provide a country field when # looking up IP addresses. #url = [geoip-overrides] # Optional overrides for GeoIP lookups. The key is an IP address / range, the # value the associated country code. #127.0.0.1 = DE #192.168.0.0/24 = DE [continent-overrides] # Optional overrides for continent mappings. The key is a continent code, the # value a comma-separated list of continent codes to map the continent to. # Use European servers for clients in Africa. #AF = EU # Use servers in North Africa for clients in South America. #SA = NA [stats] # Comma-separated list of IP addresses that are allowed to access the stats # endpoint. Leave empty (or commented) to only allow access from "127.0.0.1". #allowed_ips = [etcd] # Comma-separated list of static etcd endpoints to connect to. #endpoints = 127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 # Options to perform endpoint discovery through DNS SRV. # Only used if no endpoints are configured manually. #discoverysrv = example.com #discoveryservice = foo # Path to private key, client certificate and CA certificate if TLS # authentication should be used. #clientkey = /path/to/etcd-client.key #clientcert = /path/to/etcd-client.crt #cacert = /path/to/etcd-ca.crt [grpc] # IP and port to listen on for GRPC requests. # Comment line to disable the listener. #listen = 0.0.0.0:9090 # Certificate / private key to use for the GRPC server. # Omit to use unencrypted connections. #servercertificate = /path/to/grpc-server.crt #serverkey = /path/to/grpc-server.key # CA certificate that is allowed to issue certificates of GRPC servers. # Omit to expect unencrypted connections. #serverca = /path/to/grpc-ca.crt # Certificate / private key to use for the GRPC client. # Omit if clients don't need to authenticate on the server. 
#clientcertificate = /path/to/grpc-client.crt #clientkey = /path/to/grpc-client.key # CA certificate that is allowed to issue certificates of GRPC clients. # Omit to allow any clients to connect. #clientca = /path/to/grpc-ca.crt # Type of GRPC target configuration. # Defaults to "static". # # Possible values: # - static: A comma-separated list of targets is given in the "targets" option. # - etcd: Target URLs are retrieved from an etcd cluster. #targettype = static # For target type "static": Comma-separated list of GRPC targets to connect to # for clustering mode. #targets = 192.168.0.1:9090, 192.168.0.2:9090 # For target type "static": Enable DNS discovery on hostnames of GRPC target. # If a hostname resolves to multiple IP addresses, a connection is established # to each of them. # Changes to the DNS are monitored regularly and GRPC clients are created or # deleted as necessary. #dnsdiscovery = true # For target type "etcd": Key prefix of GRPC target entries. All keys below will # be watched and assumed to contain a JSON document. The entry "address" from # this document will be used as target URL, other contents in the document will # be ignored. # # Example: # "/signaling/cluster/grpc/one" -> {"address": "192.168.0.1:9090"} # "/signaling/cluster/grpc/two" -> {"address": "192.168.0.2:9090"} #targetprefix = /signaling/cluster/grpc nextcloud-spreed-signaling-1.2.4/server/000077500000000000000000000000001460321600400202565ustar00rootroot00000000000000nextcloud-spreed-signaling-1.2.4/server/main.go000066400000000000000000000246521460321600400215420ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package main import ( "crypto/tls" "flag" "fmt" "log" "net" "net/http" "net/http/pprof" "os" "os/signal" "runtime" runtimepprof "runtime/pprof" "strings" "syscall" "time" "github.com/dlintw/goconf" "github.com/gorilla/mux" "github.com/nats-io/nats.go" signaling "github.com/strukturag/nextcloud-spreed-signaling" ) var ( version = "unreleased" configFlag = flag.String("config", "server.conf", "config file to use") cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") memprofile = flag.String("memprofile", "", "write memory profile to file") showVersion = flag.Bool("version", false, "show version and quit") ) const ( defaultReadTimeout = 15 defaultWriteTimeout = 15 initialMcuRetry = time.Second maxMcuRetry = time.Second * 16 dnsMonitorInterval = time.Second ) func createListener(addr string) (net.Listener, error) { if addr[0] == '/' { os.Remove(addr) return net.Listen("unix", addr) } return net.Listen("tcp", addr) } func createTLSListener(addr string, certFile, keyFile string) (net.Listener, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, err } config := tls.Config{ Certificates: []tls.Certificate{cert}, } if addr[0] == '/' { os.Remove(addr) return tls.Listen("unix", addr, &config) } 
return tls.Listen("tcp", addr, &config) } func main() { log.SetFlags(log.Lshortfile) flag.Parse() if *showVersion { fmt.Printf("nextcloud-spreed-signaling version %s/%s\n", version, runtime.Version()) os.Exit(0) } sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, os.Interrupt) signal.Notify(sigChan, syscall.SIGHUP) if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } if err := runtimepprof.StartCPUProfile(f); err != nil { log.Fatalf("Error writing CPU profile to %s: %s", *cpuprofile, err) } log.Printf("Writing CPU profile to %s ...", *cpuprofile) defer runtimepprof.StopCPUProfile() } if *memprofile != "" { f, err := os.Create(*memprofile) if err != nil { log.Fatal(err) } defer func() { log.Printf("Writing Memory profile to %s ...", *memprofile) runtime.GC() if err := runtimepprof.WriteHeapProfile(f); err != nil { log.Printf("Error writing Memory profile to %s: %s", *memprofile, err) } }() } log.Printf("Starting up version %s/%s as pid %d", version, runtime.Version(), os.Getpid()) config, err := goconf.ReadConfigFile(*configFlag) if err != nil { log.Fatal("Could not read configuration: ", err) } cpus := runtime.NumCPU() runtime.GOMAXPROCS(cpus) log.Printf("Using a maximum of %d CPUs", cpus) signaling.RegisterStats() natsUrl, _ := config.GetString("nats", "url") if natsUrl == "" { natsUrl = nats.DefaultURL } events, err := signaling.NewAsyncEvents(natsUrl) if err != nil { log.Fatal("Could not create async events client: ", err) } defer events.Close() dnsMonitor, err := signaling.NewDnsMonitor(dnsMonitorInterval) if err != nil { log.Fatal("Could not create DNS monitor: ", err) } if err := dnsMonitor.Start(); err != nil { log.Fatal("Could not start DNS monitor: ", err) } defer dnsMonitor.Stop() etcdClient, err := signaling.NewEtcdClient(config, "mcu") if err != nil { log.Fatalf("Could not create etcd client: %s", err) } defer func() { if err := etcdClient.Close(); err != nil { log.Printf("Error while closing etcd client: %s", 
err) } }() rpcServer, err := signaling.NewGrpcServer(config) if err != nil { log.Fatalf("Could not create RPC server: %s", err) } go func() { if err := rpcServer.Run(); err != nil { log.Fatalf("Could not start RPC server: %s", err) } }() defer rpcServer.Close() rpcClients, err := signaling.NewGrpcClients(config, etcdClient, dnsMonitor) if err != nil { log.Fatalf("Could not create RPC clients: %s", err) } defer rpcClients.Close() r := mux.NewRouter() hub, err := signaling.NewHub(config, events, rpcServer, rpcClients, etcdClient, r, version) if err != nil { log.Fatal("Could not create hub: ", err) } mcuUrl, _ := config.GetString("mcu", "url") mcuType, _ := config.GetString("mcu", "type") if mcuType == "" && mcuUrl != "" { log.Printf("WARNING: Old-style MCU configuration detected with url but no type, defaulting to type %s", signaling.McuTypeJanus) mcuType = signaling.McuTypeJanus } else if mcuType == signaling.McuTypeJanus && mcuUrl == "" { log.Printf("WARNING: Old-style MCU configuration detected with type but no url, disabling") mcuType = "" } if mcuType != "" { var mcu signaling.Mcu mcuRetry := initialMcuRetry mcuRetryTimer := time.NewTimer(mcuRetry) mcuTypeLoop: for { switch mcuType { case signaling.McuTypeJanus: mcu, err = signaling.NewMcuJanus(mcuUrl, config) signaling.UnregisterProxyMcuStats() signaling.RegisterJanusMcuStats() case signaling.McuTypeProxy: mcu, err = signaling.NewMcuProxy(config, etcdClient, rpcClients, dnsMonitor) signaling.UnregisterJanusMcuStats() signaling.RegisterProxyMcuStats() default: log.Fatal("Unsupported MCU type: ", mcuType) } if err == nil { err = mcu.Start() if err != nil { log.Printf("Could not create %s MCU: %s", mcuType, err) } } if err == nil { break } log.Printf("Could not initialize %s MCU (%s) will retry in %s", mcuType, err, mcuRetry) mcuRetryTimer.Reset(mcuRetry) select { case sig := <-sigChan: switch sig { case os.Interrupt: log.Fatalf("Cancelled") case syscall.SIGHUP: log.Printf("Received SIGHUP, reloading %s", 
*configFlag) if config, err = goconf.ReadConfigFile(*configFlag); err != nil { log.Printf("Could not read configuration from %s: %s", *configFlag, err) } else { mcuUrl, _ = config.GetString("mcu", "url") mcuType, _ = config.GetString("mcu", "type") if mcuType == "" && mcuUrl != "" { log.Printf("WARNING: Old-style MCU configuration detected with url but no type, defaulting to type %s", signaling.McuTypeJanus) mcuType = signaling.McuTypeJanus } else if mcuType == signaling.McuTypeJanus && mcuUrl == "" { log.Printf("WARNING: Old-style MCU configuration detected with type but no url, disabling") mcuType = "" break mcuTypeLoop } } } case <-mcuRetryTimer.C: // Retry connection mcuRetry = mcuRetry * 2 if mcuRetry > maxMcuRetry { mcuRetry = maxMcuRetry } } } if mcu != nil { defer mcu.Stop() log.Printf("Using %s MCU", mcuType) hub.SetMcu(mcu) } } go hub.Run() defer hub.Stop() server, err := signaling.NewBackendServer(config, hub, version) if err != nil { log.Fatal("Could not create backend server: ", err) } if err := server.Start(r); err != nil { log.Fatal("Could not start backend server: ", err) } if debug, _ := config.GetBool("app", "debug"); debug { log.Println("Installing debug handlers in \"/debug/pprof\"") r.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) r.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) r.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) r.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) r.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) for _, profile := range runtimepprof.Profiles() { name := profile.Name() r.Handle("/debug/pprof/"+name, pprof.Handler(name)) } } if saddr, _ := config.GetString("https", "listen"); saddr != "" { cert, _ := config.GetString("https", "certificate") key, _ := config.GetString("https", "key") if cert == "" || key == "" { log.Fatal("Need a certificate and key for the HTTPS listener") } readTimeout, _ := config.GetInt("https", "readtimeout") if readTimeout <= 0 { 
readTimeout = defaultReadTimeout } writeTimeout, _ := config.GetInt("https", "writetimeout") if writeTimeout <= 0 { writeTimeout = defaultWriteTimeout } for _, address := range strings.Split(saddr, " ") { go func(address string) { log.Println("Listening on", address) listener, err := createTLSListener(address, cert, key) if err != nil { log.Fatal("Could not start listening: ", err) } srv := &http.Server{ Handler: r, ReadTimeout: time.Duration(readTimeout) * time.Second, WriteTimeout: time.Duration(writeTimeout) * time.Second, } if err := srv.Serve(listener); err != nil { log.Fatal("Could not start server: ", err) } }(address) } } if addr, _ := config.GetString("http", "listen"); addr != "" { readTimeout, _ := config.GetInt("http", "readtimeout") if readTimeout <= 0 { readTimeout = defaultReadTimeout } writeTimeout, _ := config.GetInt("http", "writetimeout") if writeTimeout <= 0 { writeTimeout = defaultWriteTimeout } for _, address := range strings.Split(addr, " ") { go func(address string) { log.Println("Listening on", address) listener, err := createListener(address) if err != nil { log.Fatal("Could not start listening: ", err) } srv := &http.Server{ Handler: r, Addr: addr, ReadTimeout: time.Duration(readTimeout) * time.Second, WriteTimeout: time.Duration(writeTimeout) * time.Second, } if err := srv.Serve(listener); err != nil { log.Fatal("Could not start server: ", err) } }(address) } } loop: for sig := range sigChan { switch sig { case os.Interrupt: log.Println("Interrupted") break loop case syscall.SIGHUP: log.Printf("Received SIGHUP, reloading %s", *configFlag) if config, err := goconf.ReadConfigFile(*configFlag); err != nil { log.Printf("Could not read configuration from %s: %s", *configFlag, err) } else { hub.Reload(config) } } } } nextcloud-spreed-signaling-1.2.4/session.go000066400000000000000000000041741460321600400207700ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "encoding/json" "net/url" "time" ) type Permission string var ( PERMISSION_MAY_PUBLISH_MEDIA Permission = "publish-media" PERMISSION_MAY_PUBLISH_AUDIO Permission = "publish-audio" PERMISSION_MAY_PUBLISH_VIDEO Permission = "publish-video" PERMISSION_MAY_PUBLISH_SCREEN Permission = "publish-screen" PERMISSION_MAY_CONTROL Permission = "control" PERMISSION_TRANSIENT_DATA Permission = "transient-data" PERMISSION_HIDE_DISPLAYNAMES Permission = "hide-displaynames" // DefaultPermissionOverrides contains permission overrides for users where // no permissions have been set by the server. If a permission is not set in // this map, it's assumed the user has that permission. 
DefaultPermissionOverrides = map[Permission]bool{ PERMISSION_HIDE_DISPLAYNAMES: false, } ) type SessionIdData struct { Sid uint64 Created time.Time BackendId string } type Session interface { PrivateId() string PublicId() string ClientType() string Data() *SessionIdData UserId() string UserData() *json.RawMessage Backend() *Backend BackendUrl() string ParsedBackendUrl() *url.URL SetRoom(room *Room) GetRoom() *Room LeaveRoom(notify bool) *Room IsExpired(now time.Time) bool Close() HasPermission(permission Permission) bool } nextcloud-spreed-signaling-1.2.4/session_test.go000066400000000000000000000025521460321600400220250ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "testing" ) func assertSessionHasPermission(t *testing.T, session Session, permission Permission) { t.Helper() if !session.HasPermission(permission) { t.Errorf("Session %s doesn't have permission %s", session.PublicId(), permission) } } func assertSessionHasNotPermission(t *testing.T, session Session, permission Permission) { t.Helper() if session.HasPermission(permission) { t.Errorf("Session %s has permission %s but shouldn't", session.PublicId(), permission) } } nextcloud-spreed-signaling-1.2.4/single_notifier.go000066400000000000000000000044701460321600400224640ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "sync" ) type SingleWaiter struct { root bool ch chan struct{} once sync.Once } func newSingleWaiter() *SingleWaiter { return &SingleWaiter{ root: true, ch: make(chan struct{}), } } func (w *SingleWaiter) subWaiter() *SingleWaiter { return &SingleWaiter{ ch: w.ch, } } func (w *SingleWaiter) Wait(ctx context.Context) error { select { case <-w.ch: return nil case <-ctx.Done(): return ctx.Err() } } func (w *SingleWaiter) cancel() { if !w.root { return } w.once.Do(func() { close(w.ch) }) } type SingleNotifier struct { sync.Mutex waiter *SingleWaiter waiters map[*SingleWaiter]bool } func (n *SingleNotifier) NewWaiter() *SingleWaiter { n.Lock() defer n.Unlock() if n.waiter == nil { n.waiter = newSingleWaiter() } if n.waiters == nil { n.waiters = make(map[*SingleWaiter]bool) } w := n.waiter.subWaiter() n.waiters[w] = true return w } func (n *SingleNotifier) Reset() { n.Lock() defer n.Unlock() if n.waiter != nil { n.waiter.cancel() n.waiter = nil } n.waiters = nil } func (n *SingleNotifier) Release(w *SingleWaiter) { n.Lock() defer n.Unlock() if _, found := n.waiters[w]; found { delete(n.waiters, w) if len(n.waiters) == 0 { n.waiters = nil if n.waiter != nil { n.waiter.cancel() n.waiter = nil } } } } func (n *SingleNotifier) Notify() { n.Lock() defer n.Unlock() if n.waiter != nil { n.waiter.cancel() } n.waiters = nil } nextcloud-spreed-signaling-1.2.4/single_notifier_test.go000066400000000000000000000064621460321600400235260ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "sync" "testing" "time" ) func TestSingleNotifierNoWaiter(t *testing.T) { var notifier SingleNotifier // Notifications can be sent even if no waiter exists. notifier.Notify() } func TestSingleNotifierSimple(t *testing.T) { var notifier SingleNotifier var wg sync.WaitGroup wg.Add(1) waiter := notifier.NewWaiter() defer notifier.Release(waiter) go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := waiter.Wait(ctx); err != nil { t.Error(err) } }() notifier.Notify() wg.Wait() } func TestSingleNotifierMultiNotify(t *testing.T) { var notifier SingleNotifier waiter := notifier.NewWaiter() defer notifier.Release(waiter) notifier.Notify() // The second notification will be ignored while the first is still pending. 
notifier.Notify() } func TestSingleNotifierWaitClosed(t *testing.T) { var notifier SingleNotifier waiter := notifier.NewWaiter() notifier.Release(waiter) if err := waiter.Wait(context.Background()); err != nil { t.Error(err) } } func TestSingleNotifierWaitClosedMulti(t *testing.T) { var notifier SingleNotifier waiter1 := notifier.NewWaiter() waiter2 := notifier.NewWaiter() notifier.Release(waiter1) notifier.Release(waiter2) if err := waiter1.Wait(context.Background()); err != nil { t.Error(err) } if err := waiter2.Wait(context.Background()); err != nil { t.Error(err) } } func TestSingleNotifierResetWillNotify(t *testing.T) { var notifier SingleNotifier var wg sync.WaitGroup wg.Add(1) waiter := notifier.NewWaiter() defer notifier.Release(waiter) go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := waiter.Wait(ctx); err != nil { t.Error(err) } }() notifier.Reset() wg.Wait() } func TestSingleNotifierDuplicate(t *testing.T) { var notifier SingleNotifier var wgStart sync.WaitGroup var wgEnd sync.WaitGroup for i := 0; i < 2; i++ { wgStart.Add(1) wgEnd.Add(1) go func() { defer wgEnd.Done() waiter := notifier.NewWaiter() defer notifier.Release(waiter) // Goroutine has created the waiter and is ready. wgStart.Done() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if err := waiter.Wait(ctx); err != nil { t.Error(err) } }() } wgStart.Wait() time.Sleep(100 * time.Millisecond) notifier.Notify() wgEnd.Wait() } nextcloud-spreed-signaling-1.2.4/stats_prometheus.go000066400000000000000000000031671460321600400227170ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "github.com/prometheus/client_golang/prometheus" ) var ( statsMessagesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "signaling", Subsystem: "server", Name: "messages_total", Help: "The total number of signaling messages", }, []string{"type"}) signalingStats = []prometheus.Collector{ statsMessagesTotal, } ) func registerAll(cs ...prometheus.Collector) { for _, c := range cs { if err := prometheus.DefaultRegisterer.Register(c); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { panic(err) } } } } func unregisterAll(cs ...prometheus.Collector) { for _, c := range cs { prometheus.Unregister(c) } } func RegisterStats() { registerAll(signalingStats...) } nextcloud-spreed-signaling-1.2.4/stats_prometheus_test.go000066400000000000000000000041461460321600400237540ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "fmt" "runtime" "strings" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" ) func checkStatsValue(t *testing.T, collector prometheus.Collector, value float64) { ch := make(chan *prometheus.Desc, 1) collector.Describe(ch) desc := <-ch v := testutil.ToFloat64(collector) if v != value { pc := make([]uintptr, 10) n := runtime.Callers(2, pc) if n == 0 { t.Errorf("Expected value %f for %s, got %f", value, desc, v) return } pc = pc[:n] frames := runtime.CallersFrames(pc) stack := "" for { frame, more := frames.Next() if !strings.Contains(frame.File, "nextcloud-spreed-signaling") { break } stack += fmt.Sprintf("%s:%d\n", frame.File, frame.Line) if !more { break } } t.Errorf("Expected value %f for %s, got %f at\n%s", value, desc, v, stack) } } func collectAndLint(t *testing.T, collectors ...prometheus.Collector) { for _, collector := range collectors { problems, err := testutil.CollectAndLint(collector) if err != nil { t.Errorf("Error linting %+v: %s", collector, err) continue } for _, problem := range problems { t.Errorf("Problem with %s: %s", problem.Metric, problem.Text) } } } 
nextcloud-spreed-signaling-1.2.4/syscallconn.go000066400000000000000000000034111460321600400216260ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package signaling import ( "net" "syscall" ) type sysConn = syscall.Conn // syscallConn keeps reference of rawConn to support syscall.Conn for channelz. // SyscallConn() (the method in interface syscall.Conn) is explicitly // implemented on this type, // // Interface syscall.Conn is implemented by most net.Conn implementations (e.g. // TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns // that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn // doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't // help here). type syscallConn struct { net.Conn // sysConn is a type alias of syscall.Conn. It's necessary because the name // `Conn` collides with `net.Conn`. sysConn } // WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that // implements syscall.Conn. rawConn will be used to support syscall, and newConn // will be used for read/write. // // This function returns newConn if rawConn doesn't implement syscall.Conn. 
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { sc, ok := rawConn.(sysConn) if !ok { return newConn } return &syscallConn{ Conn: newConn, sysConn: sc, } } nextcloud-spreed-signaling-1.2.4/testclient_test.go000066400000000000000000000777001460321600400225270ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2017 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "bytes" "context" "crypto/hmac" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "net" "net/http/httptest" "reflect" "strconv" "strings" "sync" "testing" "time" "github.com/golang-jwt/jwt/v4" "github.com/gorilla/websocket" ) var ( testBackendSecret = []byte("secret") testInternalSecret = []byte("internal-secret") ErrNoMessageReceived = fmt.Errorf("no message was received by the server") ) type TestBackendClientAuthParams struct { UserId string `json:"userid"` } func getWebsocketUrl(url string) string { if strings.HasPrefix(url, "http://") { return "ws://" + url[7:] + "/spreed" } else if strings.HasPrefix(url, "https://") { return "wss://" + url[8:] + "/spreed" } else { panic("Unsupported URL: " + url) } } func getPubliceSessionIdData(h *Hub, publicId string) *SessionIdData { decodedPublic := h.decodeSessionId(publicId, publicSessionName) if decodedPublic == nil { panic("invalid public session id") } return decodedPublic } func checkUnexpectedClose(err error) error { if err != nil && websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { return fmt.Errorf("Connection was closed with unexpected error: %s", err) } return nil } func checkMessageType(message *ServerMessage, expectedType string) error { if message == nil { return ErrNoMessageReceived } if message.Type != expectedType { return fmt.Errorf("Expected \"%s\" message, got %+v", expectedType, message) } switch message.Type { case "hello": if message.Hello == nil { return fmt.Errorf("Expected \"%s\" message, got %+v", expectedType, message) } case "message": if message.Message == nil { return fmt.Errorf("Expected \"%s\" message, got %+v", expectedType, message) } else if message.Message.Data == nil || len(*message.Message.Data) == 0 { return fmt.Errorf("Received message without data") } case "room": if message.Room == nil { return fmt.Errorf("Expected \"%s\" message, got %+v", expectedType, message) } case 
"event": if message.Event == nil { return fmt.Errorf("Expected \"%s\" message, got %+v", expectedType, message) } case "transient": if message.TransientData == nil { return fmt.Errorf("Expected \"%s\" message, got %+v", expectedType, message) } } return nil } func checkMessageSender(hub *Hub, sender *MessageServerMessageSender, senderType string, hello *HelloServerMessage) error { if sender.Type != senderType { return fmt.Errorf("Expected sender type %s, got %s", senderType, sender.SessionId) } else if sender.SessionId != hello.SessionId { return fmt.Errorf("Expected session id %+v, got %+v", getPubliceSessionIdData(hub, hello.SessionId), getPubliceSessionIdData(hub, sender.SessionId)) } else if sender.UserId != hello.UserId { return fmt.Errorf("Expected user id %s, got %s", hello.UserId, sender.UserId) } return nil } func checkReceiveClientMessageWithSenderAndRecipient(ctx context.Context, client *TestClient, senderType string, hello *HelloServerMessage, payload interface{}, sender **MessageServerMessageSender, recipient **MessageClientMessageRecipient) error { message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { return err } else if err := checkMessageType(message, "message"); err != nil { return err } else if err := checkMessageSender(client.hub, message.Message.Sender, senderType, hello); err != nil { return err } else { if err := json.Unmarshal(*message.Message.Data, payload); err != nil { return err } } if sender != nil { *sender = message.Message.Sender } if recipient != nil { *recipient = message.Message.Recipient } return nil } func checkReceiveClientMessageWithSender(ctx context.Context, client *TestClient, senderType string, hello *HelloServerMessage, payload interface{}, sender **MessageServerMessageSender) error { return checkReceiveClientMessageWithSenderAndRecipient(ctx, client, senderType, hello, payload, sender, nil) } func checkReceiveClientMessage(ctx context.Context, client *TestClient, senderType string, 
hello *HelloServerMessage, payload interface{}) error { return checkReceiveClientMessageWithSenderAndRecipient(ctx, client, senderType, hello, payload, nil, nil) } func checkReceiveClientControlWithSenderAndRecipient(ctx context.Context, client *TestClient, senderType string, hello *HelloServerMessage, payload interface{}, sender **MessageServerMessageSender, recipient **MessageClientMessageRecipient) error { message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { return err } else if err := checkMessageType(message, "control"); err != nil { return err } else if err := checkMessageSender(client.hub, message.Control.Sender, senderType, hello); err != nil { return err } else { if err := json.Unmarshal(*message.Control.Data, payload); err != nil { return err } } if sender != nil { *sender = message.Control.Sender } if recipient != nil { *recipient = message.Control.Recipient } return nil } func checkReceiveClientControlWithSender(ctx context.Context, client *TestClient, senderType string, hello *HelloServerMessage, payload interface{}, sender **MessageServerMessageSender) error { // nolint return checkReceiveClientControlWithSenderAndRecipient(ctx, client, senderType, hello, payload, sender, nil) } func checkReceiveClientControl(ctx context.Context, client *TestClient, senderType string, hello *HelloServerMessage, payload interface{}) error { return checkReceiveClientControlWithSenderAndRecipient(ctx, client, senderType, hello, payload, nil, nil) } func checkReceiveClientEvent(ctx context.Context, client *TestClient, eventType string, msg **EventServerMessage) error { message, err := client.RunUntilMessage(ctx) if err := checkUnexpectedClose(err); err != nil { return err } else if err := checkMessageType(message, "event"); err != nil { return err } else if message.Event.Type != eventType { return fmt.Errorf("Expected \"%s\" event type, got \"%s\"", eventType, message.Event.Type) } else { if msg != nil { *msg = message.Event } } 
return nil } type TestClient struct { t *testing.T hub *Hub server *httptest.Server mu sync.Mutex conn *websocket.Conn localAddr net.Addr messageChan chan []byte readErrorChan chan error publicId string } func NewTestClientContext(ctx context.Context, t *testing.T, server *httptest.Server, hub *Hub) *TestClient { // Reference "hub" to prevent compiler error. conn, _, err := websocket.DefaultDialer.DialContext(ctx, getWebsocketUrl(server.URL), nil) if err != nil { t.Fatal(err) } messageChan := make(chan []byte) readErrorChan := make(chan error, 1) go func() { for { messageType, data, err := conn.ReadMessage() if err != nil { readErrorChan <- err return } else if messageType != websocket.TextMessage { t.Errorf("Expect text message, got %d", messageType) return } messageChan <- data } }() return &TestClient{ t: t, hub: hub, server: server, conn: conn, localAddr: conn.LocalAddr(), messageChan: messageChan, readErrorChan: readErrorChan, } } func NewTestClient(t *testing.T, server *httptest.Server, hub *Hub) *TestClient { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() client := NewTestClientContext(ctx, t, server, hub) msg, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } if msg.Type != "welcome" { t.Errorf("Expected welcome message, got %+v", msg) } return client } func (c *TestClient) CloseWithBye() { c.SendBye() // nolint c.Close() } func (c *TestClient) Close() { c.mu.Lock() defer c.mu.Unlock() if err := c.conn.WriteMessage(websocket.CloseMessage, []byte{}); err == websocket.ErrCloseSent { // Already closed return } // Wait a bit for close message to be processed. time.Sleep(100 * time.Millisecond) c.conn.Close() // Drain any entries in the channels to terminate the read goroutine. 
loop: for { select { case <-c.readErrorChan: case <-c.messageChan: default: break loop } } } func (c *TestClient) WaitForClientRemoved(ctx context.Context) error { c.hub.mu.Lock() defer c.hub.mu.Unlock() for { found := false for _, client := range c.hub.clients { client.mu.Lock() conn := client.conn client.mu.Unlock() if conn != nil && conn.RemoteAddr().String() == c.localAddr.String() { found = true break } } if !found { break } c.hub.mu.Unlock() select { case <-ctx.Done(): c.hub.mu.Lock() return ctx.Err() default: time.Sleep(time.Millisecond) } c.hub.mu.Lock() } return nil } func (c *TestClient) WaitForSessionRemoved(ctx context.Context, sessionId string) error { data := c.hub.decodeSessionId(sessionId, publicSessionName) if data == nil { return fmt.Errorf("Invalid session id passed") } c.hub.mu.Lock() defer c.hub.mu.Unlock() for { _, found := c.hub.sessions[data.Sid] if !found { break } c.hub.mu.Unlock() select { case <-ctx.Done(): c.hub.mu.Lock() return ctx.Err() default: time.Sleep(time.Millisecond) } c.hub.mu.Lock() } return nil } func (c *TestClient) WriteJSON(data interface{}) error { if !strings.Contains(c.t.Name(), "HelloUnsupportedVersion") { if msg, ok := data.(*ClientMessage); ok { if err := msg.CheckValid(); err != nil { return err } } } c.mu.Lock() defer c.mu.Unlock() return c.conn.WriteJSON(data) } func (c *TestClient) EnsuerWriteJSON(data interface{}) { if err := c.WriteJSON(data); err != nil { c.t.Fatalf("Could not write JSON %+v: %s", data, err) } } func (c *TestClient) SendHello(userid string) error { return c.SendHelloV1(userid) } func (c *TestClient) SendHelloV1(userid string) error { params := TestBackendClientAuthParams{ UserId: userid, } return c.SendHelloParams(c.server.URL, HelloVersionV1, "", nil, params) } func (c *TestClient) SendHelloV2(userid string) error { now := time.Now() return c.SendHelloV2WithTimes(userid, now, now.Add(time.Minute)) } func (c *TestClient) SendHelloV2WithTimes(userid string, issuedAt time.Time, expiresAt 
time.Time) error { userdata := map[string]string{ "displayname": "Displayname " + userid, } data, err := json.Marshal(userdata) if err != nil { c.t.Fatal(err) } claims := &HelloV2TokenClaims{ RegisteredClaims: jwt.RegisteredClaims{ Issuer: c.server.URL, Subject: userid, }, UserData: (*json.RawMessage)(&data), } if !issuedAt.IsZero() { claims.IssuedAt = jwt.NewNumericDate(issuedAt) } if !expiresAt.IsZero() { claims.ExpiresAt = jwt.NewNumericDate(expiresAt) } var token *jwt.Token if strings.Contains(c.t.Name(), "ECDSA") { token = jwt.NewWithClaims(jwt.SigningMethodES256, claims) } else if strings.Contains(c.t.Name(), "Ed25519") { token = jwt.NewWithClaims(jwt.SigningMethodEdDSA, claims) } else { token = jwt.NewWithClaims(jwt.SigningMethodRS256, claims) } private := getPrivateAuthToken(c.t) tokenString, err := token.SignedString(private) if err != nil { c.t.Fatal(err) } params := HelloV2AuthParams{ Token: tokenString, } return c.SendHelloParams(c.server.URL, HelloVersionV2, "", nil, params) } func (c *TestClient) SendHelloResume(resumeId string) error { hello := &ClientMessage{ Id: "1234", Type: "hello", Hello: &HelloClientMessage{ Version: HelloVersionV1, ResumeId: resumeId, }, } return c.WriteJSON(hello) } func (c *TestClient) SendHelloClient(userid string) error { params := TestBackendClientAuthParams{ UserId: userid, } return c.SendHelloParams(c.server.URL, HelloVersionV1, "client", nil, params) } func (c *TestClient) SendHelloInternal() error { return c.SendHelloInternalWithFeatures(nil) } func (c *TestClient) SendHelloInternalWithFeatures(features []string) error { random := newRandomString(48) mac := hmac.New(sha256.New, testInternalSecret) mac.Write([]byte(random)) // nolint token := hex.EncodeToString(mac.Sum(nil)) backend := c.server.URL params := ClientTypeInternalAuthParams{ Random: random, Token: token, Backend: backend, } return c.SendHelloParams("", HelloVersionV1, "internal", features, params) } func (c *TestClient) SendHelloParams(url string, version 
string, clientType string, features []string, params interface{}) error { data, err := json.Marshal(params) if err != nil { c.t.Fatal(err) } hello := &ClientMessage{ Id: "1234", Type: "hello", Hello: &HelloClientMessage{ Version: version, Features: features, Auth: HelloClientMessageAuth{ Type: clientType, Url: url, Params: (*json.RawMessage)(&data), }, }, } return c.WriteJSON(hello) } func (c *TestClient) SendBye() error { hello := &ClientMessage{ Id: "9876", Type: "bye", Bye: &ByeClientMessage{}, } return c.WriteJSON(hello) } func (c *TestClient) SendMessage(recipient MessageClientMessageRecipient, data interface{}) error { payload, err := json.Marshal(data) if err != nil { c.t.Fatal(err) } message := &ClientMessage{ Id: "abcd", Type: "message", Message: &MessageClientMessage{ Recipient: recipient, Data: (*json.RawMessage)(&payload), }, } return c.WriteJSON(message) } func (c *TestClient) SendControl(recipient MessageClientMessageRecipient, data interface{}) error { payload, err := json.Marshal(data) if err != nil { c.t.Fatal(err) } message := &ClientMessage{ Id: "abcd", Type: "control", Control: &ControlClientMessage{ MessageClientMessage: MessageClientMessage{ Recipient: recipient, Data: (*json.RawMessage)(&payload), }, }, } return c.WriteJSON(message) } func (c *TestClient) SendInternalAddSession(msg *AddSessionInternalClientMessage) error { message := &ClientMessage{ Id: "abcd", Type: "internal", Internal: &InternalClientMessage{ Type: "addsession", AddSession: msg, }, } return c.WriteJSON(message) } func (c *TestClient) SendInternalUpdateSession(msg *UpdateSessionInternalClientMessage) error { message := &ClientMessage{ Id: "abcd", Type: "internal", Internal: &InternalClientMessage{ Type: "updatesession", UpdateSession: msg, }, } return c.WriteJSON(message) } func (c *TestClient) SendInternalRemoveSession(msg *RemoveSessionInternalClientMessage) error { message := &ClientMessage{ Id: "abcd", Type: "internal", Internal: &InternalClientMessage{ Type: 
"removesession", RemoveSession: msg, }, } return c.WriteJSON(message) } func (c *TestClient) SendInternalDialout(msg *DialoutInternalClientMessage) error { message := &ClientMessage{ Id: "abcd", Type: "internal", Internal: &InternalClientMessage{ Type: "dialout", Dialout: msg, }, } return c.WriteJSON(message) } func (c *TestClient) SetTransientData(key string, value interface{}, ttl time.Duration) error { payload, err := json.Marshal(value) if err != nil { c.t.Fatal(err) } message := &ClientMessage{ Id: "efgh", Type: "transient", TransientData: &TransientDataClientMessage{ Type: "set", Key: key, Value: (*json.RawMessage)(&payload), TTL: ttl, }, } return c.WriteJSON(message) } func (c *TestClient) RemoveTransientData(key string) error { message := &ClientMessage{ Id: "ijkl", Type: "transient", TransientData: &TransientDataClientMessage{ Type: "remove", Key: key, }, } return c.WriteJSON(message) } func (c *TestClient) DrainMessages(ctx context.Context) error { select { case err := <-c.readErrorChan: return err case <-c.messageChan: n := len(c.messageChan) for i := 0; i < n; i++ { <-c.messageChan } case <-ctx.Done(): return ctx.Err() } return nil } func (c *TestClient) GetPendingMessages(ctx context.Context) ([]*ServerMessage, error) { var result []*ServerMessage select { case err := <-c.readErrorChan: return nil, err case msg := <-c.messageChan: var m ServerMessage if err := json.Unmarshal(msg, &m); err != nil { return nil, err } result = append(result, &m) n := len(c.messageChan) for i := 0; i < n; i++ { var m ServerMessage msg = <-c.messageChan if err := json.Unmarshal(msg, &m); err != nil { return nil, err } result = append(result, &m) } case <-ctx.Done(): return nil, ctx.Err() } return result, nil } func (c *TestClient) RunUntilMessage(ctx context.Context) (message *ServerMessage, err error) { select { case err = <-c.readErrorChan: case msg := <-c.messageChan: var m ServerMessage if err = json.Unmarshal(msg, &m); err == nil { message = &m } case <-ctx.Done(): err 
= ctx.Err() } return } func (c *TestClient) RunUntilHello(ctx context.Context) (message *ServerMessage, err error) { if message, err = c.RunUntilMessage(ctx); err != nil { return nil, err } if err := checkUnexpectedClose(err); err != nil { return nil, err } if err := checkMessageType(message, "hello"); err != nil { return nil, err } c.publicId = message.Hello.SessionId return message, nil } func (c *TestClient) JoinRoom(ctx context.Context, roomId string) (message *ServerMessage, err error) { return c.JoinRoomWithRoomSession(ctx, roomId, roomId+"-"+c.publicId) } func (c *TestClient) JoinRoomWithRoomSession(ctx context.Context, roomId string, roomSessionId string) (message *ServerMessage, err error) { msg := &ClientMessage{ Id: "ABCD", Type: "room", Room: &RoomClientMessage{ RoomId: roomId, SessionId: roomSessionId, }, } if err := c.WriteJSON(msg); err != nil { return nil, err } if message, err = c.RunUntilMessage(ctx); err != nil { return nil, err } if err := checkUnexpectedClose(err); err != nil { return nil, err } if err := checkMessageType(message, "room"); err != nil { return nil, err } return message, nil } func checkMessageRoomId(message *ServerMessage, roomId string) error { if err := checkMessageType(message, "room"); err != nil { return err } if message.Room.RoomId != roomId { return fmt.Errorf("Expected room id %s, got %+v", roomId, message.Room) } return nil } func (c *TestClient) RunUntilRoom(ctx context.Context, roomId string) error { message, err := c.RunUntilMessage(ctx) if err != nil { return err } if err := checkUnexpectedClose(err); err != nil { return err } return checkMessageRoomId(message, roomId) } func (c *TestClient) checkMessageJoined(message *ServerMessage, hello *HelloServerMessage) error { return c.checkMessageJoinedSession(message, hello.SessionId, hello.UserId) } func (c *TestClient) checkSingleMessageJoined(message *ServerMessage) error { if err := checkMessageType(message, "event"); err != nil { return err } else if 
message.Event.Target != "room" { return fmt.Errorf("Expected event target room, got %+v", message.Event) } else if message.Event.Type != "join" { return fmt.Errorf("Expected event type join, got %+v", message.Event) } else if len(message.Event.Join) != 1 { return fmt.Errorf("Expected one join event entry, got %+v", message.Event) } return nil } func (c *TestClient) checkMessageJoinedSession(message *ServerMessage, sessionId string, userId string) error { if err := c.checkSingleMessageJoined(message); err != nil { return err } evt := message.Event.Join[0] if sessionId != "" && evt.SessionId != sessionId { return fmt.Errorf("Expected join session id %+v, got %+v", getPubliceSessionIdData(c.hub, sessionId), getPubliceSessionIdData(c.hub, evt.SessionId)) } if evt.UserId != userId { return fmt.Errorf("Expected join user id %s, got %+v", userId, evt) } return nil } func (c *TestClient) RunUntilJoinedAndReturn(ctx context.Context, hello ...*HelloServerMessage) ([]*EventServerMessageSessionEntry, []*ServerMessage, error) { received := make([]*EventServerMessageSessionEntry, len(hello)) var ignored []*ServerMessage hellos := make(map[*HelloServerMessage]int, len(hello)) for idx, h := range hello { hellos[h] = idx } for len(hellos) > 0 { message, err := c.RunUntilMessage(ctx) if err != nil { return nil, nil, fmt.Errorf("got error while waiting for %+v: %w", hellos, err) } if err := checkMessageType(message, "event"); err != nil { ignored = append(ignored, message) continue } else if message.Event.Target != "room" || message.Event.Type != "join" { ignored = append(ignored, message) continue } for len(message.Event.Join) > 0 { found := false loop: for h, idx := range hellos { for idx2, evt := range message.Event.Join { if evt.SessionId == h.SessionId && evt.UserId == h.UserId { received[idx] = evt delete(hellos, h) message.Event.Join = append(message.Event.Join[:idx2], message.Event.Join[idx2+1:]...) 
found = true break loop } } } if !found { return nil, nil, fmt.Errorf("expected one of the passed hello sessions, got %+v", message.Event.Join[0]) } } } return received, ignored, nil } func (c *TestClient) RunUntilJoined(ctx context.Context, hello ...*HelloServerMessage) error { _, unexpected, err := c.RunUntilJoinedAndReturn(ctx, hello...) if err != nil { return err } if len(unexpected) > 0 { return fmt.Errorf("Received unexpected messages: %+v", unexpected) } return nil } func (c *TestClient) checkMessageRoomLeave(message *ServerMessage, hello *HelloServerMessage) error { return c.checkMessageRoomLeaveSession(message, hello.SessionId) } func (c *TestClient) checkMessageRoomLeaveSession(message *ServerMessage, sessionId string) error { if err := checkMessageType(message, "event"); err != nil { return err } else if message.Event.Target != "room" { return fmt.Errorf("Expected event target room, got %+v", message.Event) } else if message.Event.Type != "leave" { return fmt.Errorf("Expected event type leave, got %+v", message.Event) } else if len(message.Event.Leave) != 1 { return fmt.Errorf("Expected one leave event entry, got %+v", message.Event) } else if message.Event.Leave[0] != sessionId { return fmt.Errorf("Expected leave session id %+v, got %+v", getPubliceSessionIdData(c.hub, sessionId), getPubliceSessionIdData(c.hub, message.Event.Leave[0])) } return nil } func (c *TestClient) RunUntilLeft(ctx context.Context, hello *HelloServerMessage) error { message, err := c.RunUntilMessage(ctx) if err != nil { return err } return c.checkMessageRoomLeave(message, hello) } func checkMessageRoomlistUpdate(message *ServerMessage) (*RoomEventServerMessage, error) { if err := checkMessageType(message, "event"); err != nil { return nil, err } else if message.Event.Target != "roomlist" { return nil, fmt.Errorf("Expected event target room, got %+v", message.Event) } else if message.Event.Type != "update" || message.Event.Update == nil { return nil, fmt.Errorf("Expected event type 
update, got %+v", message.Event) } else { return message.Event.Update, nil } } func (c *TestClient) RunUntilRoomlistUpdate(ctx context.Context) (*RoomEventServerMessage, error) { message, err := c.RunUntilMessage(ctx) if err != nil { return nil, err } return checkMessageRoomlistUpdate(message) } func checkMessageRoomlistDisinvite(message *ServerMessage) (*RoomDisinviteEventServerMessage, error) { if err := checkMessageType(message, "event"); err != nil { return nil, err } else if message.Event.Target != "roomlist" { return nil, fmt.Errorf("Expected event target room, got %+v", message.Event) } else if message.Event.Type != "disinvite" || message.Event.Disinvite == nil { return nil, fmt.Errorf("Expected event type disinvite, got %+v", message.Event) } return message.Event.Disinvite, nil } func (c *TestClient) RunUntilRoomlistDisinvite(ctx context.Context) (*RoomDisinviteEventServerMessage, error) { message, err := c.RunUntilMessage(ctx) if err != nil { return nil, err } return checkMessageRoomlistDisinvite(message) } func checkMessageParticipantsInCall(message *ServerMessage) (*RoomEventServerMessage, error) { if err := checkMessageType(message, "event"); err != nil { return nil, err } else if message.Event.Target != "participants" { return nil, fmt.Errorf("Expected event target participants, got %+v", message.Event) } else if message.Event.Type != "update" || message.Event.Update == nil { return nil, fmt.Errorf("Expected event type update, got %+v", message.Event) } return message.Event.Update, nil } func checkMessageParticipantFlags(message *ServerMessage) (*RoomFlagsServerMessage, error) { if err := checkMessageType(message, "event"); err != nil { return nil, err } else if message.Event.Target != "participants" { return nil, fmt.Errorf("Expected event target room, got %+v", message.Event) } else if message.Event.Type != "flags" || message.Event.Flags == nil { return nil, fmt.Errorf("Expected event type flags, got %+v", message.Event) } return message.Event.Flags, 
nil } func checkMessageRoomMessage(message *ServerMessage) (*RoomEventMessage, error) { if err := checkMessageType(message, "event"); err != nil { return nil, err } else if message.Event.Target != "room" { return nil, fmt.Errorf("Expected event target room, got %+v", message.Event) } else if message.Event.Type != "message" || message.Event.Message == nil { return nil, fmt.Errorf("Expected event type message, got %+v", message.Event) } return message.Event.Message, nil } func (c *TestClient) RunUntilRoomMessage(ctx context.Context) (*RoomEventMessage, error) { message, err := c.RunUntilMessage(ctx) if err != nil { return nil, err } return checkMessageRoomMessage(message) } func checkMessageError(message *ServerMessage, msgid string) error { if err := checkMessageType(message, "error"); err != nil { return err } else if message.Error.Code != msgid { return fmt.Errorf("Expected error \"%s\", got \"%s\" (%+v)", msgid, message.Error.Code, message.Error) } return nil } func (c *TestClient) RunUntilOffer(ctx context.Context, offer string) error { message, err := c.RunUntilMessage(ctx) if err != nil { return err } if err := checkUnexpectedClose(err); err != nil { return err } else if err := checkMessageType(message, "message"); err != nil { return err } var data map[string]interface{} if err := json.Unmarshal(*message.Message.Data, &data); err != nil { return err } if data["type"].(string) != "offer" { return fmt.Errorf("expected data type offer, got %+v", data) } payload := data["payload"].(map[string]interface{}) if payload["type"].(string) != "offer" { return fmt.Errorf("expected payload type offer, got %+v", payload) } if payload["sdp"].(string) != offer { return fmt.Errorf("expected payload answer %s, got %+v", offer, payload) } return nil } func (c *TestClient) RunUntilAnswer(ctx context.Context, answer string) error { message, err := c.RunUntilMessage(ctx) if err != nil { return err } if err := checkUnexpectedClose(err); err != nil { return err } else if err := 
checkMessageType(message, "message"); err != nil { return err } var data map[string]interface{} if err := json.Unmarshal(*message.Message.Data, &data); err != nil { return err } if data["type"].(string) != "answer" { return fmt.Errorf("expected data type answer, got %+v", data) } payload := data["payload"].(map[string]interface{}) if payload["type"].(string) != "answer" { return fmt.Errorf("expected payload type answer, got %+v", payload) } if payload["sdp"].(string) != answer { return fmt.Errorf("expected payload answer %s, got %+v", answer, payload) } return nil } func checkMessageTransientSet(message *ServerMessage, key string, value interface{}, oldValue interface{}) error { if err := checkMessageType(message, "transient"); err != nil { return err } else if message.TransientData.Type != "set" { return fmt.Errorf("Expected transient set, got %+v", message.TransientData) } else if message.TransientData.Key != key { return fmt.Errorf("Expected transient set key %s, got %+v", key, message.TransientData) } else if !reflect.DeepEqual(message.TransientData.Value, value) { return fmt.Errorf("Expected transient set value %+v, got %+v", value, message.TransientData.Value) } else if !reflect.DeepEqual(message.TransientData.OldValue, oldValue) { return fmt.Errorf("Expected transient set old value %+v, got %+v", oldValue, message.TransientData.OldValue) } return nil } func checkMessageTransientRemove(message *ServerMessage, key string, oldValue interface{}) error { if err := checkMessageType(message, "transient"); err != nil { return err } else if message.TransientData.Type != "remove" { return fmt.Errorf("Expected transient remove, got %+v", message.TransientData) } else if message.TransientData.Key != key { return fmt.Errorf("Expected transient remove key %s, got %+v", key, message.TransientData) } else if !reflect.DeepEqual(message.TransientData.OldValue, oldValue) { return fmt.Errorf("Expected transient remove old value %+v, got %+v", oldValue, 
message.TransientData.OldValue) } return nil } func checkMessageTransientInitial(message *ServerMessage, data map[string]interface{}) error { if err := checkMessageType(message, "transient"); err != nil { return err } else if message.TransientData.Type != "initial" { return fmt.Errorf("Expected transient initial, got %+v", message.TransientData) } else if !reflect.DeepEqual(message.TransientData.Data, data) { return fmt.Errorf("Expected transient initial data %+v, got %+v", data, message.TransientData.Data) } return nil } func checkMessageInCallAll(message *ServerMessage, roomId string, inCall int) error { if err := checkMessageType(message, "event"); err != nil { return err } else if message.Event.Type != "update" { return fmt.Errorf("Expected update event, got %+v", message.Event) } else if message.Event.Target != "participants" { return fmt.Errorf("Expected participants update event, got %+v", message.Event) } else if message.Event.Update.RoomId != roomId { return fmt.Errorf("Expected participants update event for room %s, got %+v", roomId, message.Event.Update) } else if !message.Event.Update.All { return fmt.Errorf("Expected participants update event for all, got %+v", message.Event.Update) } else if !bytes.Equal(*message.Event.Update.InCall, []byte(strconv.FormatInt(int64(inCall), 10))) { return fmt.Errorf("Expected incall flags %d, got %+v", inCall, message.Event.Update) } return nil } func checkMessageSwitchTo(message *ServerMessage, roomId string, details json.RawMessage) (*EventServerMessageSwitchTo, error) { if err := checkMessageType(message, "event"); err != nil { return nil, err } else if message.Event.Type != "switchto" { return nil, fmt.Errorf("Expected switchto event, got %+v", message.Event) } else if message.Event.Target != "room" { return nil, fmt.Errorf("Expected room switchto event, got %+v", message.Event) } else if message.Event.SwitchTo.RoomId != roomId { return nil, fmt.Errorf("Expected room switchto event for room %s, got %+v", roomId, 
message.Event) } if details != nil { if message.Event.SwitchTo.Details == nil || !bytes.Equal(details, message.Event.SwitchTo.Details) { return nil, fmt.Errorf("Expected details %s, got %+v", string(details), message.Event) } } else if message.Event.SwitchTo.Details != nil { return nil, fmt.Errorf("Expected no details, got %+v", message.Event) } return message.Event.SwitchTo, nil } func (c *TestClient) RunUntilSwitchTo(ctx context.Context, roomId string, details json.RawMessage) (*EventServerMessageSwitchTo, error) { message, err := c.RunUntilMessage(ctx) if err != nil { return nil, err } return checkMessageSwitchTo(message, roomId, details) } nextcloud-spreed-signaling-1.2.4/testutils_test.go000066400000000000000000000043751460321600400224070ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "bytes" "io" "os" "os/signal" "runtime/pprof" "sync" "testing" "time" ) var listenSignalOnce sync.Once func ensureNoGoroutinesLeak(t *testing.T, f func(t *testing.T)) { t.Helper() // The signal package will start a goroutine the first time "signal.Notify" // is called. Do so outside the function under test so the signal goroutine // will not be shown as "leaking". 
listenSignalOnce.Do(func() { ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) go func() { for { <-ch } }() }) profile := pprof.Lookup("goroutine") // Give time for things to settle before capturing the number of // go routines time.Sleep(500 * time.Millisecond) before := profile.Count() var prev bytes.Buffer dumpGoroutines("Before:", &prev) t.Run("leakcheck", f) var after int // Give time for things to settle before capturing the number of // go routines timeout := time.Now().Add(time.Second) for time.Now().Before(timeout) { after = profile.Count() if after == before { break } } if after != before { io.Copy(os.Stderr, &prev) // nolint dumpGoroutines("After:", os.Stderr) t.Fatalf("Number of Go routines has changed from %d to %d", before, after) } } func dumpGoroutines(prefix string, w io.Writer) { if prefix != "" { io.WriteString(w, prefix+"\n") // nolint } profile := pprof.Lookup("goroutine") profile.WriteTo(w, 2) // nolint } nextcloud-spreed-signaling-1.2.4/tools.go000066400000000000000000000021721460321600400204410ustar00rootroot00000000000000//go:build tools /** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2023 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling // Import applications that would otherwise not be detected by "go mod vendor". 
import ( _ "github.com/mailru/easyjson/easyjson" _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" _ "google.golang.org/protobuf/cmd/protoc-gen-go" ) nextcloud-spreed-signaling-1.2.4/transient_data.go000066400000000000000000000141301460321600400222760ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "reflect" "sync" "time" ) type TransientListener interface { SendMessage(message *ServerMessage) bool } type TransientData struct { mu sync.Mutex data map[string]interface{} listeners map[TransientListener]bool timers map[string]*time.Timer ttlCh chan<- struct{} } // NewTransientData creates a new transient data container. 
func NewTransientData() *TransientData {
	// The zero value is usable; maps and timers are created lazily on first use.
	return &TransientData{}
}

// notifySet broadcasts a "set" event for the given key (with both the previous
// and the new value) to all registered listeners.
// Callers (doSet, reached through Set/SetTTL) hold t.mu while calling this.
func (t *TransientData) notifySet(key string, prev, value interface{}) {
	msg := &ServerMessage{
		Type: "transient",
		TransientData: &TransientDataServerMessage{
			Type:     "set",
			Key:      key,
			OldValue: prev,
			Value:    value,
		},
	}
	for listener := range t.listeners {
		listener.SendMessage(msg)
	}
}

// notifyDeleted broadcasts a "remove" event for the given key (including the
// value that was removed) to all registered listeners.
// Callers (doRemove) hold t.mu while calling this.
func (t *TransientData) notifyDeleted(key string, prev interface{}) {
	msg := &ServerMessage{
		Type: "transient",
		TransientData: &TransientDataServerMessage{
			Type:     "remove",
			Key:      key,
			OldValue: prev,
		},
	}
	for listener := range t.listeners {
		listener.SendMessage(msg)
	}
}

// AddListener adds a new listener to be notified about changes.
//
// If data is already present, the listener immediately receives an "initial"
// message with the current contents so it starts out in sync.
func (t *TransientData) AddListener(listener TransientListener) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.listeners == nil {
		t.listeners = make(map[TransientListener]bool)
	}
	t.listeners[listener] = true
	if len(t.data) > 0 {
		msg := &ServerMessage{
			Type: "transient",
			TransientData: &TransientDataServerMessage{
				Type: "initial",
				Data: t.data,
			},
		}
		listener.SendMessage(msg)
	}
}

// RemoveListener removes a previously registered listener.
func (t *TransientData) RemoveListener(listener TransientListener) { t.mu.Lock() defer t.mu.Unlock() delete(t.listeners, listener) } func (t *TransientData) updateTTL(key string, value interface{}, ttl time.Duration) { if ttl <= 0 { delete(t.timers, key) } else { t.removeAfterTTL(key, value, ttl) } } func (t *TransientData) removeAfterTTL(key string, value interface{}, ttl time.Duration) { if ttl <= 0 { return } if old, found := t.timers[key]; found { old.Stop() } timer := time.AfterFunc(ttl, func() { t.mu.Lock() defer t.mu.Unlock() t.compareAndRemove(key, value) if t.ttlCh != nil { select { case t.ttlCh <- struct{}{}: default: } } }) if t.timers == nil { t.timers = make(map[string]*time.Timer) } t.timers[key] = timer } func (t *TransientData) doSet(key string, value interface{}, prev interface{}, ttl time.Duration) { if t.data == nil { t.data = make(map[string]interface{}) } t.data[key] = value t.notifySet(key, prev, value) t.removeAfterTTL(key, value, ttl) } // Set sets a new value for the given key and notifies listeners // if the value has been changed. func (t *TransientData) Set(key string, value interface{}) bool { return t.SetTTL(key, value, 0) } // SetTTL sets a new value for the given key with a time-to-live and notifies // listeners if the value has been changed. func (t *TransientData) SetTTL(key string, value interface{}, ttl time.Duration) bool { if value == nil { return t.Remove(key) } t.mu.Lock() defer t.mu.Unlock() prev, found := t.data[key] if found && reflect.DeepEqual(prev, value) { t.updateTTL(key, value, ttl) return false } t.doSet(key, value, prev, ttl) return true } // CompareAndSet sets a new value for the given key only for a given old value // and notifies listeners if the value has been changed. 
func (t *TransientData) CompareAndSet(key string, old, value interface{}) bool {
	return t.CompareAndSetTTL(key, old, value, 0)
}

// CompareAndSetTTL sets a new value for the given key with a time-to-live,
// only for a given old value and notifies listeners if the value has been
// changed.
//
// A nil value is equivalent to CompareAndRemove. A nil "old" requires the key
// to be absent; a non-nil "old" requires the stored value to be deeply equal
// to it. Returns true if the value was stored.
func (t *TransientData) CompareAndSetTTL(key string, old, value interface{}, ttl time.Duration) bool {
	if value == nil {
		return t.CompareAndRemove(key, old)
	}

	t.mu.Lock()
	defer t.mu.Unlock()

	current, exists := t.data[key]
	switch {
	case old == nil:
		// Only allowed to create the entry, not to overwrite one.
		if exists {
			return false
		}
	default:
		// Only allowed to replace the exact expected value.
		if !exists || !reflect.DeepEqual(current, old) {
			return false
		}
	}

	t.doSet(key, value, current, ttl)
	return true
}

// doRemove deletes the entry, cancels any pending TTL timer and notifies
// listeners. Callers hold t.mu.
func (t *TransientData) doRemove(key string, prev interface{}) {
	delete(t.data, key)
	if timer, pending := t.timers[key]; pending {
		timer.Stop()
		delete(t.timers, key)
	}
	t.notifyDeleted(key, prev)
}

// Remove deletes the value with the given key and notifies listeners
// if the key was removed.
func (t *TransientData) Remove(key string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()

	current, exists := t.data[key]
	if !exists {
		return false
	}

	t.doRemove(key, current)
	return true
}

// CompareAndRemove deletes the value with the given key if it has a given value
// and notifies listeners if the key was removed.
func (t *TransientData) CompareAndRemove(key string, old interface{}) bool {
	t.mu.Lock()
	defer t.mu.Unlock()

	return t.compareAndRemove(key, old)
}

// compareAndRemove is the lock-free core of CompareAndRemove, also used by the
// TTL timer callback. Callers hold t.mu.
func (t *TransientData) compareAndRemove(key string, old interface{}) bool {
	current, exists := t.data[key]
	if !exists || !reflect.DeepEqual(current, old) {
		return false
	}

	t.doRemove(key, current)
	return true
}

// GetData returns a copy of the internal data.
func (t *TransientData) GetData() map[string]interface{} { t.mu.Lock() defer t.mu.Unlock() result := make(map[string]interface{}) for k, v := range t.data { result[k] = v } return result } nextcloud-spreed-signaling-1.2.4/transient_data_test.go000066400000000000000000000231371460321600400233440ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2021 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "testing" "time" ) func (t *TransientData) SetTTLChannel(ch chan<- struct{}) { t.mu.Lock() defer t.mu.Unlock() t.ttlCh = ch } func Test_TransientData(t *testing.T) { data := NewTransientData() if data.Set("foo", nil) { t.Errorf("should not have set value") } if !data.Set("foo", "bar") { t.Errorf("should have set value") } if data.Set("foo", "bar") { t.Errorf("should not have set value") } if !data.Set("foo", "baz") { t.Errorf("should have set value") } if data.CompareAndSet("foo", "bar", "lala") { t.Errorf("should not have set value") } if !data.CompareAndSet("foo", "baz", "lala") { t.Errorf("should have set value") } if data.CompareAndSet("test", nil, nil) { t.Errorf("should not have set value") } if !data.CompareAndSet("test", nil, "123") { t.Errorf("should have set value") } if data.CompareAndSet("test", nil, "456") { t.Errorf("should not have set value") } if data.CompareAndRemove("test", "1234") { t.Errorf("should not have removed value") } if !data.CompareAndRemove("test", "123") { t.Errorf("should have removed value") } if data.Remove("lala") { t.Errorf("should not have removed value") } if !data.Remove("foo") { t.Errorf("should have removed value") } ttlCh := make(chan struct{}) data.SetTTLChannel(ttlCh) if !data.SetTTL("test", "1234", time.Millisecond) { t.Errorf("should have set value") } if value := data.GetData()["test"]; value != "1234" { t.Errorf("expected 1234, got %v", value) } // Data is removed after the TTL <-ttlCh if value := data.GetData()["test"]; value != nil { t.Errorf("expected no value, got %v", value) } if !data.SetTTL("test", "1234", time.Millisecond) { t.Errorf("should have set value") } if value := data.GetData()["test"]; value != "1234" { t.Errorf("expected 1234, got %v", value) } if !data.SetTTL("test", "2345", 3*time.Millisecond) { t.Errorf("should have set value") } if value := data.GetData()["test"]; value != "2345" { t.Errorf("expected 2345, got %v", value) } // Data is removed after the 
TTL only if the value still matches time.Sleep(2 * time.Millisecond) if value := data.GetData()["test"]; value != "2345" { t.Errorf("expected 2345, got %v", value) } // Data is removed after the (second) TTL <-ttlCh if value := data.GetData()["test"]; value != nil { t.Errorf("expected no value, got %v", value) } // Setting existing key will update the TTL if !data.SetTTL("test", "1234", time.Millisecond) { t.Errorf("should have set value") } if data.SetTTL("test", "1234", 3*time.Millisecond) { t.Errorf("should not have set value") } // Data still exists after the first TTL time.Sleep(2 * time.Millisecond) if value := data.GetData()["test"]; value != "1234" { t.Errorf("expected 1234, got %v", value) } // Data is removed after the (updated) TTL <-ttlCh if value := data.GetData()["test"]; value != nil { t.Errorf("expected no value, got %v", value) } } func Test_TransientMessages(t *testing.T) { hub, _, _, server := CreateHubForTest(t) ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() client1 := NewTestClient(t, server, hub) defer client1.CloseWithBye() if err := client1.SendHello(testDefaultUserId + "1"); err != nil { t.Fatal(err) } hello1, err := client1.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if err := client1.SetTransientData("foo", "bar", 0); err != nil { t.Fatal(err) } if msg, err := client1.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageError(msg, "not_in_room"); err != nil { t.Fatal(err) } } client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } hello2, err := client2.RunUntilHello(ctx) if err != nil { t.Fatal(err) } // Join room by id. roomId := "test-room" if room, err := client1.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Give message processing some time. 
time.Sleep(10 * time.Millisecond) if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } WaitForUsersJoined(ctx, t, client1, hello1, client2, hello2) session1 := hub.GetSessionByPublicId(hello1.Hello.SessionId).(*ClientSession) if session1 == nil { t.Fatalf("Session %s does not exist", hello1.Hello.SessionId) } session2 := hub.GetSessionByPublicId(hello2.Hello.SessionId).(*ClientSession) if session2 == nil { t.Fatalf("Session %s does not exist", hello2.Hello.SessionId) } // Client 1 may modify transient data. session1.SetPermissions([]Permission{PERMISSION_TRANSIENT_DATA}) // Client 2 may not modify transient data. session2.SetPermissions([]Permission{}) if err := client2.SetTransientData("foo", "bar", 0); err != nil { t.Fatal(err) } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageError(msg, "not_allowed"); err != nil { t.Fatal(err) } } if err := client1.SetTransientData("foo", "bar", 0); err != nil { t.Fatal(err) } if msg, err := client1.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, "foo", "bar", nil); err != nil { t.Fatal(err) } } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, "foo", "bar", nil); err != nil { t.Fatal(err) } } if err := client2.RemoveTransientData("foo"); err != nil { t.Fatal(err) } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageError(msg, "not_allowed"); err != nil { t.Fatal(err) } } // Setting the same value is ignored by the server. 
if err := client1.SetTransientData("foo", "bar", 0); err != nil { t.Fatal(err) } ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel2() if msg, err := client1.RunUntilMessage(ctx2); err != nil { if err != context.DeadlineExceeded { t.Fatal(err) } } else { t.Errorf("Expected no payload, got %+v", msg) } data := map[string]interface{}{ "hello": "world", } if err := client1.SetTransientData("foo", data, 0); err != nil { t.Fatal(err) } if msg, err := client1.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, "foo", data, "bar"); err != nil { t.Fatal(err) } } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientSet(msg, "foo", data, "bar"); err != nil { t.Fatal(err) } } if err := client1.RemoveTransientData("foo"); err != nil { t.Fatal(err) } if msg, err := client1.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientRemove(msg, "foo", data); err != nil { t.Fatal(err) } } if msg, err := client2.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else { if err := checkMessageTransientRemove(msg, "foo", data); err != nil { t.Fatal(err) } } // Removing a non-existing key is ignored by the server. 
if err := client1.RemoveTransientData("foo"); err != nil { t.Fatal(err) } ctx3, cancel3 := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel3() if msg, err := client1.RunUntilMessage(ctx3); err != nil { if err != context.DeadlineExceeded { t.Fatal(err) } } else { t.Errorf("Expected no payload, got %+v", msg) } if err := client1.SetTransientData("abc", data, 10*time.Millisecond); err != nil { t.Fatal(err) } client3 := NewTestClient(t, server, hub) defer client3.CloseWithBye() if err := client3.SendHello(testDefaultUserId + "3"); err != nil { t.Fatal(err) } hello3, err := client3.RunUntilHello(ctx) if err != nil { t.Fatal(err) } if room, err := client3.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } _, ignored, err := client3.RunUntilJoinedAndReturn(ctx, hello1.Hello, hello2.Hello, hello3.Hello) if err != nil { t.Fatal(err) } var msg *ServerMessage if len(ignored) == 0 { if msg, err = client3.RunUntilMessage(ctx); err != nil { t.Fatal(err) } } else if len(ignored) == 1 { msg = ignored[0] } else { t.Fatalf("Received too many messages: %+v", ignored) } if err := checkMessageTransientInitial(msg, map[string]interface{}{ "abc": data, }); err != nil { t.Fatal(err) } time.Sleep(10 * time.Millisecond) if msg, err = client3.RunUntilMessage(ctx); err != nil { t.Fatal(err) } else if err := checkMessageTransientRemove(msg, "abc", data); err != nil { t.Fatal(err) } } nextcloud-spreed-signaling-1.2.4/vendor_helper_test.go000066400000000000000000000022341460321600400231730ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2022 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling // Import modules that would otherwise not be detected by "go mod vendor". import ( _ "github.com/mailru/easyjson" _ "github.com/mailru/easyjson/bootstrap" _ "github.com/mailru/easyjson/gen" _ "github.com/mailru/easyjson/parser" _ "google.golang.org/protobuf/compiler/protogen" ) nextcloud-spreed-signaling-1.2.4/virtualsession.go000066400000000000000000000220041460321600400223670ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. * Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ package signaling import ( "context" "encoding/json" "log" "net/url" "sync/atomic" "time" ) const ( FLAG_MUTED_SPEAKING = 1 FLAG_MUTED_LISTENING = 2 FLAG_TALKING = 4 ) type VirtualSession struct { hub *Hub session *ClientSession privateId string publicId string data *SessionIdData room atomic.Pointer[Room] sessionId string userId string userData *json.RawMessage inCall Flags flags Flags options *AddSessionOptions } func GetVirtualSessionId(session *ClientSession, sessionId string) string { return session.PublicId() + "|" + sessionId } func NewVirtualSession(session *ClientSession, privateId string, publicId string, data *SessionIdData, msg *AddSessionInternalClientMessage) (*VirtualSession, error) { result := &VirtualSession{ hub: session.hub, session: session, privateId: privateId, publicId: publicId, data: data, sessionId: msg.SessionId, userId: msg.UserId, userData: msg.User, options: msg.Options, } if err := session.events.RegisterSessionListener(publicId, session.Backend(), result); err != nil { return nil, err } if msg.InCall != nil { result.SetInCall(*msg.InCall) } else if !session.HasFeature(ClientFeatureInternalInCall) { result.SetInCall(FlagInCall | FlagWithPhone) } if msg.Flags != 0 { result.SetFlags(msg.Flags) } return result, nil } func (s *VirtualSession) PrivateId() string { return s.privateId } func (s *VirtualSession) PublicId() string { return s.publicId } func (s *VirtualSession) ClientType() string { return HelloClientTypeVirtual } func (s *VirtualSession) GetInCall() int { return int(s.inCall.Get()) } func (s *VirtualSession) SetInCall(inCall int) bool { if inCall < 0 { inCall = 0 } return s.inCall.Set(uint32(inCall)) } func (s *VirtualSession) Data() *SessionIdData { return s.data } func (s *VirtualSession) Backend() *Backend { return s.session.Backend() } func (s *VirtualSession) BackendUrl() string { return s.session.BackendUrl() } func (s *VirtualSession) ParsedBackendUrl() *url.URL { return s.session.ParsedBackendUrl() } func (s 
*VirtualSession) UserId() string { return s.userId } func (s *VirtualSession) UserData() *json.RawMessage { return s.userData } func (s *VirtualSession) SetRoom(room *Room) { s.room.Store(room) if room != nil { if err := s.hub.roomSessions.SetRoomSession(s, s.PublicId()); err != nil { log.Printf("Error adding virtual room session %s: %s", s.PublicId(), err) } } else { s.hub.roomSessions.DeleteRoomSession(s) } } func (s *VirtualSession) GetRoom() *Room { return s.room.Load() } func (s *VirtualSession) LeaveRoom(notify bool) *Room { room := s.GetRoom() if room == nil { return nil } s.SetRoom(nil) room.RemoveSession(s) return room } func (s *VirtualSession) IsExpired(now time.Time) bool { return false } func (s *VirtualSession) Close() { s.CloseWithFeedback(nil, nil) } func (s *VirtualSession) CloseWithFeedback(session *ClientSession, message *ClientMessage) { room := s.GetRoom() s.session.RemoveVirtualSession(s) removed := s.session.hub.removeSession(s) if removed && room != nil { go s.notifyBackendRemoved(room, session, message) } s.session.events.UnregisterSessionListener(s.PublicId(), s.session.Backend(), s) } func (s *VirtualSession) notifyBackendRemoved(room *Room, session *ClientSession, message *ClientMessage) { ctx, cancel := context.WithTimeout(context.Background(), s.hub.backendTimeout) defer cancel() if options := s.Options(); options != nil { request := NewBackendClientRoomRequest(room.Id(), s.UserId(), s.PublicId()) request.Room.Action = "leave" if options != nil { request.Room.ActorId = options.ActorId request.Room.ActorType = options.ActorType } var response BackendClientResponse if err := s.hub.backend.PerformJSONRequest(ctx, s.ParsedBackendUrl(), request, &response); err != nil { virtualSessionId := GetVirtualSessionId(s.session, s.PublicId()) log.Printf("Could not leave virtual session %s at backend %s: %s", virtualSessionId, s.BackendUrl(), err) if session != nil && message != nil { reply := message.NewErrorServerMessage(NewError("remove_failed", 
"Could not remove virtual session from backend.")) session.SendMessage(reply) } return } if response.Type == "error" { virtualSessionId := GetVirtualSessionId(s.session, s.PublicId()) if session != nil && message != nil && (response.Error == nil || response.Error.Code != "no_such_room") { log.Printf("Could not leave virtual session %s at backend %s: %+v", virtualSessionId, s.BackendUrl(), response.Error) reply := message.NewErrorServerMessage(NewError("remove_failed", response.Error.Error())) session.SendMessage(reply) } return } } else { request := NewBackendClientSessionRequest(room.Id(), "remove", s.PublicId(), &AddSessionInternalClientMessage{ UserId: s.userId, User: s.userData, }) var response BackendClientSessionResponse err := s.hub.backend.PerformJSONRequest(ctx, s.ParsedBackendUrl(), request, &response) if err != nil { log.Printf("Could not remove virtual session %s from backend %s: %s", s.PublicId(), s.BackendUrl(), err) if session != nil && message != nil { reply := message.NewErrorServerMessage(NewError("remove_failed", "Could not remove virtual session from backend.")) session.SendMessage(reply) } } } } func (s *VirtualSession) HasPermission(permission Permission) bool { return true } func (s *VirtualSession) Session() *ClientSession { return s.session } func (s *VirtualSession) SessionId() string { return s.sessionId } func (s *VirtualSession) AddFlags(flags uint32) bool { return s.flags.Add(flags) } func (s *VirtualSession) RemoveFlags(flags uint32) bool { return s.flags.Remove(flags) } func (s *VirtualSession) SetFlags(flags uint32) bool { return s.flags.Set(flags) } func (s *VirtualSession) Flags() uint32 { return s.flags.Get() } func (s *VirtualSession) Options() *AddSessionOptions { return s.options } func (s *VirtualSession) ProcessAsyncSessionMessage(message *AsyncMessage) { if message.Type == "message" && message.Message != nil { switch message.Message.Type { case "message": if message.Message.Message != nil && 
message.Message.Message.Recipient != nil && message.Message.Message.Recipient.Type == "session" && message.Message.Message.Recipient.SessionId == s.PublicId() { // The client should see his session id as recipient. message.Message.Message.Recipient = &MessageClientMessageRecipient{ Type: "session", SessionId: s.SessionId(), UserId: s.UserId(), } s.session.ProcessAsyncSessionMessage(message) } case "event": if room := s.GetRoom(); room != nil && message.Message.Event.Target == "roomlist" && message.Message.Event.Type == "disinvite" && message.Message.Event.Disinvite != nil && message.Message.Event.Disinvite.RoomId == room.Id() { log.Printf("Virtual session %s was disinvited from room %s, hanging up", s.PublicId(), room.Id()) payload := map[string]interface{}{ "type": "hangup", "hangup": map[string]string{ "reason": "disinvited", }, } data, err := json.Marshal(payload) if err != nil { log.Printf("could not marshal control payload %+v: %s", payload, err) return } s.session.ProcessAsyncSessionMessage(&AsyncMessage{ Type: "message", SendTime: message.SendTime, Message: &ServerMessage{ Type: "control", Control: &ControlServerMessage{ Recipient: &MessageClientMessageRecipient{ Type: "session", SessionId: s.SessionId(), UserId: s.UserId(), }, Data: (*json.RawMessage)(&data), }, }, }) } case "control": if message.Message.Control != nil && message.Message.Control.Recipient != nil && message.Message.Control.Recipient.Type == "session" && message.Message.Control.Recipient.SessionId == s.PublicId() { // The client should see his session id as recipient. message.Message.Control.Recipient = &MessageClientMessageRecipient{ Type: "session", SessionId: s.SessionId(), UserId: s.UserId(), } s.session.ProcessAsyncSessionMessage(message) } } } } nextcloud-spreed-signaling-1.2.4/virtualsession_test.go000066400000000000000000000531041460321600400234330ustar00rootroot00000000000000/** * Standalone signaling server for the Nextcloud Spreed app. 
* Copyright (C) 2019 struktur AG * * @author Joachim Bauch * * @license GNU AGPL version 3 or any later version * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ package signaling import ( "context" "encoding/json" "errors" "fmt" "testing" ) func TestVirtualSession(t *testing.T) { hub, _, _, server := CreateHubForTest(t) roomId := "the-room-id" emptyProperties := json.RawMessage("{}") backend := &Backend{ id: "compat", compat: true, } room, err := hub.createRoom(roomId, &emptyProperties, backend) if err != nil { t.Fatalf("Could not create room: %s", err) } defer room.Close() clientInternal := NewTestClient(t, server, hub) defer clientInternal.CloseWithBye() if err := clientInternal.SendHelloInternal(); err != nil { t.Fatal(err) } client := NewTestClient(t, server, hub) defer client.CloseWithBye() if err := client.SendHello(testDefaultUserId); err != nil { t.Fatal(err) } ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() if hello, err := clientInternal.RunUntilHello(ctx); err != nil { t.Error(err) } else { if hello.Hello.UserId != "" { t.Errorf("Expected empty user id, got %+v", hello.Hello) } if hello.Hello.SessionId == "" { t.Errorf("Expected session id, got %+v", hello.Hello) } if hello.Hello.ResumeId == "" { t.Errorf("Expected resume id, got %+v", hello.Hello) } } hello, err := client.RunUntilHello(ctx) if err != nil { t.Error(err) } if room, err := 
client.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } // Ignore "join" events. if err := client.DrainMessages(ctx); err != nil { t.Error(err) } internalSessionId := "session1" userId := "user1" msgAdd := &ClientMessage{ Type: "internal", Internal: &InternalClientMessage{ Type: "addsession", AddSession: &AddSessionInternalClientMessage{ CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{ SessionId: internalSessionId, RoomId: roomId, }, UserId: userId, Flags: FLAG_MUTED_SPEAKING, }, }, } if err := clientInternal.WriteJSON(msgAdd); err != nil { t.Fatal(err) } msg1, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } // The public session id will be generated by the server, so don't check for it. if err := client.checkMessageJoinedSession(msg1, "", userId); err != nil { t.Fatal(err) } sessionId := msg1.Event.Join[0].SessionId session := hub.GetSessionByPublicId(sessionId) if session == nil { t.Fatalf("Could not get virtual session %s", sessionId) } if session.ClientType() != HelloClientTypeVirtual { t.Errorf("Expected client type %s, got %s", HelloClientTypeVirtual, session.ClientType()) } if sid := session.(*VirtualSession).SessionId(); sid != internalSessionId { t.Errorf("Expected internal session id %s, got %s", internalSessionId, sid) } // Also a participants update event will be triggered for the virtual user. 
msg2, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } updateMsg, err := checkMessageParticipantsInCall(msg2) if err != nil { t.Error(err) } else if updateMsg.RoomId != roomId { t.Errorf("Expected room %s, got %s", roomId, updateMsg.RoomId) } else if len(updateMsg.Users) != 1 { t.Errorf("Expected one user, got %+v", updateMsg.Users) } else if sid, ok := updateMsg.Users[0]["sessionId"].(string); !ok || sid != sessionId { t.Errorf("Expected session id %s, got %+v", sessionId, updateMsg.Users[0]) } else if virtual, ok := updateMsg.Users[0]["virtual"].(bool); !ok || !virtual { t.Errorf("Expected virtual user, got %+v", updateMsg.Users[0]) } else if inCall, ok := updateMsg.Users[0]["inCall"].(float64); !ok || inCall != (FlagInCall|FlagWithPhone) { t.Errorf("Expected user in call with phone, got %+v", updateMsg.Users[0]) } msg3, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } flagsMsg, err := checkMessageParticipantFlags(msg3) if err != nil { t.Error(err) } else if flagsMsg.RoomId != roomId { t.Errorf("Expected room %s, got %s", roomId, flagsMsg.RoomId) } else if flagsMsg.SessionId != sessionId { t.Errorf("Expected session id %s, got %s", sessionId, flagsMsg.SessionId) } else if flagsMsg.Flags != FLAG_MUTED_SPEAKING { t.Errorf("Expected flags %d, got %+v", FLAG_MUTED_SPEAKING, flagsMsg.Flags) } newFlags := uint32(FLAG_TALKING) msgFlags := &ClientMessage{ Type: "internal", Internal: &InternalClientMessage{ Type: "updatesession", UpdateSession: &UpdateSessionInternalClientMessage{ CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{ SessionId: internalSessionId, RoomId: roomId, }, Flags: &newFlags, }, }, } if err := clientInternal.WriteJSON(msgFlags); err != nil { t.Fatal(err) } msg4, err := client.RunUntilMessage(ctx) if err != nil { t.Fatal(err) } flagsMsg, err = checkMessageParticipantFlags(msg4) if err != nil { t.Error(err) } else if flagsMsg.RoomId != roomId { t.Errorf("Expected room %s, got %s", roomId, 
flagsMsg.RoomId) } else if flagsMsg.SessionId != sessionId { t.Errorf("Expected session id %s, got %s", sessionId, flagsMsg.SessionId) } else if flagsMsg.Flags != newFlags { t.Errorf("Expected flags %d, got %+v", newFlags, flagsMsg.Flags) } // A new client will receive the initial flags of the virtual session. client2 := NewTestClient(t, server, hub) defer client2.CloseWithBye() if err := client2.SendHello(testDefaultUserId + "2"); err != nil { t.Fatal(err) } if _, err := client2.RunUntilHello(ctx); err != nil { t.Error(err) } if room, err := client2.JoinRoom(ctx, roomId); err != nil { t.Fatal(err) } else if room.Room.RoomId != roomId { t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId) } gotFlags := false var receivedMessages []*ServerMessage for !gotFlags { messages, err := client2.GetPendingMessages(ctx) if err != nil { t.Error(err) if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { break } } receivedMessages = append(receivedMessages, messages...) for _, msg := range messages { if msg.Type != "event" || msg.Event.Target != "participants" || msg.Event.Type != "flags" { continue } if msg.Event.Flags.RoomId != roomId { t.Errorf("Expected flags in room %s, got %s", roomId, msg.Event.Flags.RoomId) } else if msg.Event.Flags.SessionId != sessionId { t.Errorf("Expected flags for session %s, got %s", sessionId, msg.Event.Flags.SessionId) } else if msg.Event.Flags.Flags != newFlags { t.Errorf("Expected flags %d, got %d", newFlags, msg.Event.Flags.Flags) } else { gotFlags = true break } } } if !gotFlags { t.Errorf("Didn't receive initial flags in %+v", receivedMessages) } // Ignore "join" messages from second client if err := client.DrainMessages(ctx); err != nil { t.Error(err) } // When sending to a virtual session, the message is sent to the actual // client and contains a "Recipient" block with the internal session id. 
// Address the message to the public id of the virtual session; delivery is
// expected at the controlling internal session instead (checked below).
recipient := MessageClientMessageRecipient{
	Type:      "session",
	SessionId: sessionId,
}
data := "from-client-to-virtual"
if err := client.SendMessage(recipient, data); err != nil {
	t.Fatal(err)
}
msg2, err = clientInternal.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
} else if err := checkMessageType(msg2, "message"); err != nil {
	t.Fatal(err)
} else if err := checkMessageSender(hub, msg2.Message.Sender, "session", hello.Hello); err != nil {
	t.Error(err)
}
// The "Recipient" block must name the internal session id, not the public one.
if msg2.Message.Recipient == nil {
	t.Errorf("Expected recipient, got none")
} else if msg2.Message.Recipient.Type != "session" {
	t.Errorf("Expected recipient type session, got %s", msg2.Message.Recipient.Type)
} else if msg2.Message.Recipient.SessionId != internalSessionId {
	t.Errorf("Expected recipient %s, got %s", internalSessionId, msg2.Message.Recipient.SessionId)
}
// The payload must arrive unmodified.
var payload string
if err := json.Unmarshal(*msg2.Message.Data, &payload); err != nil {
	t.Error(err)
} else if payload != data {
	t.Errorf("Expected payload %s, got %s", data, payload)
}
// Removing the virtual session must be announced to the room as a "leave".
msgRemove := &ClientMessage{
	Type: "internal",
	Internal: &InternalClientMessage{
		Type: "removesession",
		RemoveSession: &RemoveSessionInternalClientMessage{
			CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
				SessionId: internalSessionId,
				RoomId:    roomId,
			},
		},
	},
}
if err := clientInternal.WriteJSON(msgRemove); err != nil {
	t.Fatal(err)
}
msg5, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}
if err := client.checkMessageRoomLeaveSession(msg5, sessionId); err != nil {
	t.Error(err)
}
}

// checkHasEntryWithInCall verifies that "message" contains a user entry for
// "sessionId" whose boolean attribute named by "entryType" (e.g. "virtual"
// or "internal" at the call sites below) is set, and whose "inCall" value
// equals "inCall". Returns a descriptive error otherwise.
func checkHasEntryWithInCall(message *RoomEventServerMessage, sessionId string, entryType string, inCall int) error {
	found := false
	for _, entry := range message.Users {
		if sid, ok := entry["sessionId"].(string); ok && sid == sessionId {
			if value, ok := entry[entryType].(bool); !ok || !value {
				return fmt.Errorf("Expected %s user, got %+v", entryType, entry)
			}
			// "inCall" is decoded from JSON, so it arrives as float64.
			if value, ok := entry["inCall"].(float64); !ok || int(value) != inCall {
				return fmt.Errorf("Expected in call %d, got %+v", inCall, entry)
			}
			found = true
			break
		}
	}
	if !found {
		return fmt.Errorf("No user with session id %s found, got %+v", sessionId, message)
	}
	return nil
}

// TestVirtualSessionCustomInCall checks that an internal session announcing
// the ClientFeatureInternalInCall feature can control its own "inCall" flags
// as well as those of the virtual sessions it manages.
func TestVirtualSessionCustomInCall(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	roomId := "the-room-id"
	emptyProperties := json.RawMessage("{}")
	backend := &Backend{
		id:     "compat",
		compat: true,
	}
	room, err := hub.createRoom(roomId, &emptyProperties, backend)
	if err != nil {
		t.Fatalf("Could not create room: %s", err)
	}
	defer room.Close()

	clientInternal := NewTestClient(t, server, hub)
	defer clientInternal.CloseWithBye()
	features := []string{
		ClientFeatureInternalInCall,
	}
	if err := clientInternal.SendHelloInternalWithFeatures(features); err != nil {
		t.Fatal(err)
	}
	client := NewTestClient(t, server, hub)
	defer client.CloseWithBye()
	if err := client.SendHello(testDefaultUserId); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	helloInternal, err := clientInternal.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	} else {
		// An internal session gets no user id, but session and resume ids.
		if helloInternal.Hello.UserId != "" {
			t.Errorf("Expected empty user id, got %+v", helloInternal.Hello)
		}
		if helloInternal.Hello.SessionId == "" {
			t.Errorf("Expected session id, got %+v", helloInternal.Hello)
		}
		if helloInternal.Hello.ResumeId == "" {
			t.Errorf("Expected resume id, got %+v", helloInternal.Hello)
		}
	}
	if room, err := clientInternal.JoinRoomWithRoomSession(ctx, roomId, ""); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}
	hello, err := client.RunUntilHello(ctx)
	if err != nil {
		t.Error(err)
	}
	if room, err := client.JoinRoom(ctx, roomId); err != nil {
		t.Fatal(err)
	} else if room.Room.RoomId != roomId {
		t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
	}

	// With the "incall" feature, joining produces one additional message: a
	// participants update with the internal session not yet in a call.
	if _, additional, err := clientInternal.RunUntilJoinedAndReturn(ctx, helloInternal.Hello, hello.Hello); err != nil {
		t.Error(err)
	} else
if len(additional) != 1 {
	t.Errorf("expected one additional message, got %+v", additional)
} else if additional[0].Type != "event" {
	t.Errorf("expected event message, got %+v", additional[0])
} else if additional[0].Event.Target != "participants" {
	t.Errorf("expected event participants message, got %+v", additional[0])
} else if additional[0].Event.Type != "update" {
	t.Errorf("expected event participants update message, got %+v", additional[0])
} else if additional[0].Event.Update.Users[0]["sessionId"].(string) != helloInternal.Hello.SessionId {
	t.Errorf("expected event update message for internal session, got %+v", additional[0])
} else if additional[0].Event.Update.Users[0]["inCall"].(float64) != 0 {
	t.Errorf("expected event update message with session not in call, got %+v", additional[0])
}
if err := client.RunUntilJoined(ctx, helloInternal.Hello, hello.Hello); err != nil {
	t.Error(err)
}

// Add a virtual session through the internal session.
internalSessionId := "session1"
userId := "user1"
msgAdd := &ClientMessage{
	Type: "internal",
	Internal: &InternalClientMessage{
		Type: "addsession",
		AddSession: &AddSessionInternalClientMessage{
			CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
				SessionId: internalSessionId,
				RoomId:    roomId,
			},
			UserId: userId,
			Flags:  FLAG_MUTED_SPEAKING,
		},
	},
}
if err := clientInternal.WriteJSON(msgAdd); err != nil {
	t.Fatal(err)
}

msg1, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// The public session id will be generated by the server, so don't check for it.
if err := client.checkMessageJoinedSession(msg1, "", userId); err != nil {
	t.Fatal(err)
}
sessionId := msg1.Event.Join[0].SessionId
session := hub.GetSessionByPublicId(sessionId)
if session == nil {
	t.Fatalf("Could not get virtual session %s", sessionId)
}
if session.ClientType() != HelloClientTypeVirtual {
	t.Errorf("Expected client type %s, got %s", HelloClientTypeVirtual, session.ClientType())
}
if sid := session.(*VirtualSession).SessionId(); sid != internalSessionId {
	t.Errorf("Expected internal session id %s, got %s", internalSessionId, sid)
}

// Also a participants update event will be triggered for the virtual user.
msg2, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// Both the internal and the virtual session start with inCall == 0 here.
updateMsg, err := checkMessageParticipantsInCall(msg2)
if err != nil {
	t.Error(err)
} else if updateMsg.RoomId != roomId {
	t.Errorf("Expected room %s, got %s", roomId, updateMsg.RoomId)
} else if len(updateMsg.Users) != 2 {
	t.Errorf("Expected two users, got %+v", updateMsg.Users)
}
if err := checkHasEntryWithInCall(updateMsg, sessionId, "virtual", 0); err != nil {
	t.Error(err)
}
if err := checkHasEntryWithInCall(updateMsg, helloInternal.Hello.SessionId, "internal", 0); err != nil {
	t.Error(err)
}

msg3, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// The initial flags of the virtual session are broadcast as a "flags" event.
flagsMsg, err := checkMessageParticipantFlags(msg3)
if err != nil {
	t.Error(err)
} else if flagsMsg.RoomId != roomId {
	t.Errorf("Expected room %s, got %s", roomId, flagsMsg.RoomId)
} else if flagsMsg.SessionId != sessionId {
	t.Errorf("Expected session id %s, got %s", sessionId, flagsMsg.SessionId)
} else if flagsMsg.Flags != FLAG_MUTED_SPEAKING {
	t.Errorf("Expected flags %d, got %+v", FLAG_MUTED_SPEAKING, flagsMsg.Flags)
}

// The internal session can change its "inCall" flags
msgInCall := &ClientMessage{
	Type: "internal",
	Internal: &InternalClientMessage{
		Type: "incall",
		InCall: &InCallInternalClientMessage{
			InCall: FlagInCall | FlagWithAudio,
		},
	},
}
if err := clientInternal.WriteJSON(msgInCall); err != nil {
	t.Fatal(err)
}

msg4, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// Only the internal session changed; the virtual session stays at 0.
updateMsg2, err := checkMessageParticipantsInCall(msg4)
if err != nil {
	t.Error(err)
} else if updateMsg2.RoomId != roomId {
	t.Errorf("Expected room %s, got %s", roomId, updateMsg2.RoomId)
} else if len(updateMsg2.Users) != 2 {
	t.Errorf("Expected two users, got %+v", updateMsg2.Users)
}
if err := checkHasEntryWithInCall(updateMsg2, sessionId, "virtual", 0); err != nil {
	t.Error(err)
}
if err := checkHasEntryWithInCall(updateMsg2, helloInternal.Hello.SessionId, "internal", FlagInCall|FlagWithAudio); err != nil {
	t.Error(err)
}

// The internal session can change the "inCall" flags of a virtual session
newInCall := FlagInCall | FlagWithPhone
msgInCall2 := &ClientMessage{
	Type: "internal",
	Internal: &InternalClientMessage{
		Type: "updatesession",
		UpdateSession: &UpdateSessionInternalClientMessage{
			CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
				SessionId: internalSessionId,
				RoomId:    roomId,
			},
			InCall: &newInCall,
		},
	},
}
if err := clientInternal.WriteJSON(msgInCall2); err != nil {
	t.Fatal(err)
}

msg5, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// Now the virtual session carries the new value; the internal session keeps its own.
updateMsg3, err := checkMessageParticipantsInCall(msg5)
if err != nil {
	t.Error(err)
} else if updateMsg3.RoomId != roomId {
	t.Errorf("Expected room %s, got %s", roomId, updateMsg3.RoomId)
} else if len(updateMsg3.Users) != 2 {
	t.Errorf("Expected two users, got %+v", updateMsg3.Users)
}
if err := checkHasEntryWithInCall(updateMsg3, sessionId, "virtual", newInCall); err != nil {
	t.Error(err)
}
if err := checkHasEntryWithInCall(updateMsg3, helloInternal.Hello.SessionId, "internal", FlagInCall|FlagWithAudio); err != nil {
	t.Error(err)
}
}

// TestVirtualSessionCleanup checks that virtual sessions are removed from
// the room when the controlling internal session disconnects.
func TestVirtualSessionCleanup(t *testing.T) {
	hub, _, _, server := CreateHubForTest(t)

	roomId := "the-room-id"
	emptyProperties := json.RawMessage("{}")
	backend := &Backend{
		id:     "compat",
		compat: true,
	}
	room, err := hub.createRoom(roomId, &emptyProperties, backend)
	if
err != nil {
	t.Fatalf("Could not create room: %s", err)
}
defer room.Close()

clientInternal := NewTestClient(t, server, hub)
defer clientInternal.CloseWithBye()
if err := clientInternal.SendHelloInternal(); err != nil {
	t.Fatal(err)
}
client := NewTestClient(t, server, hub)
defer client.CloseWithBye()
if err := client.SendHello(testDefaultUserId); err != nil {
	t.Fatal(err)
}

ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()

if hello, err := clientInternal.RunUntilHello(ctx); err != nil {
	t.Error(err)
} else {
	// An internal session gets no user id, but session and resume ids.
	if hello.Hello.UserId != "" {
		t.Errorf("Expected empty user id, got %+v", hello.Hello)
	}
	if hello.Hello.SessionId == "" {
		t.Errorf("Expected session id, got %+v", hello.Hello)
	}
	if hello.Hello.ResumeId == "" {
		t.Errorf("Expected resume id, got %+v", hello.Hello)
	}
}
if _, err := client.RunUntilHello(ctx); err != nil {
	t.Error(err)
}
if room, err := client.JoinRoom(ctx, roomId); err != nil {
	t.Fatal(err)
} else if room.Room.RoomId != roomId {
	t.Fatalf("Expected room %s, got %s", roomId, room.Room.RoomId)
}

// Ignore "join" events.
if err := client.DrainMessages(ctx); err != nil {
	t.Error(err)
}

// Add a virtual session through the internal session.
// NOTE(review): no explicit "InCall" is set here, yet the update below
// expects FlagInCall|FlagWithPhone — presumably the server-side default for
// virtual sessions; confirm against the AddSession handling.
internalSessionId := "session1"
userId := "user1"
msgAdd := &ClientMessage{
	Type: "internal",
	Internal: &InternalClientMessage{
		Type: "addsession",
		AddSession: &AddSessionInternalClientMessage{
			CommonSessionInternalClientMessage: CommonSessionInternalClientMessage{
				SessionId: internalSessionId,
				RoomId:    roomId,
			},
			UserId: userId,
			Flags:  FLAG_MUTED_SPEAKING,
		},
	},
}
if err := clientInternal.WriteJSON(msgAdd); err != nil {
	t.Fatal(err)
}

msg1, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// The public session id will be generated by the server, so don't check for it.
if err := client.checkMessageJoinedSession(msg1, "", userId); err != nil {
	t.Fatal(err)
}
sessionId := msg1.Event.Join[0].SessionId
session := hub.GetSessionByPublicId(sessionId)
if session == nil {
	t.Fatalf("Could not get virtual session %s", sessionId)
}
if session.ClientType() != HelloClientTypeVirtual {
	t.Errorf("Expected client type %s, got %s", HelloClientTypeVirtual, session.ClientType())
}
if sid := session.(*VirtualSession).SessionId(); sid != internalSessionId {
	t.Errorf("Expected internal session id %s, got %s", internalSessionId, sid)
}

// Also a participants update event will be triggered for the virtual user.
msg2, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

updateMsg, err := checkMessageParticipantsInCall(msg2)
if err != nil {
	t.Error(err)
} else if updateMsg.RoomId != roomId {
	t.Errorf("Expected room %s, got %s", roomId, updateMsg.RoomId)
} else if len(updateMsg.Users) != 1 {
	t.Errorf("Expected one user, got %+v", updateMsg.Users)
} else if sid, ok := updateMsg.Users[0]["sessionId"].(string); !ok || sid != sessionId {
	t.Errorf("Expected session id %s, got %+v", sessionId, updateMsg.Users[0])
} else if virtual, ok := updateMsg.Users[0]["virtual"].(bool); !ok || !virtual {
	t.Errorf("Expected virtual user, got %+v", updateMsg.Users[0])
} else if inCall, ok := updateMsg.Users[0]["inCall"].(float64); !ok || inCall != (FlagInCall|FlagWithPhone) {
	t.Errorf("Expected user in call with phone, got %+v", updateMsg.Users[0])
}

msg3, err := client.RunUntilMessage(ctx)
if err != nil {
	t.Fatal(err)
}

// The initial flags of the virtual session are broadcast as a "flags" event.
flagsMsg, err := checkMessageParticipantFlags(msg3)
if err != nil {
	t.Error(err)
} else if flagsMsg.RoomId != roomId {
	t.Errorf("Expected room %s, got %s", roomId, flagsMsg.RoomId)
} else if flagsMsg.SessionId != sessionId {
	t.Errorf("Expected session id %s, got %s", sessionId, flagsMsg.SessionId)
} else if flagsMsg.Flags != FLAG_MUTED_SPEAKING {
	t.Errorf("Expected flags %d, got %+v", FLAG_MUTED_SPEAKING, flagsMsg.Flags)
}

// The virtual sessions are closed when the parent session is deleted.
clientInternal.CloseWithBye()

if msg2, err := client.RunUntilMessage(ctx); err != nil {
	t.Fatal(err)
} else if err := client.checkMessageRoomLeaveSession(msg2, sessionId); err != nil {
	t.Error(err)
}
}