aiortc-1.6.0/.gitattributes
*.bin binary
*.ulaw binary
aiortc-1.6.0/.github/ISSUE_TEMPLATE.rst
Before filing an issue please verify the following:
* Check whether there is already an existing issue for the same topic.
* Ensure you are actually reporting an issue related to ``aiortc``. The goal
of the issue tracker is not to provide general guidance about WebRTC or free
debugging of your code.
* Clearly state whether the issue you are reporting can be reproduced with one
of the examples provided with ``aiortc`` *without any changes*.
* Be considerate to the maintainers. ``aiortc`` is provided on a best-effort
  basis; there is no guarantee your issue will be addressed.
aiortc-1.6.0/.github/workflows/issues.yml
name: issues
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
with:
stale-issue-label: stale
stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.'
days-before-stale: 120
days-before-close: 14
days-before-pr-stale: -1
days-before-pr-close: -1
aiortc-1.6.0/.github/workflows/tests.yml
name: tests
on: [push, pull_request]
jobs:
docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install OS packages
run: |
sudo apt-get update
sudo apt-get install libopus-dev libvpx-dev
- name: Build documentation
run: |
pip install . -r requirements/doc.txt
make -C docs html SPHINXOPTS=-W
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install packages
run: pip install black mypy ruff
- name: Run linters
run: |
ruff examples src tests
black --check --diff examples src tests
test:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python:
- '3.12'
- '3.11'
- '3.10'
- '3.9'
- '3.8'
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
- name: Install OS packages and disable firewall
if: matrix.os == 'macos-latest'
run: |
brew update
brew install opus libvpx
sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off
- name: Install OS packages
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install libopus-dev libvpx-dev
- name: Run tests
run: |
python -m pip install -U pip setuptools wheel
pip install .[dev]
coverage run -m unittest discover -v
coverage xml
shell: bash
- name: Upload coverage report
uses: codecov/codecov-action@v3
package-source:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Build source package
run: |
pip install -U build
python -m build --sdist
- name: Upload source package
uses: actions/upload-artifact@v3
with:
name: dist
path: dist/
package-wheel:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
include:
- os: macos-latest
arch: arm64
- os: macos-latest
arch: x86_64
- os: ubuntu-latest
arch: aarch64
- os: ubuntu-latest
arch: i686
- os: ubuntu-latest
arch: x86_64
- os: windows-latest
arch: AMD64
- os: windows-latest
arch: x86
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install QEMU
if: matrix.os == 'ubuntu-latest'
uses: docker/setup-qemu-action@v3
- name: Build wheels
env:
CIBW_ARCHS: ${{ matrix.arch }}
CIBW_BEFORE_BUILD: python scripts/fetch-vendor.py /tmp/vendor
CIBW_BEFORE_BUILD_WINDOWS: python scripts\fetch-vendor.py C:\cibw\vendor
CIBW_ENVIRONMENT: CFLAGS=-I/tmp/vendor/include LDFLAGS=-L/tmp/vendor/lib
CIBW_ENVIRONMENT_WINDOWS: INCLUDE=C:\\cibw\\vendor\\include LIB=C:\\cibw\\vendor\\lib
CIBW_SKIP: '*-musllinux*'
run: |
pip install cibuildwheel
cibuildwheel --output-dir dist
shell: bash
- name: Upload wheels
uses: actions/upload-artifact@v3
with:
name: dist
path: dist/
publish:
runs-on: ubuntu-latest
needs: [lint, test, package-source, package-wheel]
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v3
with:
name: dist
path: dist/
- name: Publish to PyPI
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_TOKEN }}
aiortc-1.6.0/.gitignore
*.egg-info
*.pyc
*.so
.coverage
.eggs
.idea
.mypy_cache
.vscode
/build
/dist
/docs/_build
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
aiortc-1.6.0/.readthedocs.yaml
version: 2
build:
apt_packages:
- libopus-dev
- libvpx-dev
os: ubuntu-22.04
tools:
python: "3.11"
formats:
- pdf
python:
install:
- method: pip
path: .
- requirements: requirements/doc.txt
aiortc-1.6.0/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jeremy.laine@m4x.org. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
aiortc-1.6.0/LICENSE
Copyright (c) 2018-2023 Jeremy Lainé.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of aiortc nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
aiortc-1.6.0/MANIFEST.in
include LICENSE
recursive-include docs *.py *.rst Makefile
recursive-include examples *.html *.py *.rst *.wav
recursive-include src/_cffi_src *.py
recursive-include tests *.bin *.py
aiortc-1.6.0/README.rst
aiortc
======
|rtd| |pypi-v| |pypi-pyversions| |pypi-l| |tests| |codecov| |gitter|
.. |rtd| image:: https://readthedocs.org/projects/aiortc/badge/?version=latest
:target: https://aiortc.readthedocs.io/
.. |pypi-v| image:: https://img.shields.io/pypi/v/aiortc.svg
:target: https://pypi.python.org/pypi/aiortc
.. |pypi-pyversions| image:: https://img.shields.io/pypi/pyversions/aiortc.svg
:target: https://pypi.python.org/pypi/aiortc
.. |pypi-l| image:: https://img.shields.io/pypi/l/aiortc.svg
:target: https://pypi.python.org/pypi/aiortc
.. |tests| image:: https://github.com/aiortc/aiortc/workflows/tests/badge.svg
:target: https://github.com/aiortc/aiortc/actions
.. |codecov| image:: https://img.shields.io/codecov/c/github/aiortc/aiortc.svg
:target: https://codecov.io/gh/aiortc/aiortc
.. |gitter| image:: https://img.shields.io/gitter/room/aiortc/Lobby.svg
:target: https://gitter.im/aiortc/Lobby
What is ``aiortc``?
-------------------
``aiortc`` is a library for `Web Real-Time Communication (WebRTC)`_ and
`Object Real-Time Communication (ORTC)`_ in Python. It is built on top of
``asyncio``, Python's standard asynchronous I/O framework.
The API closely follows its JavaScript counterpart while using pythonic
constructs:
- promises are replaced by coroutines
- events are emitted using ``pyee.EventEmitter``
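For example, a minimal sketch (with no signaling, just printing the SDP offer)
that creates a peer connection, opens a data channel and generates an offer
could look like this:

.. code:: python

    import asyncio

    from aiortc import RTCPeerConnection

    async def create_offer():
        # Promises become coroutines: the calls below are awaited.
        pc = RTCPeerConnection()
        channel = pc.createDataChannel("chat")

        # Events are handled with pyee-style decorators.
        @channel.on("open")
        def on_open():
            channel.send("hello")

        await pc.setLocalDescription(await pc.createOffer())
        print(pc.localDescription.sdp)
        await pc.close()

    asyncio.run(create_offer())

In a real application the offer would be sent to the remote peer over your own
signaling channel, as the examples in this repository demonstrate.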
To learn more about ``aiortc`` please `read the documentation`_.
.. _Web Real-Time Communication (WebRTC): https://webrtc.org/
.. _Object Real-Time Communication (ORTC): https://ortc.org/
.. _read the documentation: https://aiortc.readthedocs.io/en/latest/
Why should I use ``aiortc``?
----------------------------
The main WebRTC and ORTC implementations are either built into web browsers,
or come in the form of native code. While they are extensively battle tested,
their internals are complex and they do not provide Python bindings.
Furthermore they are tightly coupled to a media stack, making it hard to plug
in audio or video processing algorithms.
In contrast, the ``aiortc`` implementation is fairly simple and readable. As
such it is a good starting point for programmers wishing to understand how
WebRTC works or tinker with its internals. It is also easy to create innovative
products by leveraging the extensive modules available in the Python ecosystem.
For instance you can build a full server handling both signaling and data
channels or apply computer vision algorithms to video frames using OpenCV.
Furthermore, a lot of effort has gone into writing an extensive test suite for
the ``aiortc`` code to ensure best-in-class code quality.
Implementation status
---------------------
``aiortc`` allows you to exchange audio, video and data channels and
interoperability is regularly tested against both Chrome and Firefox. Here are
some of its features:
- SDP generation / parsing
- Interactive Connectivity Establishment, with half-trickle and mDNS support
- DTLS key and certificate generation
- DTLS handshake, encryption / decryption (for SCTP)
- SRTP keying, encryption and decryption for RTP and RTCP
- Pure Python SCTP implementation
- Data Channels
- Sending and receiving audio (Opus / PCMU / PCMA)
- Sending and receiving video (VP8 / H.264)
- Bundling audio / video / data channels
- RTCP reports, including NACK / PLI to recover from packet loss
Installing
----------
Since release 0.9.28 binary wheels are available on PyPI for Linux, Mac and
Windows. The easiest way to install ``aiortc`` is to run:
.. code:: bash
pip install aiortc
Building from source
--------------------
If there are no wheels for your system or if you wish to build aiortc from
source you will need a couple of libraries installed on your system:
- OpenSSL 1.0.2 or greater
- FFmpeg 4.0 or greater
- LibVPX for video encoding / decoding
- Opus for audio encoding / decoding
Linux
.....
On Debian/Ubuntu run:
.. code:: bash
apt install libavdevice-dev libavfilter-dev libopus-dev libvpx-dev pkg-config
`pylibsrtp` comes with binary wheels for most platforms, but if it needs to be
built from source you will also need to run:
.. code:: bash
apt install libsrtp2-dev
OS X
....
On OS X run:
.. code:: bash
brew install ffmpeg opus libvpx pkg-config
License
-------
``aiortc`` is released under the `BSD license`_.
.. _BSD license: https://aiortc.readthedocs.io/en/latest/license.html
aiortc-1.6.0/docs/Makefile
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = aiortc
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
aiortc-1.6.0/docs/_static/aiortc.svg
aiortc-1.6.0/docs/api.rst
API Reference
=============
.. automodule:: aiortc
WebRTC
------
.. autoclass:: RTCPeerConnection
:members:
.. autoclass:: RTCSessionDescription
:members:
.. autoclass:: RTCConfiguration
:members:
Interactive Connectivity Establishment (ICE)
--------------------------------------------
.. autoclass:: RTCIceCandidate
:members:
.. autoclass:: RTCIceGatherer
:members:
.. autoclass:: RTCIceTransport
:members:
.. autoclass:: RTCIceParameters
:members:
.. autoclass:: RTCIceServer
:members:
Datagram Transport Layer Security (DTLS)
----------------------------------------
.. autoclass:: RTCCertificate()
:members:
.. autoclass:: RTCDtlsTransport
:members:
.. autoclass:: RTCDtlsParameters()
:members:
.. autoclass:: RTCDtlsFingerprint()
:members:
Real-time Transport Protocol (RTP)
----------------------------------
.. autoclass:: RTCRtpReceiver
:members:
.. autoclass:: RTCRtpSender
:members:
.. autoclass:: RTCRtpTransceiver
:members:
.. autoclass:: RTCRtpSynchronizationSource()
:members:
.. autoclass:: RTCRtpCapabilities()
:members:
.. autoclass:: RTCRtpCodecCapability()
:members:
.. autoclass:: RTCRtpHeaderExtensionCapability()
:members:
.. autoclass:: RTCRtpParameters()
:members:
.. autoclass:: RTCRtpCodecParameters()
:members:
.. autoclass:: RTCRtcpParameters()
:members:
Stream Control Transmission Protocol (SCTP)
-------------------------------------------
.. autoclass:: RTCSctpTransport
:members:
.. autoclass:: RTCSctpCapabilities
:members:
Data channels
-------------
.. autoclass:: RTCDataChannel(transport, parameters)
:members:
.. autoclass:: RTCDataChannelParameters()
:members:
Media
-----
.. autoclass:: MediaStreamTrack
:members:
Statistics
----------
.. autoclass:: RTCStatsReport()
.. autoclass:: RTCInboundRtpStreamStats()
:members:
.. autoclass:: RTCOutboundRtpStreamStats()
:members:
.. autoclass:: RTCRemoteInboundRtpStreamStats()
:members:
.. autoclass:: RTCRemoteOutboundRtpStreamStats()
:members:
.. autoclass:: RTCTransportStats()
:members:
aiortc-1.6.0/docs/changelog.rst
Changelog
=========
.. currentmodule:: aiortc
1.6.0
-----
* Build wheels using `Py_LIMITED_API` to make them compatible with future Python versions.
* Build wheels using opus 1.4 and vpx 1.13.1.
* Use unique IDs for audio and video header extensions.
* Allow :class:`aiortc.contrib.media.MediaRecorder` to record audio from pulse.
1.5.0
-----
* Make H.264 send a full picture when picture loss occurs.
* Fix TURN over TCP by updating `aioice` to 0.9.0.
* Make use of the `ifaddr` package instead of the unmaintained `netifaces` package.
1.4.0
-----
* Build wheels for Python 3.11.
* Allow :class:`aiortc.contrib.media.MediaPlayer` to send media without transcoding.
* Allow :class:`aiortc.contrib.media.MediaPlayer` to specify a timeout when opening media.
* Make :class:`aiortc.RTCSctpTransport` transmit packets sooner to reduce datachannel latency.
* Refactor :class:`aiortc.RTCDtlsTransport` to use PyOpenSSL.
* Make :class:`aiortc.RTCPeerConnection` log sent and received SDP when using verbose logging.
1.3.2
-----
* Limit size of NACK reports to avoid excessive packet size.
* Improve H.264 codec matching.
* Determine video size from first frame received by :class:`aiortc.contrib.media.MediaRecorder`.
* Fix a deprecation warning when using `av` >= 9.1.0.
* Tolerate STUN URLs containing a `protocol` querystring argument.
1.3.1
-----
* Build wheels for aarch64 on Linux.
* Adapt :class:`aiortc.contrib.media.MediaPlayer` for PyAV 9.x.
* Ensure the H.264 encoder does not produce B-frames by resetting the picture type.
1.3.0
-----
* Build wheels for Python 3.10 and for arm64 on Mac.
* Build wheels against `libvpx` 1.10.
* Add support for looping in :class:`aiortc.contrib.media.MediaPlayer`.
* Add unbuffered option to :class:`aiortc.contrib.media.MediaRelay`.
* Calculate audio energy and send in RTP header extension.
* Fix a race condition in RTP sender/receiver shutdown.
* Improve performance of H.264 bitstream splitting code.
* Update imports for `pyee` version 9.x.
* Fully switch to `google-crc32c` instead of `crc32`.
* Drop support for Python 3.6.
* Remove `apprtc` code as the service is no longer publicly hosted.
1.2.1
-----
* Add a clear error message when no common codec is found.
* Replace the `crc32` dependency with `google-crc32c` which offers a more
liberal license.
1.2.0
-----
* Fix jitter buffer to avoid severe picture corruption under packet loss and
send Picture Loss Indication (PLI) when needed.
* Make H.264 encoder honour the bitrate from the bandwidth estimator.
* Add support for hardware-accelerated H.264 encoding on Raspberry Pi 4 using
the `h264_omx` codec.
* Add :class:`aiortc.contrib.media.MediaRelay` class to allow sending media
tracks to multiple consumers.
1.1.2
-----
* Add :attr:`RTCPeerConnection.connectionState` property.
* Correctly detect RTCIceTransport `"failed"` state.
* Correctly route RTP packets when there are multiple tracks of the same kind.
* Use full module name to name loggers.
1.1.1
-----
* Defer adding remote candidates until after transport bundling to avoid
unnecessary mDNS lookups.
1.1.0
-----
* Add support for resolving mDNS candidates.
* Improve support for TURN, especially long-lived connections.
1.0.0
-----
Breaking
........
* Make :meth:`RTCPeerConnection.addIceCandidate` a coroutine.
* Make :meth:`RTCIceTransport.addRemoteCandidate` a coroutine.
Media
.....
* Handle SSRC attributes in SDP containing a colon (#372).
* Limit number of H.264 NALU per packet (#394, #426).
Examples
........
* `server` make it possible to specify bind address (#347).
0.9.28
------
Provide binary wheels for Linux, Mac and Windows on PyPI.
0.9.27
------
Data channels
.............
* Add :attr:`RTCSctpTransport.maxChannels` property.
* Recycle stream IDs (#256).
* Correctly close data channel when SCTP is not established (#300).
Media
.....
* Add :attr:`RTCRtpReceiver.track` property (#298).
* Fix a crash in `AimdRateControl` (#295).
0.9.26
------
DTLS
....
* Drop support for OpenSSL < 1.0.2.
Examples
........
* `apprtc` fix handling of empty "candidate" message.
Media
.....
* Fix a MediaPlayer crash when stopping one track of a multi-track file (#237, #274).
* Fix a MediaPlayer error when stopping a track while waiting for the next frame.
* Make `RTCRtpSender` resilient to exceptions raised by media stream tracks (#283).
0.9.25
------
Media
.....
* Do not repeatedly send key frames after receiving a PLI.
SDP
...
* Do not try to determine track ID if there is no Msid.
* Accept a star in rtcp-fb attributes.
0.9.24
------
Peer connection
...............
* Assign DTLS role based on the SDP negotiation, not the resolved ICE role.
* When the peer is ICE lite, adopt the ICE controlling role, and do not use
  aggressive nomination.
* Do not close transport on `setRemoteDescription` if media and data are
bundled.
* Set RemoteStreamTrack.id based on the Msid.
Media
.....
* Support alsa hardware output in MediaRecorder.
SDP
...
* Add support for the `ice-lite` attribute.
* Add support for receiving session-level `ice-ufrag`, `ice-pwd` and `setup`
attributes.
Miscellaneous
.............
* Switch from `attrs` to standard Python `dataclasses`.
* Use PEP-526 style variable annotations instead of comments.
0.9.23
------
* Drop support for Python 3.5.
* Drop dependency on PyOpenSSL.
* Use PyAV >= 7.0.0.
* Add partial type hints.
0.9.22
------
DTLS
....
* Display exception if data handler fails.
Examples
........
* `server` and `webcam` : add playsinline attribute for iOS compatibility.
* `webcam` : make it possible to play media from a file.
Miscellaneous
.............
* Use aioice >= 0.6.15 to not fail on mDNS candidates.
* Use pyee version 6.x.
0.9.21
------
DTLS
....
* Call SSL_CTX_set_ecdh_auto for OpenSSL 1.0.2.
Media
.....
* Correctly route REMB packets to the :class:`aiortc.RTCRtpSender`.
Examples
........
* :class:`aiortc.contrib.media.MediaPlayer` : release resources (e.g. webcam) when the player stops.
* :class:`aiortc.contrib.signaling.ApprtcSignaling` : make AppRTC signaling available for more examples.
* `datachannel-cli` : make uvloop optional.
* `videostream-cli` : animate the flag with a wave effect.
* `webcam` : explicitly set frame rate to 30 fps for webcams.
0.9.20
------
Data channels
.............
* Support out-of-band negotiation and custom channel id.
Documentation
.............
* Fix documentation build by installing `crc32c` instead of `crcmod`.
Examples
........
* :class:`aiortc.contrib.media.MediaPlayer` : skip frames with no presentation timestamp (pts).
0.9.19
------
Data channels
.............
* Do not raise congestion window when it is not fully utilized.
* Fix Highest TSN Newly Acknowledged logic for striking lost chunks.
* Do not limit congestion window to 120kB, limit burst size instead.
Media
.....
* Skip RTX packets with an empty payload.
Examples
........
* `apprtc` : make the initiator send messages using an HTTP POST instead of WebSocket.
* `janus` : new example to connect to the Janus WebRTC server.
* `server` : add cartoon effect to video transforms.
0.9.18
------
DTLS
....
* Do not use DTLSv1_get_timeout after DTLS handshake completes.
Data channels
.............
* Add setter for :attr:`RTCDataChannel.bufferedAmountLowThreshold`.
* Use `crc32c` package instead of `crcmod`, it provides better performance.
* Improve parsing and serialization code performance.
* Disable logging code if it is not used to improve performance.
0.9.17
------
DTLS
....
* Do not bomb if SRTP is received before DTLS handshake completes.
Data channels
.............
* Implement unordered delivery, so that the `ordered` option is honoured.
* Implement partial reliability, so that the `maxRetransmits` and `maxPacketLifeTime` options are honoured.
Media
.....
* Put all tracks in the same stream for now, fixes breakage introduced in 0.9.14.
* Use case-insensitive comparison for codec names.
* Use a=msid attribute in SDP instead of SSRC-level attributes.
Examples
........
* `server` : make it possible to select unreliable mode for data channels.
* `server` : print the round-trip time for data channel messages.
0.9.16
------
DTLS
....
* Log OpenSSL errors if the DTLS handshake fails.
* Fix DTLS handshake in server mode with OpenSSL < 1.1.0.
Media
.....
* Add :meth:`RTCRtpReceiver.getCapabilities` and :meth:`RTCRtpSender.getCapabilities`.
* Add :meth:`RTCRtpReceiver.getSynchronizationSources`.
* Add :meth:`RTCRtpTransceiver.setCodecPreferences`.
Examples
........
* `server` : make it possible to force audio codec.
* `server` : shutdown cleanly on Chrome which lacks :meth:`RTCRtpTransceiver.stop`.
0.9.15
------
Data channels
.............
* Emit a warning if the crcmod C extension is not present.
Media
.....
* Support subsequent offer / answer exchanges.
* Route RTCP parameters to RTP receiver and sender independently.
* Fix a regression when the remote SSRC are not known.
* Fix VP8 descriptor parsing errors detected by fuzzing.
* Fix H264 descriptor parsing errors detected by fuzzing.
0.9.14
------
Media
.....
* Add support for RTX retransmission packets.
* Fix RTP and RTCP parsing errors detected by fuzzing.
* Use case-insensitive comparison for hash algorithm in SDP, fixes interoperability with Asterisk.
* Offer NACK PLI and REMB feedback mechanisms for H.264.
0.9.13
------
Data channels
.............
* Raise an exception if :meth:`RTCDataChannel.send` is called when readyState is not `'open'`.
* Do not use stream sequence number for unordered data channels.
Media
.....
* Set VP8 target bitrate according to Receiver Estimated Maximum Bandwidth.
Examples
........
* Correctly handle encoding in copy-and-paste signaling.
* `server` : add command line options to use HTTPS.
* `webcam` : add command line options to use HTTPS.
* `webcam` : add code to open webcam on OS X.
0.9.12
------
* Rework code in order to facilitate garbage collection and avoid memory leaks.
0.9.11
------
Media
.....
* Make AudioStreamTrack and VideoStreamTrack produce empty frames more regularly.
Examples
........
* Fix a regression in copy-and-paste signaling which blocked the event loop.
0.9.10
------
Peer connection
...............
* Send `raddr` and `rport` parameters for server reflexive and relayed candidates.
This is required for Firefox to accept our STUN / TURN candidates.
* Do not raise an exception if ICE or DTLS connection fails, just change state.
Media
.....
* Revert to using asyncio's `run_in_executor` to send data to the encoder, as it
  greatly reduces the response time.
* Adjust package requirements to accept PyAV < 7.0.0.
Examples
........
* `webcam` : force Chrome to use "unified-plan" semantics to enabled `addTransceiver`.
* :class:`aiortc.contrib.media.MediaPlayer` : don't sleep at all when playing from webcam.
This eliminates the constant one-second lag in the `webcam` demo.
0.9.9
-----
.. warning::
`aiortc` now uses PyAV's :class:`~av.audio.frame.AudioFrame` and
:class:`~av.video.frame.VideoFrame` classes instead of defining its own.
Media
.....
* Use a jitter buffer for incoming audio.
* Add :meth:`RTCPeerConnection.addTransceiver` method.
* Add :attr:`RTCRtpTransceiver.direction` to manage transceiver direction.
Examples
........
* `apprtc` : demonstrate the use of :class:`aiortc.contrib.media.MediaPlayer`
and :class:`aiortc.contrib.media.MediaRecorder`.
* `webcam` : new examples illustrating sending video from a webcam to a browser.
* :class:`aiortc.contrib.media.MediaPlayer` : don't sleep if a frame lacks timing information.
* :class:`aiortc.contrib.media.MediaPlayer` : remove `start()` and `stop()` methods.
* :class:`aiortc.contrib.media.MediaRecorder` : use `libx264` for encoding.
* :class:`aiortc.contrib.media.MediaRecorder` : make `start()` and `stop()` coroutines.
0.9.8
-----
Media
.....
* Add support for H.264 video, a big thank you to @dsvictor94!
* Add support for sending Receiver Estimate Maximum Bitrate (REMB) feedback.
* Add support for parsing / serializing more RTP header extensions.
* Move each media encoder / decoder to its own thread instead of using a
  thread pool.
Statistics
..........
* Add the :meth:`RTCPeerConnection.getStats()` coroutine to retrieve statistics.
* Add initial :class:`RTCTransportStats` to report transport statistics.
Examples
........
* Add new :class:`aiortc.contrib.media.MediaPlayer` class to read audio / video from a file.
* Add new :class:`aiortc.contrib.media.MediaRecorder` class to write audio / video to a file.
* Add new :class:`aiortc.contrib.media.MediaBlackhole` class to discard audio / video.
0.9.7
-----
Media
.....
* Make RemoteStreamTrack emit an "ended" event, to simplify shutting down
media consumers.
* Add RemoteStreamTrack.readyState property.
* Handle timestamp wraparound on sent RTP packets.
Packaging
.........
* Add a versioned dependency on cffi>=1.0.0 to fix Raspberry Pi builds.
0.9.6
-----
Data channels
.............
* Optimize reception for improved latency and throughput.
Media
.....
* Add initial :meth:`RTCRtpReceiver.getStats()` and :meth:`RTCRtpSender.getStats()` coroutines.
Examples
........
* `datachannel-cli`: display ping/pong roundtrip time.
0.9.5
-----
Media
.....
* Make it possible to add multiple audio or video streams.
* Implement basic RTP video packet loss detection / retransmission using RTCP NACK feedback.
* Respond to Picture Loss Indications (PLI) by sending a keyframe.
* Use shorter MID values to reduce RTP header extension overhead.
* Correctly shutdown and discard unused transports when using BUNDLE.
Examples
........
* `server` : make it possible to save received video to an AVI file.
0.9.4
-----
Peer connection
...............
* Add support for TURN over TCP.
Examples
........
* Add media and signaling helpers in `aiortc.contrib`.
* Fix OpenCV colorspace conversions.
* `apprtc` : send rotating image on video track.
0.9.3
-----
Media
.....
* Set PictureID attribute on outgoing VP8 frames.
* Negotiate and send SDES MID header extension for RTP packets.
* Fix negative packets_lost encoding for RTCP reports.
0.9.2
-----
Data channels
.............
* Numerous performance improvements in congestion control.
Examples
........
* `datachannel-filexfer`: use uvloop instead of default asyncio loop.
0.9.1
-----
Data channels
.............
* Revert making RTCDataChannel.send a coroutine.
0.9.0
-----
Media
.....
* Enable post-processing in the VP8 decoder to reduce (macro)blocking artifacts.
* Set target bitrate for VP8 encoder to 900kbps.
* Re-create VP8 encoder if frame size changes.
* Implement jitter estimation for RTCP reports.
* Avoid overflowing the DLSR field for RTCP reports.
* Raise video jitter buffer size.
Data channels
.............
* BREAKING CHANGE: make RTCDataChannel.send a coroutine.
* Support spec-compliant SDP format for datachannels, as used in Firefox 63.
* Never send a negative advertised_cwnd.
Examples
........
* `datachannel-filexfer`: new example for file transfer over a data channel.
* `datachannel-vpn`: new example for a VPN over a data channel.
* `server`: make it possible to select video resolution.
0.8.0
-----
Media
.....
* Align VP8 settings with those used by WebRTC project, which greatly improves
video quality.
* Send RTCP source description, sender report, receiver report and bye packets.
Examples
........
* `server`:
- make it possible to not transform video at all.
- allow video display to be up to 1280px wide.
* `videostream-cli`:
- fix Python 3.5 compatibility
Miscellaneous
.............
* Delay logging string interpolation to reduce cost of packet logging in
non-verbose mode.
0.7.0
-----
Peer connection
...............
* Add :meth:`RTCPeerConnection.addIceCandidate()` method to handle trickled ICE candidates.
Media
.....
* Make stop() methods of :class:`aiortc.RTCRtpReceiver`, :class:`aiortc.RTCRtpSender`
and :class:`RTCRtpTransceiver` coroutines to enable clean shutdown.
Data channels
.............
* Clean up :class:`aiortc.RTCDataChannel` shutdown sequence.
* Support receiving an SCTP `RE-CONFIG` to raise number of inbound streams.
Examples
........
* `server`:
- perform some image processing using OpenCV.
- make it possible to disable data channels.
- make demo web interface more mobile-friendly.
* `apprtc`:
- automatically create a room if no room is specified on command line.
- handle `bye` command.
0.6.0
-----
Peer connection
...............
* Make it possible to specify one STUN server and / or one TURN server.
* Add `BUNDLE` support to use a single ICE/DTLS transport for multiple media.
* Move media encoding / decoding off the main thread.
Data channels
.............
* Use SCTP `ABORT` instead of `SHUTDOWN` when stopping :class:`aiortc.RTCSctpTransport`.
* Advertise support for SCTP `RE-CONFIG` extension.
* Make :class:`aiortc.RTCDataChannel` emit `open` and `close` events.
Examples
........
* Add an example of how to connect to appr.tc.
* Capture audio frames to a WAV file in server example.
* Show datachannel open / close events in server example.
aiortc-1.6.0/docs/conf.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# aiortc documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 8 17:22:14 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx_autodoc_typehints',
'sphinxcontrib_trio',
]
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'av': ('https://pyav.org/docs/stable', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'aiortc'
copyright = u'2018-2023, Jeremy Lainé'
author = u'Jeremy Lainé'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'description': 'A library for building WebRTC and ORTC applications in Python.',
'github_button': True,
'github_user': 'aiortc',
'github_repo': 'aiortc',
'logo': 'aiortc.svg',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiortcdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'aiortc.tex', 'aiortc Documentation',
u'Jeremy Lainé', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aiortc', 'aiortc Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'aiortc', 'aiortc Documentation',
author, 'aiortc', 'One line description of project.',
'Miscellaneous'),
]
aiortc-1.6.0/docs/contributing.rst
Contributing
============
Thanks for taking the time to contribute to ``aiortc``!
Code of Conduct
---------------
This project and everyone participating in it is governed by the `Code of
Conduct`_. By participating, you are expected to uphold this code. Please
report inappropriate behavior to jeremy DOT laine AT m4x DOT org.
.. _Code of Conduct: https://github.com/aiortc/aiortc/blob/main/CODE_OF_CONDUCT.md
Contributions
-------------
Bug reports, patches and suggestions are welcome!
Please open an issue_ or send a `pull request`_.
Feedback about the examples or documentation is especially valuable, as it
makes ``aiortc`` accessible to a wider audience.
Code contributions *must* come with full unit test coverage. WebRTC is a
complex protocol stack and ensuring correct behaviour now and in the future
requires a proper investment in automated testing.
.. _issue: https://github.com/aiortc/aiortc/issues/new
.. _pull request: https://github.com/aiortc/aiortc/compare/
Questions
---------
GitHub issues aren't a good medium for handling questions. There are better
places to ask questions, for example Stack Overflow.
If you want to ask a question anyway, please make sure that:
- it's a question about ``aiortc`` and not about :mod:`asyncio`;
- it isn't answered by the documentation;
- it wasn't asked already.
A good question can be written as a suggestion to improve the documentation.
aiortc-1.6.0/docs/examples.rst
Examples
========
``aiortc`` comes with a selection of examples, which are a great starting point
for new users.
The examples can be browsed on GitHub:
https://github.com/aiortc/aiortc/tree/main/examples
aiortc-1.6.0/docs/helpers.rst
Helpers
=============
.. currentmodule:: aiortc
These classes are not part of the WebRTC or ORTC API, but provide higher-level
helpers for tasks like manipulating media streams.
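For instance, a short sketch (the file names below are placeholders) that
copies media from a file into a new recording could look like this:

.. code:: python

    import asyncio

    from aiortc.contrib.media import MediaPlayer, MediaRecorder

    async def copy_media():
        player = MediaPlayer("input.mp4")        # placeholder input file
        recorder = MediaRecorder("output.mp4")   # placeholder output file
        if player.audio:
            recorder.addTrack(player.audio)
        if player.video:
            recorder.addTrack(player.video)
        await recorder.start()
        await asyncio.sleep(10)  # record for a fixed duration in this sketch
        await recorder.stop()

    asyncio.run(copy_media())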
Media sources
-------------
.. autoclass:: aiortc.contrib.media.MediaPlayer
:members:
Media sinks
-----------
.. autoclass:: aiortc.contrib.media.MediaRecorder
:members:
.. autoclass:: aiortc.contrib.media.MediaBlackhole
:members:
Media transforms
----------------
.. autoclass:: aiortc.contrib.media.MediaRelay
:members:
aiortc-1.6.0/docs/index.rst
aiortc
=========
|pypi-v| |pypi-pyversions| |pypi-l| |tests| |codecov| |gitter|
.. |pypi-v| image:: https://img.shields.io/pypi/v/aiortc.svg
:target: https://pypi.python.org/pypi/aiortc
.. |pypi-pyversions| image:: https://img.shields.io/pypi/pyversions/aiortc.svg
:target: https://pypi.python.org/pypi/aiortc
.. |pypi-l| image:: https://img.shields.io/pypi/l/aiortc.svg
:target: https://pypi.python.org/pypi/aiortc
.. |tests| image:: https://github.com/aiortc/aiortc/workflows/tests/badge.svg
:target: https://github.com/aiortc/aiortc/actions
.. |codecov| image:: https://img.shields.io/codecov/c/github/aiortc/aiortc.svg
:target: https://codecov.io/gh/aiortc/aiortc
.. |gitter| image:: https://img.shields.io/gitter/room/aiortc/Lobby.svg
:target: https://gitter.im/aiortc/Lobby
``aiortc`` is a library for `Web Real-Time Communication (WebRTC)`_ and
`Object Real-Time Communication (ORTC)`_ in Python. It is built on top of
``asyncio``, Python's standard asynchronous I/O framework.
The API closely follows its JavaScript counterpart while using pythonic
constructs:
- promises are replaced by coroutines
- events are emitted using ``pyee.EventEmitter``
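For example, an incoming data channel and its messages can be handled with
pyee-style decorators; the sketch below assumes the peer connection's remote
description has already been set through your own signaling:

.. code:: python

    from aiortc import RTCPeerConnection

    pc = RTCPeerConnection()

    @pc.on("datachannel")
    def on_datachannel(channel):
        # Echo every incoming message back to the remote peer.
        @channel.on("message")
        def on_message(message):
            channel.send(message)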
.. _Web Real-Time Communication (WebRTC): https://webrtc.org/
.. _Object Real-Time Communication (ORTC): https://ortc.org/
Why should I use ``aiortc``?
----------------------------
The main WebRTC and ORTC implementations are either built into web browsers,
or come in the form of native code. While they are extensively battle tested,
their internals are complex and they do not provide Python bindings.
Furthermore they are tightly coupled to a media stack, making it hard to plug
in audio or video processing algorithms.
In contrast, the ``aiortc`` implementation is fairly simple and readable. As
such it is a good starting point for programmers wishing to understand how
WebRTC works or tinker with its internals. It is also easy to create innovative
products by leveraging the extensive modules available in the Python ecosystem.
For instance you can build a full server handling both signaling and data
channels or apply computer vision algorithms to video frames using OpenCV.
Furthermore, a lot of effort has gone into writing an extensive test suite for
the ``aiortc`` code to ensure best-in-class code quality.
.. toctree::
:maxdepth: 2
examples
api
helpers
contributing
changelog
license
aiortc-1.6.0/docs/license.rst
License
-------
.. literalinclude:: ../LICENSE
aiortc-1.6.0/examples/datachannel-cli/README.rst
Data channel CLI
================
This example illustrates the establishment of a data channel using an
RTCPeerConnection and a "copy and paste" signaling channel to exchange SDP.
First install the required packages:
.. code-block:: console
$ pip install aiortc
To run the example, you will need two instances of the `cli` example:
- The first takes on the role of the offerer. It generates an offer which you
must copy-and-paste to the answerer.
.. code-block:: console
$ python cli.py offer
- The second takes on the role of the answerer. When given an offer, it will
generate an answer which you must copy-and-paste to the offerer.
.. code-block:: console
$ python cli.py answer
aiortc-1.6.0/examples/datachannel-cli/cli.py
import argparse
import asyncio
import logging
import time
from aiortc import RTCIceCandidate, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.signaling import BYE, add_signaling_arguments, create_signaling
def channel_log(channel, t, message):
print("channel(%s) %s %s" % (channel.label, t, message))
def channel_send(channel, message):
channel_log(channel, ">", message)
channel.send(message)
async def consume_signaling(pc, signaling):
while True:
obj = await signaling.receive()
if isinstance(obj, RTCSessionDescription):
await pc.setRemoteDescription(obj)
if obj.type == "offer":
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
elif isinstance(obj, RTCIceCandidate):
await pc.addIceCandidate(obj)
elif obj is BYE:
print("Exiting")
break
time_start = None
def current_stamp():
global time_start
if time_start is None:
time_start = time.time()
return 0
else:
return int((time.time() - time_start) * 1000000)
async def run_answer(pc, signaling):
await signaling.connect()
@pc.on("datachannel")
def on_datachannel(channel):
channel_log(channel, "-", "created by remote party")
@channel.on("message")
def on_message(message):
channel_log(channel, "<", message)
if isinstance(message, str) and message.startswith("ping"):
# reply
channel_send(channel, "pong" + message[4:])
await consume_signaling(pc, signaling)
async def run_offer(pc, signaling):
await signaling.connect()
channel = pc.createDataChannel("chat")
channel_log(channel, "-", "created by local party")
async def send_pings():
while True:
channel_send(channel, "ping %d" % current_stamp())
await asyncio.sleep(1)
@channel.on("open")
def on_open():
asyncio.ensure_future(send_pings())
@channel.on("message")
def on_message(message):
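        # A "pong" reply echoes the ping timestamp (in microseconds);
        # use it to compute the round-trip time in milliseconds.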
channel_log(channel, "<", message)
if isinstance(message, str) and message.startswith("pong"):
elapsed_ms = (current_stamp() - int(message[5:])) / 1000
print(" RTT %.2f ms" % elapsed_ms)
# send offer
await pc.setLocalDescription(await pc.createOffer())
await signaling.send(pc.localDescription)
await consume_signaling(pc, signaling)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Data channels ping/pong")
parser.add_argument("role", choices=["offer", "answer"])
parser.add_argument("--verbose", "-v", action="count")
add_signaling_arguments(parser)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
signaling = create_signaling(args)
pc = RTCPeerConnection()
if args.role == "offer":
coro = run_offer(pc, signaling)
else:
coro = run_answer(pc, signaling)
# run event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(coro)
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(pc.close())
loop.run_until_complete(signaling.close())
aiortc-1.6.0/examples/datachannel-filexfer/README.rst
Data channel file transfer
==========================
This example illustrates sending a file over a data channel using an
RTCPeerConnection and a "copy and paste" signaling channel to exchange SDP.
First install the required packages:
.. code-block:: console
$ pip install aiortc
On Linux and Mac OS X you can also install uvloop for better performance:
.. code-block:: console
$ pip install uvloop
To run the example, you will need two instances of the `filexfer` example:
- The first takes on the role of the offerer. It generates an offer which you
must copy-and-paste to the answerer.
.. code-block:: console
$ python filexfer.py send somefile.pdf
- The second takes on the role of the answerer. When given an offer, it will
generate an answer which you must copy-and-paste to the offerer.
.. code-block:: console
$ python filexfer.py receive received.pdf
aiortc-1.6.0/examples/datachannel-filexfer/filexfer.py
import argparse
import asyncio
import logging
import time
from aiortc import RTCIceCandidate, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.signaling import BYE, add_signaling_arguments, create_signaling
# optional, for better performance
try:
import uvloop
except ImportError:
uvloop = None
async def consume_signaling(pc, signaling):
while True:
obj = await signaling.receive()
if isinstance(obj, RTCSessionDescription):
await pc.setRemoteDescription(obj)
if obj.type == "offer":
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
elif isinstance(obj, RTCIceCandidate):
await pc.addIceCandidate(obj)
elif obj is BYE:
print("Exiting")
break
async def run_answer(pc, signaling, fp):
await signaling.connect()
@pc.on("datachannel")
def on_datachannel(channel):
start = time.time()
octets = 0
@channel.on("message")
async def on_message(message):
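            # Each non-empty message is a chunk of the file; an empty
            # message signals that the sender has finished.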
nonlocal octets
if message:
octets += len(message)
fp.write(message)
else:
elapsed = time.time() - start
print(
"received %d bytes in %.1f s (%.3f Mbps)"
% (octets, elapsed, octets * 8 / elapsed / 1000000)
)
# say goodbye
await signaling.send(BYE)
await consume_signaling(pc, signaling)
async def run_offer(pc, signaling, fp):
await signaling.connect()
done_reading = False
channel = pc.createDataChannel("filexfer")
def send_data():
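        # Simple flow control: keep queueing 16 kB chunks while the channel's
        # outgoing buffer stays below bufferedAmountLowThreshold; the
        # "bufferedamountlow" event calls send_data again to resume.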
nonlocal done_reading
while (
channel.bufferedAmount <= channel.bufferedAmountLowThreshold
) and not done_reading:
data = fp.read(16384)
channel.send(data)
if not data:
done_reading = True
channel.on("bufferedamountlow", send_data)
channel.on("open", send_data)
# send offer
await pc.setLocalDescription(await pc.createOffer())
await signaling.send(pc.localDescription)
await consume_signaling(pc, signaling)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Data channel file transfer")
parser.add_argument("role", choices=["send", "receive"])
parser.add_argument("filename")
parser.add_argument("--verbose", "-v", action="count")
add_signaling_arguments(parser)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if uvloop is not None:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
signaling = create_signaling(args)
pc = RTCPeerConnection()
if args.role == "send":
fp = open(args.filename, "rb")
coro = run_offer(pc, signaling, fp)
else:
fp = open(args.filename, "wb")
coro = run_answer(pc, signaling, fp)
# run event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(coro)
except KeyboardInterrupt:
pass
finally:
fp.close()
loop.run_until_complete(pc.close())
loop.run_until_complete(signaling.close())
aiortc-1.6.0/examples/datachannel-vpn/README.rst
Data channel VPN
================
This example illustrates a layer2 VPN running over a WebRTC data channel.
First install the required packages:
.. code-block:: console
$ pip install aiortc
Permissions
-----------
This example requires the CAP_NET_ADMIN capability in order to create and
configure network interfaces. There are two ways to achieve this:
- running the script as the root user. The downside is that the script will be
run with higher privileges than actually needed.
- granting the CAP_NET_ADMIN capability to the Python interpreter. The downside
is that *all* Python scripts will get this capability so you will almost
certainly want to revert this change.
.. code-block:: console
$ sudo setcap CAP_NET_ADMIN=ep /path/to/python3
Running
-------
On the first peer:
.. code-block:: console
$ python3 vpn.py offer
On the second peer:
.. code-block:: console
$ python3 vpn.py answer
Copy-and-paste the offer from the first peer to the second peer, then
copy-and-paste the answer from the second peer to the first peer.
A new network interface will be created on each peer. You can now set up these
interfaces by using the system's network tools:
.. code-block:: console
$ ip address add 172.16.0.1/24 dev revpn-offer
and:
.. code-block:: console
$ ip address add 172.16.0.2/24 dev revpn-answer
aiortc-1.6.0/examples/datachannel-vpn/tuntap.py
import fcntl
import os
import socket
import struct
TUNSETIFF = 0x400454CA
TUNSETOWNER = TUNSETIFF + 2
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NAPI = 0x0010
IFF_NAPI_FRAGS = 0x0020
IFF_NO_PI = 0x1000
IFF_PERSIST = 0x0800
IFF_NOFILTER = 0x1000
# net/if.h
IFF_UP = 0x1
IFF_RUNNING = 0x40
IFNAMSIZ = 16
# From linux/sockios.h
SIOCGIFCONF = 0x8912
SIOCGIFINDEX = 0x8933
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCGIFHWADDR = 0x8927
SIOCSIFHWADDR = 0x8924
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFNETMASK = 0x891B
SIOCSIFNETMASK = 0x891C
SIOCETHTOOL = 0x8946
SIOCGIFMTU = 0x8921 # get MTU size
SIOCSIFMTU = 0x8922 # set MTU size
class Tun:
mtu = 1500
def __init__(self, name, mode="tap", persist=True):
self.name = name.encode()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sockfd = sock
@property
def ifflags(self):
# Get existing device flags
ifreq = struct.pack("16sh", self.name, 0)
flags = struct.unpack("16sh", fcntl.ioctl(self.sockfd, SIOCGIFFLAGS, ifreq))[1]
return flags
@ifflags.setter
def ifflags(self, flags):
ifreq = struct.pack("16sh", self.name, flags)
fcntl.ioctl(self.sockfd, SIOCSIFFLAGS, ifreq)
def get_mtu(self):
ifreq = struct.pack("16sh", self.name, 0)
self.mtu = struct.unpack("16sh", fcntl.ioctl(self.sockfd, SIOCGIFMTU, ifreq))[1]
def up(self):
"""Bring up interface. Equivalent to ifconfig [iface] up."""
# Set new flags
flags = self.ifflags | IFF_UP
self.ifflags = flags
self.get_mtu()
def down(self):
"""Bring down interface. Equivalent to ifconfig [iface] down."""
# Set new flags
flags = self.ifflags & ~IFF_UP
self.ifflags = flags
def is_up(self):
"""Return True if the interface is up, False otherwise."""
if self.ifflags & IFF_UP:
return True
else:
return False
def open(self):
"""Open file corresponding to the TUN device."""
self.fd = open("/dev/net/tun", "rb+", buffering=0)
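        # Request a persistent TAP device without the extra packet information header.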
tun_flags = IFF_TAP | IFF_NO_PI | IFF_PERSIST
ifr = struct.pack("16sH", self.name, tun_flags)
fcntl.ioctl(self.fd, TUNSETIFF, ifr)
fcntl.ioctl(self.fd, TUNSETOWNER, os.getuid())
self.ifflags = self.ifflags | IFF_RUNNING
def close(self):
if self.fd:
self.ifflags = self.ifflags & ~IFF_RUNNING
self.fd.close()
aiortc-1.6.0/examples/datachannel-vpn/vpn.py 0000664 0000000 0000000 00000005477 14532224177 0021061 0 ustar 00root root 0000000 0000000 import argparse
import asyncio
import logging
import tuntap
from aiortc import RTCIceCandidate, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.signaling import BYE, add_signaling_arguments, create_signaling
logger = logging.getLogger("vpn")
def channel_log(channel, t, message):
logger.info("channel(%s) %s %s" % (channel.label, t, repr(message)))
async def consume_signaling(pc, signaling):
while True:
obj = await signaling.receive()
if isinstance(obj, RTCSessionDescription):
await pc.setRemoteDescription(obj)
if obj.type == "offer":
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
elif isinstance(obj, RTCIceCandidate):
await pc.addIceCandidate(obj)
elif obj is BYE:
print("Exiting")
break
def tun_start(tap, channel):
tap.open()
# relay channel -> tap
channel.on("message")(tap.fd.write)
# relay tap -> channel
def tun_reader():
data = tap.fd.read(tap.mtu)
if data:
channel.send(data)
loop = asyncio.get_event_loop()
loop.add_reader(tap.fd, tun_reader)
tap.up()
async def run_answer(pc, signaling, tap):
await signaling.connect()
@pc.on("datachannel")
def on_datachannel(channel):
channel_log(channel, "-", "created by remote party")
if channel.label == "vpntap":
tun_start(tap, channel)
await consume_signaling(pc, signaling)
async def run_offer(pc, signaling, tap):
await signaling.connect()
channel = pc.createDataChannel("vpntap")
channel_log(channel, "-", "created by local party")
@channel.on("open")
def on_open():
tun_start(tap, channel)
# send offer
await pc.setLocalDescription(await pc.createOffer())
await signaling.send(pc.localDescription)
await consume_signaling(pc, signaling)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="VPN over data channel")
parser.add_argument("role", choices=["offer", "answer"])
parser.add_argument("--verbose", "-v", action="count")
add_signaling_arguments(parser)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
tap = tuntap.Tun(name="revpn-%s" % args.role)
signaling = create_signaling(args)
pc = RTCPeerConnection()
if args.role == "offer":
coro = run_offer(pc, signaling, tap)
else:
coro = run_answer(pc, signaling, tap)
# run event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(coro)
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(pc.close())
loop.run_until_complete(signaling.close())
tap.close()
aiortc-1.6.0/examples/janus/ 0000775 0000000 0000000 00000000000 14532224177 0015744 5 ustar 00root root 0000000 0000000 aiortc-1.6.0/examples/janus/README.rst 0000664 0000000 0000000 00000002511 14532224177 0017432 0 ustar 00root root 0000000 0000000 Janus video room client
=======================
This example illustrates how to connect to the Janus WebRTC server's video room.
By default it simply sends green video frames, but you can instead specify a
video file to stream to the room.
First install the required packages:
.. code-block:: console
$ pip install aiohttp aiortc
When you run the example, it will connect to Janus and join the '1234' room:
.. code-block:: console
$ python janus.py http://localhost:8088/janus
Additional options
------------------
If you want to join a different room, run:
.. code-block:: console
$ python janus.py --room 5678 http://localhost:8088/janus
If you want to play a media file instead of sending green video frames, run:
.. code-block:: console
$ python janus.py --play-from video.mp4 http://localhost:8088/janus
If you want to play an MPEGTS file containing H.264 video without decoding the frames, run:
.. code-block:: console
$ python janus.py --play-from video.ts --play-without-decoding http://localhost:8088/janus
You can generate an example of such a file using:
.. code-block:: console
$ ffmpeg -f lavfi -i testsrc=duration=20:size=640x480:rate=30 -pix_fmt yuv420p -codec:v libx264 -profile:v baseline -level 31 -f mpegts video.ts
In this case, the Janus video room must be configured to allow only a single video codec, matching the one you are sending (H.264 in the example above).
aiortc-1.6.0/examples/janus/janus.py 0000664 0000000 0000000 00000017517 14532224177 0017451 0 ustar 00root root 0000000 0000000 import argparse
import asyncio
import logging
import random
import string
import time
import aiohttp
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
from aiortc.contrib.media import MediaPlayer, MediaRecorder
pcs = set()
def transaction_id():
return "".join(random.choice(string.ascii_letters) for x in range(12))
class JanusPlugin:
def __init__(self, session, url):
self._queue = asyncio.Queue()
self._session = session
self._url = url
async def send(self, payload):
message = {"janus": "message", "transaction": transaction_id()}
message.update(payload)
async with self._session._http.post(self._url, json=message) as response:
data = await response.json()
assert data["janus"] == "ack"
response = await self._queue.get()
assert response["transaction"] == message["transaction"]
return response
class JanusSession:
def __init__(self, url):
self._http = None
self._poll_task = None
self._plugins = {}
self._root_url = url
self._session_url = None
async def attach(self, plugin_name: str) -> JanusPlugin:
message = {
"janus": "attach",
"plugin": plugin_name,
"transaction": transaction_id(),
}
async with self._http.post(self._session_url, json=message) as response:
data = await response.json()
assert data["janus"] == "success"
plugin_id = data["data"]["id"]
plugin = JanusPlugin(self, self._session_url + "/" + str(plugin_id))
self._plugins[plugin_id] = plugin
return plugin
async def create(self):
self._http = aiohttp.ClientSession()
message = {"janus": "create", "transaction": transaction_id()}
async with self._http.post(self._root_url, json=message) as response:
data = await response.json()
assert data["janus"] == "success"
session_id = data["data"]["id"]
self._session_url = self._root_url + "/" + str(session_id)
self._poll_task = asyncio.ensure_future(self._poll())
async def destroy(self):
if self._poll_task:
self._poll_task.cancel()
self._poll_task = None
if self._session_url:
message = {"janus": "destroy", "transaction": transaction_id()}
async with self._http.post(self._session_url, json=message) as response:
data = await response.json()
assert data["janus"] == "success"
self._session_url = None
if self._http:
await self._http.close()
self._http = None
async def _poll(self):
while True:
params = {"maxev": 1, "rid": int(time.time() * 1000)}
async with self._http.get(self._session_url, params=params) as response:
data = await response.json()
if data["janus"] == "event":
plugin = self._plugins.get(data["sender"], None)
if plugin:
await plugin._queue.put(data)
else:
print(data)
async def publish(plugin, player):
"""
Send video to the room.
"""
pc = RTCPeerConnection()
pcs.add(pc)
# configure media
media = {"audio": False, "video": True}
if player and player.audio:
pc.addTrack(player.audio)
media["audio"] = True
if player and player.video:
pc.addTrack(player.video)
else:
pc.addTrack(VideoStreamTrack())
# send offer
await pc.setLocalDescription(await pc.createOffer())
request = {"request": "configure"}
request.update(media)
response = await plugin.send(
{
"body": request,
"jsep": {
"sdp": pc.localDescription.sdp,
"trickle": False,
"type": pc.localDescription.type,
},
}
)
# apply answer
await pc.setRemoteDescription(
RTCSessionDescription(
sdp=response["jsep"]["sdp"], type=response["jsep"]["type"]
)
)
async def subscribe(session, room, feed, recorder):
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("track")
async def on_track(track):
print("Track %s received" % track.kind)
if track.kind == "video":
recorder.addTrack(track)
if track.kind == "audio":
recorder.addTrack(track)
# subscribe
plugin = await session.attach("janus.plugin.videoroom")
response = await plugin.send(
{"body": {"request": "join", "ptype": "subscriber", "room": room, "feed": feed}}
)
# apply offer
await pc.setRemoteDescription(
RTCSessionDescription(
sdp=response["jsep"]["sdp"], type=response["jsep"]["type"]
)
)
# send answer
await pc.setLocalDescription(await pc.createAnswer())
response = await plugin.send(
{
"body": {"request": "start"},
"jsep": {
"sdp": pc.localDescription.sdp,
"trickle": False,
"type": pc.localDescription.type,
},
}
)
await recorder.start()
async def run(player, recorder, room, session):
await session.create()
# join video room
plugin = await session.attach("janus.plugin.videoroom")
response = await plugin.send(
{
"body": {
"display": "aiortc",
"ptype": "publisher",
"request": "join",
"room": room,
}
}
)
publishers = response["plugindata"]["data"]["publishers"]
for publisher in publishers:
print("id: %(id)s, display: %(display)s" % publisher)
# send video
await publish(plugin=plugin, player=player)
# receive video
if recorder is not None and publishers:
await subscribe(
session=session, room=room, feed=publishers[0]["id"], recorder=recorder
)
# exchange media for 10 minutes
print("Exchanging media")
await asyncio.sleep(600)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Janus")
parser.add_argument("url", help="Janus root URL, e.g. http://localhost:8088/janus")
parser.add_argument(
"--room",
type=int,
default=1234,
help="The video room ID to join (default: 1234).",
    )
    parser.add_argument("--play-from", help="Read the media from a file and send it.")
    parser.add_argument("--record-to", help="Write received media to a file.")
parser.add_argument(
"--play-without-decoding",
help=(
"Read the media without decoding it (experimental). "
"For now it only works with an MPEGTS container with only H.264 video."
),
action="store_true",
)
parser.add_argument("--verbose", "-v", action="count")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
# create signaling and peer connection
session = JanusSession(args.url)
# create media source
if args.play_from:
player = MediaPlayer(args.play_from, decode=not args.play_without_decoding)
else:
player = None
# create media sink
if args.record_to:
recorder = MediaRecorder(args.record_to)
else:
recorder = None
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(
run(player=player, recorder=recorder, room=args.room, session=session)
)
except KeyboardInterrupt:
pass
finally:
if recorder is not None:
loop.run_until_complete(recorder.stop())
loop.run_until_complete(session.destroy())
# close peer connections
coros = [pc.close() for pc in pcs]
loop.run_until_complete(asyncio.gather(*coros))
aiortc-1.6.0/examples/server/ 0000775 0000000 0000000 00000000000 14532224177 0016132 5 ustar 00root root 0000000 0000000 aiortc-1.6.0/examples/server/README.rst 0000664 0000000 0000000 00000002466 14532224177 0017631 0 ustar 00root root 0000000 0000000 Audio, video and data channel server
====================================
This example illustrates establishing audio, video and a data channel with a
browser. It also performs some image processing on the video frames using
OpenCV.
Running
-------
First install the required packages:
.. code-block:: console
$ pip install aiohttp aiortc opencv-python
When you start the example, it will create an HTTP server which you
can connect to from your browser:
.. code-block:: console
$ python server.py
You can then browse to the following page with your browser:
http://127.0.0.1:8080
Once you click `Start` the browser will send the audio and video from its
webcam to the server.
The server will play a pre-recorded audio clip and send the received video back
to the browser, optionally applying a transform to it.
In parallel with the media streams, the browser periodically sends a 'ping'
message over the data channel, and the server replies with 'pong'.
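On the aiortc side, replying to these messages only requires a small data
channel handler; a minimal sketch (the full logic lives in ``server.py``,
alongside the media handling) might look like this:
.. code-block:: python
    from aiortc import RTCPeerConnection
    pc = RTCPeerConnection()
    @pc.on("datachannel")
    def on_datachannel(channel):
        @channel.on("message")
        def on_message(message):
            # Echo "ping <timestamp>" messages back as "pong <timestamp>".
            if isinstance(message, str) and message.startswith("ping"):
                channel.send("pong" + message[4:])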
Additional options
------------------
If you want to enable verbose logging, run:
.. code-block:: console
$ python server.py -v
Credits
-------
The audio file "demo-instruct.wav" was borrowed from the Asterisk
project. It is licensed as Creative Commons Attribution-Share Alike 3.0:
https://wiki.asterisk.org/wiki/display/AST/Voice+Prompts+and+Music+on+Hold+License
aiortc-1.6.0/examples/server/client.js 0000664 0000000 0000000 00000020335 14532224177 0017751 0 ustar 00root root 0000000 0000000 // get DOM elements
var dataChannelLog = document.getElementById('data-channel'),
iceConnectionLog = document.getElementById('ice-connection-state'),
iceGatheringLog = document.getElementById('ice-gathering-state'),
signalingLog = document.getElementById('signaling-state');
// peer connection
var pc = null;
// data channel
var dc = null, dcInterval = null;
function createPeerConnection() {
var config = {
sdpSemantics: 'unified-plan'
};
if (document.getElementById('use-stun').checked) {
config.iceServers = [{urls: ['stun:stun.l.google.com:19302']}];
}
pc = new RTCPeerConnection(config);
// register some listeners to help debugging
pc.addEventListener('icegatheringstatechange', function() {
iceGatheringLog.textContent += ' -> ' + pc.iceGatheringState;
}, false);
iceGatheringLog.textContent = pc.iceGatheringState;
pc.addEventListener('iceconnectionstatechange', function() {
iceConnectionLog.textContent += ' -> ' + pc.iceConnectionState;
}, false);
iceConnectionLog.textContent = pc.iceConnectionState;
pc.addEventListener('signalingstatechange', function() {
signalingLog.textContent += ' -> ' + pc.signalingState;
}, false);
signalingLog.textContent = pc.signalingState;
// connect audio / video
pc.addEventListener('track', function(evt) {
if (evt.track.kind == 'video')
document.getElementById('video').srcObject = evt.streams[0];
else
document.getElementById('audio').srcObject = evt.streams[0];
});
return pc;
}
function negotiate() {
return pc.createOffer().then(function(offer) {
return pc.setLocalDescription(offer);
}).then(function() {
// wait for ICE gathering to complete
return new Promise(function(resolve) {
if (pc.iceGatheringState === 'complete') {
resolve();
} else {
function checkState() {
if (pc.iceGatheringState === 'complete') {
pc.removeEventListener('icegatheringstatechange', checkState);
resolve();
}
}
pc.addEventListener('icegatheringstatechange', checkState);
}
});
}).then(function() {
var offer = pc.localDescription;
var codec;
codec = document.getElementById('audio-codec').value;
if (codec !== 'default') {
offer.sdp = sdpFilterCodec('audio', codec, offer.sdp);
}
codec = document.getElementById('video-codec').value;
if (codec !== 'default') {
offer.sdp = sdpFilterCodec('video', codec, offer.sdp);
}
document.getElementById('offer-sdp').textContent = offer.sdp;
return fetch('/offer', {
body: JSON.stringify({
sdp: offer.sdp,
type: offer.type,
video_transform: document.getElementById('video-transform').value
}),
headers: {
'Content-Type': 'application/json'
},
method: 'POST'
});
}).then(function(response) {
return response.json();
}).then(function(answer) {
document.getElementById('answer-sdp').textContent = answer.sdp;
return pc.setRemoteDescription(answer);
}).catch(function(e) {
alert(e);
});
}
function start() {
document.getElementById('start').style.display = 'none';
pc = createPeerConnection();
var time_start = null;
function current_stamp() {
if (time_start === null) {
time_start = new Date().getTime();
return 0;
} else {
return new Date().getTime() - time_start;
}
}
if (document.getElementById('use-datachannel').checked) {
var parameters = JSON.parse(document.getElementById('datachannel-parameters').value);
dc = pc.createDataChannel('chat', parameters);
dc.onclose = function() {
clearInterval(dcInterval);
dataChannelLog.textContent += '- close\n';
};
dc.onopen = function() {
dataChannelLog.textContent += '- open\n';
dcInterval = setInterval(function() {
var message = 'ping ' + current_stamp();
dataChannelLog.textContent += '> ' + message + '\n';
dc.send(message);
}, 1000);
};
dc.onmessage = function(evt) {
dataChannelLog.textContent += '< ' + evt.data + '\n';
if (evt.data.substring(0, 4) === 'pong') {
var elapsed_ms = current_stamp() - parseInt(evt.data.substring(5), 10);
dataChannelLog.textContent += ' RTT ' + elapsed_ms + ' ms\n';
}
};
}
var constraints = {
audio: document.getElementById('use-audio').checked,
video: false
};
if (document.getElementById('use-video').checked) {
var resolution = document.getElementById('video-resolution').value;
if (resolution) {
resolution = resolution.split('x');
constraints.video = {
width: parseInt(resolution[0], 0),
height: parseInt(resolution[1], 0)
};
} else {
constraints.video = true;
}
}
if (constraints.audio || constraints.video) {
if (constraints.video) {
document.getElementById('media').style.display = 'block';
}
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
stream.getTracks().forEach(function(track) {
pc.addTrack(track, stream);
});
return negotiate();
}, function(err) {
alert('Could not acquire media: ' + err);
});
} else {
negotiate();
}
document.getElementById('stop').style.display = 'inline-block';
}
function stop() {
document.getElementById('stop').style.display = 'none';
// close data channel
if (dc) {
dc.close();
}
// close transceivers
if (pc.getTransceivers) {
pc.getTransceivers().forEach(function(transceiver) {
if (transceiver.stop) {
transceiver.stop();
}
});
}
// close local audio / video
pc.getSenders().forEach(function(sender) {
sender.track.stop();
});
// close peer connection
setTimeout(function() {
pc.close();
}, 500);
}
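// Return a copy of realSdp in which the m=<kind> line and the corresponding
// rtpmap/fmtp/rtcp-fb attributes only reference payload types matching `codec`
// (plus their associated RTX payload types).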
function sdpFilterCodec(kind, codec, realSdp) {
    var allowed = [];
    var rtxRegex = new RegExp('a=fmtp:(\\d+) apt=(\\d+)\r$');
    var codecRegex = new RegExp('a=rtpmap:([0-9]+) ' + escapeRegExp(codec));
    var videoRegex = new RegExp('(m=' + kind + ' .*?)( ([0-9]+))*\\s*$');
var lines = realSdp.split('\n');
var isKind = false;
for (var i = 0; i < lines.length; i++) {
if (lines[i].startsWith('m=' + kind + ' ')) {
isKind = true;
} else if (lines[i].startsWith('m=')) {
isKind = false;
}
if (isKind) {
var match = lines[i].match(codecRegex);
if (match) {
allowed.push(parseInt(match[1]));
}
match = lines[i].match(rtxRegex);
if (match && allowed.includes(parseInt(match[2]))) {
allowed.push(parseInt(match[1]));
}
}
}
var skipRegex = 'a=(fmtp|rtcp-fb|rtpmap):([0-9]+)';
var sdp = '';
isKind = false;
for (var i = 0; i < lines.length; i++) {
if (lines[i].startsWith('m=' + kind + ' ')) {
isKind = true;
} else if (lines[i].startsWith('m=')) {
isKind = false;
}
if (isKind) {
var skipMatch = lines[i].match(skipRegex);
if (skipMatch && !allowed.includes(parseInt(skipMatch[2]))) {
continue;
} else if (lines[i].match(videoRegex)) {
sdp += lines[i].replace(videoRegex, '$1 ' + allowed.join(' ')) + '\n';
} else {
sdp += lines[i] + '\n';
}
} else {
sdp += lines[i] + '\n';
}
}
return sdp;
}
function escapeRegExp(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}
aiortc-1.6.0/examples/server/demo-instruct.wav 0000664 0000000 0000000 00004364170 14532224177 0021465 0 ustar 00root root 0000000 0000000 RIFFp WAVEfmt @ > dataL
" # ) #
$ ( ' $ N 8 a { R \ $ * A Qn<!!Rs08
!!O
)A*`
ju_
#8"<@eE# K)Ϗ\v
_S,>-V)]#pKqGlvΎiؒ!b396?6*.jtj؊=pO%
)'x < F)Պٹo{#:5v<`>7.!:W7y"#%E""
qj ۀr
(=8>}?.s zc̝G #"7#"ۚ֡ב!w(?;