pax_global_header00006660000000000000000000000064140122443310014504gustar00rootroot0000000000000052 comment=a263a66a49bed3a2fc941cfda10dade78f1ff163 svtplay-dl-3.0/000077500000000000000000000000001401224433100134455ustar00rootroot00000000000000svtplay-dl-3.0/.coveragerc000066400000000000000000000001421401224433100155630ustar00rootroot00000000000000[run] branch = true include = lib/svtplay_dl/* omit = lib/svtplay_dl/__version__.py */tests/* svtplay-dl-3.0/.gitattributes000066400000000000000000000000531401224433100163360ustar00rootroot00000000000000lib/svtplay_dl/__version__.py export-subst svtplay-dl-3.0/.github/000077500000000000000000000000001401224433100150055ustar00rootroot00000000000000svtplay-dl-3.0/.github/ISSUE_TEMPLATE.md000066400000000000000000000010441401224433100175110ustar00rootroot00000000000000 ### svtplay-dl versions: Run `svtplay-dl --version` ### Operating system and Python version: Name and version of the operating system and python version (run `python --version`) ### What is the issue: Always include the URL you want to download and all switches you are using. 
You should also add `--verbose` because it makes it much easier for use to find the issue :) svtplay-dl --verbose https://www.example.com svtplay-dl-3.0/.github/workflows/000077500000000000000000000000001401224433100170425ustar00rootroot00000000000000svtplay-dl-3.0/.github/workflows/tests.yaml000066400000000000000000000161771401224433100211040ustar00rootroot00000000000000name: CI on: [push, pull_request] jobs: tests: name: ${{ matrix.name }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - {name: '3.9', python: '3.9', os: ubuntu-latest, architecture: 'x64', cibuild: "yes"} - {name: '3.8', python: '3.8', os: ubuntu-latest, architecture: 'x64', cibuild: "no"} - {name: '3.7', python: '3.7', os: ubuntu-latest, architecture: 'x64', cibuild: "no"} - {name: '3.6', python: '3.6', os: ubuntu-latest, architecture: 'x64', cibuild: "no"} - {name: Windows, python: '3.8', os: windows-latest, architecture: 'x64', arch-cx: 'win-amd64', cx_name: 'amd64', cibuild: "yes"} - {name: WindowsX86, python: '3.8', os: windows-latest, architecture: 'x86', arch-cx: 'win32', cx_name: 'win32', cibuild: "yes"} steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: ${{ matrix.python }} architecture: ${{ matrix.architecture }} - name: update pip run: | pip install -U wheel pip install -U setuptools python -m pip install -U pip - name: get pip cache dir id: pip-cache run: echo "::set-output name=dir::$(pip cache dir)" - name: cache pip uses: actions/cache@v2 with: path: ${{ steps.pip-cache.outputs.dir }} key: pip-${{ runner.os }}-${{ matrix.python }}-${{ hashFiles('setup.py') }}|${{ hashFiles('requirements*.txt') }} restore-keys: pip-${{ runner.os }}-${{ matrix.python }}- - name: install deps run: | pip install -r requirements.txt pip install -r requirements-dev.txt - name: cache pre-commit uses: actions/cache@v2 with: path: ~/.cache/pre-commit key: per-commit|${{ runner.os }}-${{ matrix.python }}-${{ hashFiles('.pre-commit-config.yaml') }} 
restore-keys: per-commit|${{ runner.os }}-${{ matrix.python }}- if: matrix.os == 'ubuntu-latest' - name: pre-commit run: pre-commit run --all-files --show-diff-on-failure if: matrix.os == 'ubuntu-latest' - name: pytest run: pytest -v --cov binaries-make: name: "binaries make" runs-on: "ubuntu-latest" strategy: fail-fast: false steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - uses: actions/setup-python@v2 with: python-version: '3.9' - name: update pip run: | pip install -U setuptools python -m pip install -U pip - name: get pip cache dir id: pip-cache run: echo "::set-output name=dir::$(pip cache dir)" - name: cache pip uses: actions/cache@v2 with: path: ${{ steps.pip-cache.outputs.dir }} key: pip-${{ runner.os }}-${{ matrix.python }}-${{ hashFiles('setup.py') }}|${{ hashFiles('requirements*.txt') }} restore-keys: pip-${{ runner.os }}-${{ matrix.python }}- - name: install deps run: | pip install -r requirements.txt pip install -r requirements-dev.txt - name: set version run: python setversion.py # Build .zip fil for *nix - run: make - run: ./svtplay-dl --version - name: cibuild run: python scripts/cibuild.py env: CIBUILD: "yes" BUILD_DOCKER: "no" AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} OS: "ubuntu-latest" binaries-exe: name: "binaries exe ${{ matrix.architecture }}" runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - {name: Windows, python: '3.8', os: windows-latest, architecture: 'x64', arch-cx: 'win-amd64', cx_name: 'amd64', cibuild: "yes"} - {name: WindowsX86, python: '3.8', os: windows-latest, architecture: 'x86', arch-cx: 'win32', cx_name: 'win32', cibuild: "yes"} steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - uses: actions/setup-python@v2 with: python-version: ${{ matrix.python }} architecture: ${{ matrix.architecture }} - name: update pip run: | pip install -U wheel pip install -U setuptools python -m pip install -U pip - name: get pip cache dir id: 
pip-cache run: echo "::set-output name=dir::$(pip cache dir)" - name: cache pip uses: actions/cache@v2 with: path: ${{ steps.pip-cache.outputs.dir }} key: pip-${{ runner.os }}-${{ matrix.python }}-${{ hashFiles('setup.py') }}|${{ hashFiles('requirements*.txt') }} restore-keys: pip-${{ runner.os }}-${{ matrix.python }}- - name: install deps run: | pip install -r requirements.txt pip install -r requirements-dev.txt - name: set version run: python setversion.py - name: build .exe run: python setup.py build_exe - name: run the .exe file run: build\\exe.${{ matrix.arch-cx }}-${{ matrix.python }}\\svtplay-dl.exe --version - run: | mkdir svtplay-dl xcopy /s build\\exe.${{ matrix.arch-cx }}-${{ matrix.python }} svtplay-dl - run: 7z a -tzip svtplay-dl-${{ matrix.cx_name }}.zip svtplay-dl - name: cibuild run: python scripts/cibuild.py env: CIBUILD: "yes" BUILD_DOCKER: "no" AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} OS: "windows-latest" binaries-pypi: name: "binaries pypi" runs-on: "ubuntu-latest" strategy: fail-fast: false steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - uses: actions/setup-python@v2 with: python-version: 3.9 - name: update pip run: | pip install -U setuptools python -m pip install -U pip - name: get pip cache dir id: pip-cache run: echo "::set-output name=dir::$(pip cache dir)" - name: cache pip uses: actions/cache@v2 with: path: ${{ steps.pip-cache.outputs.dir }} key: pip-${{ runner.os }}-${{ matrix.python }}-${{ hashFiles('setup.py') }}|${{ hashFiles('requirements*.txt') }} restore-keys: pip-${{ runner.os }}-${{ matrix.python }}- - name: install deps run: | pip install -r requirements.txt pip install -r requirements-dev.txt - name: python pkg run: python setup.py sdist bdist_wheel - name: cibuild run: python scripts/cibuild.py env: CIBUILD: "yes" BUILD_DOCKER: "yes" TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} DOCKER_USERNAME: ${{ 
secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} OS: "ubuntu-latest" svtplay-dl-3.0/.gitignore000066400000000000000000000055271401224433100154460ustar00rootroot00000000000000cover/ svtplay-dl svtplay-dl.1 svtplay-dl.1.gz github.com/* # Windows thumbnail cache files Thumbs.db ehthumbs.db ehthumbs_vista.db # Dump file *.stackdump # Folder config file [Dd]esktop.ini # Recycle Bin used on file shares $RECYCLE.BIN/ # Windows Installer files *.cab *.msi *.msix *.msm *.msp # Windows shortcuts *.lnk # General .DS_Store .AppleDouble .LSOverride # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ # Swap [._]*.s[a-v][a-z] [._]*.sw[a-p] [._]s[a-v][a-z] [._]sw[a-p] # Session Session.vim # Temporary .netrwhist *~ # Auto-generated tag files tags # Persistent undo [._]*.un~ # User-specific stuff .idea/**/workspace.xml .idea/**/tasks.xml .idea/**/dictionaries .idea/**/shelf # Sensitive or high-churn files .idea/**/dataSources/ .idea/**/dataSources.ids .idea/**/dataSources.local.xml .idea/**/sqlDataSources.xml .idea/**/dynamic.xml .idea/**/uiDesigner.xml .idea/**/dbnavigator.xml # Gradle .idea/**/gradle.xml .idea/**/libraries # Mongo Explorer plugin .idea/**/mongoSettings.xml # File-based project format *.iws # IntelliJ out/ # mpeltonen/sbt-idea plugin .idea_modules/ # JIRA plugin atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties # Editor-based Rest Client .idea/httpRequests # media *.ts *.mp4 *.srt svtplay-dl-3.0/.pre-commit-config.yaml000066400000000000000000000015541401224433100177330ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: 
https://github.com/pre-commit/pre-commit-hooks rev: v3.4.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: check-added-large-files - repo: https://github.com/ambv/black rev: 20.8b1 hooks: - id: black language_version: python3 - repo: https://gitlab.com/pycqa/flake8 rev: 3.8.4 hooks: - id: flake8 - repo: https://github.com/asottile/pyupgrade rev: v2.7.4 hooks: - id: pyupgrade args: [--py3-plus] - repo: https://github.com/asottile/reorder_python_imports rev: v2.3.6 hooks: - id: reorder-python-imports - repo: https://github.com/asottile/add-trailing-comma rev: v2.0.1 hooks: - id: add-trailing-comma svtplay-dl-3.0/LICENSE000066400000000000000000000021121401224433100144460ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2011-2015 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
svtplay-dl-3.0/MANIFEST.in000066400000000000000000000001361401224433100152030ustar00rootroot00000000000000include README.md include LICENSE include versioneer.py include lib/svtplay_dl/__version__.py svtplay-dl-3.0/Makefile000066400000000000000000000030051401224433100151030ustar00rootroot00000000000000all: svtplay-dl .PHONY: test cover doctest pylint svtplay-dl \ release clean_releasedir $(RELEASE_DIR) # These variables describe the latest release: VERSION = 1.9.11 LATEST_RELEASE = $(VERSION) # Compress the manual if MAN_GZIP is set to y, ifeq ($(MAN_GZIP),y) MANFILE_EXT = .gz endif MANFILE = svtplay-dl.1$(MANFILE_EXT) # As pod2man is a perl tool, we have to jump through some hoops # to remove references to perl.. :-) POD2MAN ?= pod2man --section 1 --utf8 \ --center "svtplay-dl manual" \ --release "svtplay-dl $(VERSION)" \ --date "$(LATEST_RELEASE_DATE)" PREFIX ?= /usr/local BINDIR = $(PREFIX)/bin PYTHON ?= /usr/bin/env python3 export PYTHONPATH=lib # If you don't have a python3 environment (e.g. mock for py3 and # nosetests3), you can remove the -3 flag. TEST_OPTS ?= -2 -3 svtplay-dl: $(PYFILES) $(MAKE) -C lib mv -f lib/svtplay-dl . 
svtplay-dl.1: svtplay-dl.pod rm -f $@ $(POD2MAN) $< $@ svtplay-dl.1.gz: svtplay-dl.1 rm -f $@ gzip -9 svtplay-dl.1 test: sh scripts/run-tests.sh $(TEST_OPTS) install: svtplay-dl install -d $(DESTDIR)$(BINDIR) install -m 755 svtplay-dl $(DESTDIR)$(BINDIR) cover: sh scripts/run-tests.sh -C pylint: $(MAKE) -C lib pylint doctest: svtplay-dl sh scripts/diff_man_help.sh release: git tag -m "New version $(NEW_RELEASE)" \ -m "$$(git log --oneline $$(git describe --tags --abbrev=0 HEAD^)..HEAD^)" \ $(NEW_RELEASE) clean: $(MAKE) -C lib clean rm -f svtplay-dl rm -f $(MANFILE) rm -rf .tox svtplay-dl-3.0/README.md000066400000000000000000000103631401224433100147270ustar00rootroot00000000000000# svtplay-dl [![Build Status Actions](https://github.com/spaam/svtplay-dl/workflows/Tests/badge.svg)](https://github.com/spaam/svtplay-dl/actions) ## Installation ### MacOS If you have [Homebrew](https://brew.sh/) on your machine you can install by running: ``` brew install svtplay-dl ``` You will need to run `brew install ffmpeg` or `brew install libav` afterwards, if you don't already have one of these packages. ### Debian and Ubuntu svtplay-dl is available in Debian strech and later and on Ubuntu 16.04 and later, which means you can install it straight away using apt. The version in their repo is often old and thus we **strongly** recommend using our own apt repo, which always include the latest version. The svtplay-dl repo for Debian / Ubuntu can be found at [apt.svtplay-dl.se](https://apt.svtplay-dl.se/). 
##### Add the release PGP keys: ``` curl -s https://svtplay-dl.se/release-key.txt | sudo apt-key add - ``` ##### Add the "release" channel to your APT sources: ``` echo "deb https://apt.svtplay-dl.se/ svtplay-dl release" | sudo tee /etc/apt/sources.list.d/svtplay-dl.list ``` ##### Update and install svtplay-dl: ``` sudo apt-get update sudo apt-get install svtplay-dl ``` ### Solus svtplay-dl is avaliable in the [Solus](https://getsol.us.com/) repository and can be installed by simply running: ``` sudo eopkg it svtplay-dl ``` ### Windows You can download the Windows binaries from [svtplay-dl.se](https://svtplay-dl.se/) If you want to build your own Windows binaries: 1. Install [cx_freeze](https://anthony-tuininga.github.io/cx_Freeze/) 3. Follow the steps listed under [From source](#from-source) 4. cd path\to\svtplay-dl && mkdir build 5. `pip install -e .` 6. `python setversion.py` # this will change the version string to a more useful one 7. `python %PYTHON%\\Scripts\\cxfreeze --include-modules=cffi,queue,idna.idnadata --target-dir=build bin/svtplay-dl` 8. Find binary in build folder. you need `svtplay-dl.exe` and `pythonXX.dll` from that folder to run `svtplay-dl.exe` ### Other systems with python ``` pip3 install svtplay-dl ``` ### Any UNIX (Linux, BSD, macOS, etc.) ##### Download with curl ``` sudo curl -L https://svtplay-dl.se/download/latest/svtplay-dl -o /usr/local/bin/svtplay-dl ``` ##### Make it executable ``` sudo chmod a+rx /usr/local/bin/svtplay-dl ``` ### From source If packaging isn’t available for your operating system, or you want to use a non-released version, you’ll want to install from source. Use git to download the sources: ``` git clone https://github.com/spaam/svtplay-dl ``` svtplay-dl requires the following additional tools and libraries. They are usually available from your distribution’s package repositories. If you don’t have them, some features will not be working. 
- [Python](https://www.python.org) 3.4 or higher - [cryptography](https://cryptography.io/en/latest) to download encrypted HLS streams - [PyYaml](https://github.com/yaml/pyyaml) for configure file - [Requests](https://2.python-requests.org) - [PySocks](https://github.com/Anorov/PySocks) to enable proxy support - [ffmpeg](https://ffmpeg.org) or [avconv](https://libav.org) for postprocessing and/or for DASH streams ([ffmpeg](https://ffmpeg.zeranoe.com) for Windows) ##### To install it, run: ``` sudo python3 setup.py install ``` ## After install ``` svtplay-dl [options] URL ``` If you encounter any bugs or problems, don’t hesitate to open an issue [on github](https://github.com/spaam/svtplay-dl/issues). Or why not join the ``#svtplay-dl`` IRC channel on Freenode? ## Supported services This script works for: - aftonbladet.se - bambuser.com - comedycentral.se - di.se - dn.se - dplay.se - dr.dk - efn.se - expressen.se - hbo.com - kanal9play.se - nickelodeon.nl - nickelodeon.no - nickelodeon.se - nrk.no - oppetarkiv.se - ruv.is - svd.se - sverigesradio.se - svtplay.se - viafree.se (former tv3play.se, tv6play.se, tv8play.se, tv10play.se) - viafree.dk (former tv3play.dk) - viafree.no (former tv3play.no, viasat4play.no) - tv3play.ee - tv3play.lt - tv3play.lv - tv4.se - tv4play.se - twitch.tv - ur.se - urplay.se - vg.no - viagame.com ## License This project is licensed under [The MIT License (MIT)](LICENSE) Homepage: [svtplay-dl.se](https://svtplay-dl.se/) svtplay-dl-3.0/bin/000077500000000000000000000000001401224433100142155ustar00rootroot00000000000000svtplay-dl-3.0/bin/svtplay-dl000077500000000000000000000002641401224433100162440ustar00rootroot00000000000000#!/usr/bin/env python3 # ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import svtplay_dl if __name__ == "__main__": svtplay_dl.main() 
svtplay-dl-3.0/dockerfile/000077500000000000000000000000001401224433100155545ustar00rootroot00000000000000svtplay-dl-3.0/dockerfile/Dockerfile000066400000000000000000000005641401224433100175530ustar00rootroot00000000000000# using edge to get ffmpeg-4.x FROM alpine:edge LABEL maintainer="j@i19.se" COPY dist/*.whl . RUN set -xe \ && apk add --no-cache \ ca-certificates \ python3 \ py3-pip \ py3-cryptography \ ffmpeg \ && python3 -m pip install *.whl \ && rm -f *.whl WORKDIR /data ENTRYPOINT ["python3", "/usr/bin/svtplay-dl"] CMD ["--help"] svtplay-dl-3.0/docs/000077500000000000000000000000001401224433100143755ustar00rootroot00000000000000svtplay-dl-3.0/docs/README.docker.md000066400000000000000000000007411401224433100171240ustar00rootroot00000000000000# svtplay-dl container version of the script. # usage ```sh docker run -it --rm -u $(id -u):$(id -g) -v "$(pwd):/data" spaam/svtplay-dl ``` or create an alias: ##### bash (~/.bashrc) ``` alias svtplay-dl='docker run -it --rm -u $(id -u):$(id -g) -v "$(pwd):/data" spaam/svtplay-dl' ``` ##### zsh (~/.zshrc) ``` alias svtplay-dl='docker run -it --rm -u $(id -u):$(id -g) -v "$(pwd):/data" spaam/svtplay-dl' ``` # build example ```sh docker build -t svtplay-dl . ``` svtplay-dl-3.0/lib/000077500000000000000000000000001401224433100142135ustar00rootroot00000000000000svtplay-dl-3.0/lib/Makefile000066400000000000000000000025551401224433100156620ustar00rootroot00000000000000all: svtplay-dl clean: find . -name '*.pyc' -exec rm {} \; rm -f svtplay-dl pylint: pylint $(PYLINT_OPTS) svtplay_dl export PACKAGES = svtplay_dl \ svtplay_dl.fetcher \ svtplay_dl.utils \ svtplay_dl.service \ svtplay_dl.subtitle \ svtplay_dl.postprocess export PYFILES = $(sort $(addsuffix /*.py,$(subst .,/,$(PACKAGES)))) PYTHON ?= /usr/bin/env python3 VERSION = $(shell git describe 2>/dev/null || echo $(LATEST_RELEASE)-unknown) svtplay-dl: $(PYFILES) @# Verify that there's no .build already \ ! [ -d .build ] || { \ echo "ERROR: build already in progress? 
(or remove $(PWD)/.build/)"; \ exit 1; \ }; \ mkdir -p .build @# Stage the files in .build for postprocessing for py in $(PYFILES); do \ install -d ".build/$${py%/*}"; \ install $$py .build/$$py; \ done # Add git version info to __version__, seen in --version sed -i -e 's/^__version__ = \(.*\)$$/__version__ = "$(VERSION)"/' \ .build/svtplay_dl/__init__.py @# reset timestamps, to avoid non-determinism in zip file find .build/ -exec touch -m -t 198001010000 {} \; (cd .build && zip -X --quiet svtplay-dl $(PYFILES)) (cd .build && zip -X --quiet --junk-paths svtplay-dl svtplay_dl/__main__.py) echo '#!$(PYTHON)' > svtplay-dl cat .build/svtplay-dl.zip >> svtplay-dl rm -rf .build chmod a+x svtplay-dl svtplay-dl-3.0/lib/svtplay_dl/000077500000000000000000000000001401224433100163745ustar00rootroot00000000000000svtplay-dl-3.0/lib/svtplay_dl/__init__.py000066400000000000000000000043331401224433100205100ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import logging import sys import yaml from svtplay_dl.service.cmore import Cmore from svtplay_dl.utils.getmedia import get_media from svtplay_dl.utils.getmedia import get_multiple_media from svtplay_dl.utils.parser import parser from svtplay_dl.utils.parser import parsertoconfig from svtplay_dl.utils.parser import setup_defaults from .__version__ import get_versions __version__ = get_versions()["version"] del get_versions log = logging.getLogger("svtplay_dl") def setup_log(silent, verbose=False): logging.addLevelName(25, "INFO") fmt = "%(levelname)s: %(message)s" if silent: stream = sys.stderr level = 25 elif verbose: stream = sys.stderr level = logging.DEBUG fmt = "%(levelname)s [%(created)s] %(pathname)s/%(funcName)s: %(message)s" else: stream = sys.stdout level = logging.INFO logging.basicConfig(level=level, format=fmt) hdlr = logging.StreamHandler(stream) log.addHandler(hdlr) def main(): """ Main program """ parse, options = parser(__version__) if 
options.flexibleq and not options.quality: logging.error("flexible-quality requires a quality") if options.only_audio and options.only_video: logging.error("Only use one of them, not both at the same time") sys.exit(2) if len(options.urls) == 0: parse.print_help() sys.exit(0) urls = options.urls config = parsertoconfig(setup_defaults(), options) if len(urls) < 1: parse.error("Incorrect number of arguments") setup_log(config.get("silent"), config.get("verbose")) if options.cmoreoperatorlist: config = parsertoconfig(setup_defaults(), options) c = Cmore(config, urls) c.operatorlist() sys.exit(0) try: if len(urls) == 1: get_media(urls[0], config, __version__) else: get_multiple_media(urls, config) except KeyboardInterrupt: print("") except (yaml.YAMLError, yaml.MarkedYAMLError) as e: logging.error("Your settings file(s) contain invalid YAML syntax! Please fix and restart!, {}".format(str(e))) sys.exit(2) svtplay-dl-3.0/lib/svtplay_dl/__main__.py000066400000000000000000000005751401224433100204750ustar00rootroot00000000000000#!/usr/bin/env python # ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import sys if __package__ is None and not hasattr(sys, "frozen"): # direct call of __main__.py import os.path sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import svtplay_dl if __name__ == "__main__": svtplay_dl.main() svtplay-dl-3.0/lib/svtplay_dl/__version__.py000066400000000000000000000424511401224433100212350ustar00rootroot00000000000000# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. 
Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = " (tag: 3.0)" git_full = "a263a66a49bed3a2fc941cfda10dade78f1ff163" git_date = "2021-02-14 16:59:21 +0100" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "None" cfg.parentdir_prefix = "None" cfg.versionfile_source = "lib/svtplay_dl/__version__.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except OSError: e = 
sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried {}".format(commands)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories {} but none started with prefix {}".format(str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. 
By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(full_tag, tag_prefix) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} svtplay-dl-3.0/lib/svtplay_dl/error.py000066400000000000000000000017511401224433100201030ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- class UIException(Exception): pass class ServiceError(Exception): pass class NoRequestedProtocols(UIException): """ This excpetion is thrown when the service provides streams, but not using any accepted protocol (as decided by options.stream_prio). """ def __init__(self, requested, found): """ The constructor takes two mandatory parameters, requested and found. Both should be lists. requested is the protocols we want and found is the protocols that can be used to access the stream. 
""" self.requested = requested self.found = found super().__init__("None of the provided protocols (%s) are in " "the current list of accepted protocols (%s)" % (self.found, self.requested)) def __repr__(self): return "NoRequestedProtocols(requested={}, found={})".format(self.requested, self.found) svtplay-dl-3.0/lib/svtplay_dl/fetcher/000077500000000000000000000000001401224433100200145ustar00rootroot00000000000000svtplay-dl-3.0/lib/svtplay_dl/fetcher/__init__.py000066400000000000000000000050731401224433100221320ustar00rootroot00000000000000import copy from svtplay_dl.utils.http import HTTP from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progressbar class VideoRetriever: def __init__(self, config, url, bitrate, output, **kwargs): self.config = config self.url = url self.bitrate = int(bitrate) if bitrate else 0 self.kwargs = kwargs self.http = HTTP(config) self.finished = False self.audio = kwargs.pop("audio", None) self.files = kwargs.pop("files", None) self.keycookie = kwargs.pop("keycookie", None) self.authorization = kwargs.pop("authorization", None) self.output = output self.segments = kwargs.pop("segments", None) self.output_extention = None channels = kwargs.pop("channels", None) codec = kwargs.pop("codec", "h264") self.format = "{}-{}".format(codec, channels) if channels else codec def __repr__(self): return "".format(self.__class__.__name__, self.bitrate, self.format) @property def name(self): pass def _download_url(self, url, audio=False, total_size=None): cookies = self.kwargs["cookies"] data = self.http.request("get", url, cookies=cookies, headers={"Range": "bytes=0-8192"}) if not total_size: try: total_size = data.headers["Content-Range"] total_size = total_size[total_size.find("/") + 1 :] total_size = int(total_size) except KeyError: raise KeyError("Can't get the total size.") bytes_so_far = 8192 if audio: file_d = output(copy.copy(self.output), self.config, "m4a") else: file_d = 
output(self.output, self.config, "mp4") if file_d is None: return file_d.write(data.content) eta = ETA(total_size) while bytes_so_far < total_size: if not self.config.get("silent"): eta.update(bytes_so_far) progressbar(total_size, bytes_so_far, "".join(["ETA: ", str(eta)])) old = bytes_so_far + 1 bytes_so_far = total_size bytes_range = "bytes={}-{}".format(old, bytes_so_far) data = self.http.request("get", url, cookies=cookies, headers={"Range": bytes_range}) file_d.write(data.content) file_d.close() progressbar(bytes_so_far, total_size, "ETA: complete") # progress_stream.write('\n') self.finished = True svtplay-dl-3.0/lib/svtplay_dl/fetcher/dash.py000066400000000000000000000267671401224433100213270ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import copy import math import os import re import time import xml.etree.ElementTree as ET from datetime import datetime from urllib.parse import urljoin from svtplay_dl.error import ServiceError from svtplay_dl.error import UIException from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progress_stream from svtplay_dl.utils.output import progressbar class DASHException(UIException): def __init__(self, url, message): self.url = url super().__init__(message) class LiveDASHException(DASHException): def __init__(self, url): super().__init__(url, "This is a live DASH stream, and they are not supported.") class DASHattibutes: def __init__(self): self.default = {} def set(self, key, value): self.default[key] = value def get(self, key): if key in self.default: return self.default[key] return 0 def templateelemt(attributes, element, filename, idnumber): files = [] init = element.attrib["initialization"] media = element.attrib["media"] if "startNumber" in element.attrib: start = int(element.attrib["startNumber"]) else: start = 1 if "timescale" in 
element.attrib: attributes.set("timescale", float(element.attrib["timescale"])) else: attributes.set("timescale", 1) if "duration" in element.attrib: attributes.set("duration", float(element.attrib["duration"])) segments = [] timeline = element.findall("{urn:mpeg:dash:schema:mpd:2011}SegmentTimeline/{urn:mpeg:dash:schema:mpd:2011}S") if timeline: t = -1 for s in timeline: duration = int(s.attrib["d"]) repeat = int(s.attrib["r"]) if "r" in s.attrib else 0 segmenttime = int(s.attrib["t"]) if "t" in s.attrib else 0 if t < 0: t = segmenttime count = repeat + 1 end = start + len(segments) + count number = start + len(segments) while number < end: segments.append({"number": number, "duration": math.ceil(duration / attributes.get("timescale")), "time": t}) t += duration number += 1 else: # Saw this on dynamic live content start = 0 now = time.time() periodStartWC = time.mktime(attributes.get("availabilityStartTime").timetuple()) + start periodEndWC = now + attributes.get("minimumUpdatePeriod") periodDuration = periodEndWC - periodStartWC segmentCount = math.ceil(periodDuration * attributes.get("timescale") / attributes.get("duration")) availableStart = math.floor( (now - periodStartWC - attributes.get("timeShiftBufferDepth")) * attributes.get("timescale") / attributes.get("duration"), ) availableEnd = math.floor((now - periodStartWC) * attributes.get("timescale") / attributes.get("duration")) start = max(0, availableStart) end = min(segmentCount, availableEnd) for number in range(start, end): segments.append({"number": number, "duration": int(attributes.get("duration") / attributes.get("timescale"))}) name = media.replace("$RepresentationID$", idnumber).replace("$Bandwidth$", attributes.get("bandwidth")) files.append(urljoin(filename, init.replace("$RepresentationID$", idnumber).replace("$Bandwidth$", attributes.get("bandwidth")))) for segment in segments: if "$Time$" in media: new = name.replace("$Time$", str(segment["time"])) if "$Number" in name: if 
re.search(r"\$Number(\%\d+)d\$", name): vname = name.replace("$Number", "").replace("$", "") new = vname % segment["number"] else: new = name.replace("$Number$", str(segment["number"])) files.append(urljoin(filename, new)) return files def adaptionset(attributes, element, url, baseurl=None): streams = {} dirname = os.path.dirname(url) + "/" if baseurl: dirname = urljoin(dirname, baseurl) template = element[0].find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate") represtation = element[0].findall(".//{urn:mpeg:dash:schema:mpd:2011}Representation") codecs = None if "codecs" in element[0].attrib: codecs = element[0].attrib["codecs"] for i in represtation: files = [] segments = False filename = dirname attributes.set("bandwidth", i.attrib["bandwidth"]) bitrate = int(i.attrib["bandwidth"]) / 1000 idnumber = i.attrib["id"] channels = None codec = None if codecs is None and "codecs" in i.attrib: codecs = i.attrib["codecs"] if codecs[:3] == "avc": codec = "h264" if codecs[:3] == "hvc": codec = "hevc" if i.find("{urn:mpeg:dash:schema:mpd:2011}AudioChannelConfiguration") is not None: chan = i.find("{urn:mpeg:dash:schema:mpd:2011}AudioChannelConfiguration").attrib["value"] if chan == "6": channels = "51" else: channels = None if i.find("{urn:mpeg:dash:schema:mpd:2011}BaseURL") is not None: filename = urljoin(filename, i.find("{urn:mpeg:dash:schema:mpd:2011}BaseURL").text) if i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentBase") is not None: segments = True files.append(filename) if template is not None: segments = True files = templateelemt(attributes, template, filename, idnumber) elif i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate") is not None: segments = True files = templateelemt(attributes, i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate"), filename, idnumber) if files: streams[bitrate] = {"segments": segments, "files": files, "codecs": codec, "channels": channels} return streams def dashparse(config, res, url, **kwargs): streams = {} if not res: return 
streams if res.status_code >= 400: streams[0] = ServiceError("Can't read DASH playlist. {}".format(res.status_code)) return streams if len(res.text) < 1: streams[0] = ServiceError("Can't read DASH playlist. {}, size: {}".format(res.status_code, len(res.text))) return streams return _dashparse(config, res.text, url, res.cookies, **kwargs) def _dashparse(config, text, url, cookies, **kwargs): streams = {} baseurl = None output = kwargs.pop("output", None) attributes = DASHattibutes() xml = ET.XML(text) if xml.find("./{urn:mpeg:dash:schema:mpd:2011}BaseURL") is not None: baseurl = xml.find("./{urn:mpeg:dash:schema:mpd:2011}BaseURL").text if "availabilityStartTime" in xml.attrib: attributes.set("availabilityStartTime", parse_dates(xml.attrib["availabilityStartTime"])) attributes.set("publishTime", parse_dates(xml.attrib["publishTime"])) if "mediaPresentationDuration" in xml.attrib: attributes.set("mediaPresentationDuration", parse_duration(xml.attrib["mediaPresentationDuration"])) if "timeShiftBufferDepth" in xml.attrib: attributes.set("timeShiftBufferDepth", parse_duration(xml.attrib["timeShiftBufferDepth"])) if "minimumUpdatePeriod" in xml.attrib: attributes.set("minimumUpdatePeriod", parse_duration(xml.attrib["minimumUpdatePeriod"])) attributes.set("type", xml.attrib["type"]) temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="audio/mp4"]') if len(temp) == 0: temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="audio"]') audiofiles = adaptionset(attributes, temp, url, baseurl) temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="video/mp4"]') if len(temp) == 0: temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="video"]') videofiles = adaptionset(attributes, temp, url, baseurl) if not audiofiles or not videofiles: streams[0] = ServiceError("Found no Audiofiles or Videofiles to download.") return streams if "channels" in kwargs: kwargs.pop("channels") if 
"codec" in kwargs: kwargs.pop("codec") for i in videofiles.keys(): bitrate = i + list(audiofiles.keys())[0] streams[bitrate] = DASH( copy.copy(config), url, bitrate, cookies=cookies, audio=audiofiles[list(audiofiles.keys())[0]]["files"], files=videofiles[i]["files"], output=output, segments=videofiles[i]["segments"], codec=videofiles[i]["codecs"], channels=audiofiles[list(audiofiles.keys())[0]]["channels"], **kwargs, ) return streams def parse_duration(duration): match = re.search(r"P(?:(\d*)Y)?(?:(\d*)M)?(?:(\d*)D)?(?:T(?:(\d*)H)?(?:(\d*)M)?(?:([\d.]*)S)?)?", duration) if not match: return 0 year = int(match.group(1)) * 365 * 24 * 60 * 60 if match.group(1) else 0 month = int(match.group(2)) * 30 * 24 * 60 * 60 if match.group(2) else 0 day = int(match.group(3)) * 24 * 60 * 60 if match.group(3) else 0 hour = int(match.group(4)) * 60 * 60 if match.group(4) else 0 minute = int(match.group(5)) * 60 if match.group(5) else 0 second = float(match.group(6)) if match.group(6) else 0 return year + month + day + hour + minute + second def parse_dates(date_str): date_patterns = ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%SZ"] dt = None for pattern in date_patterns: try: dt = datetime.strptime(date_str, pattern) break except Exception: pass if not dt: raise ValueError("Can't parse date format: {}".format(date_str)) return dt class DASH(VideoRetriever): @property def name(self): return "dash" def download(self): self.output_extention = "mp4" if self.config.get("live") and not self.config.get("force"): raise LiveDASHException(self.url) if self.segments: if self.audio and not self.config.get("only_video"): self._download2(self.audio, audio=True) if not self.config.get("only_audio"): self._download2(self.files) else: if self.audio and not self.config.get("only_video"): self._download_url(self.audio, audio=True) if not self.config.get("only_audio"): self._download_url(self.url) def _download2(self, files, audio=False): cookies = self.kwargs["cookies"] if audio: 
file_d = output(copy.copy(self.output), self.config, extension="m4a") else: file_d = output(self.output, self.config, extension="mp4") if file_d is None: return eta = ETA(len(files)) n = 1 for i in files: if not self.config.get("silent"): eta.increment() progressbar(len(files), n, "".join(["ETA: ", str(eta)])) n += 1 data = self.http.request("get", i, cookies=cookies) if data.status_code == 404: break data = data.content file_d.write(data) file_d.close() if not self.config.get("silent"): progress_stream.write("\n") self.finished = True svtplay-dl-3.0/lib/svtplay_dl/fetcher/hds.py000066400000000000000000000215651401224433100211550ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import base64 import binascii import copy import struct import xml.etree.ElementTree as ET from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.error import UIException from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progress_stream from svtplay_dl.utils.output import progressbar def _chr(temp): return chr(temp) class HDSException(UIException): def __init__(self, url, message): self.url = url super().__init__(message) class LiveHDSException(HDSException): def __init__(self, url): super().__init__(url, "This is a live HDS stream, and they are not supported.") def hdsparse(config, res, manifest, output=None): streams = {} bootstrap = {} if not res: return streams if res.status_code >= 400: streams[0] = ServiceError("Can't read HDS playlist. 
{}".format(res.status_code)) return streams data = res.text xml = ET.XML(data) bootstrapIter = xml.iter("{http://ns.adobe.com/f4m/1.0}bootstrapInfo") mediaIter = xml.iter("{http://ns.adobe.com/f4m/1.0}media") if xml.find("{http://ns.adobe.com/f4m/1.0}drmAdditionalHeader") is not None: streams[0] = ServiceError("HDS DRM protected content.") return streams for i in bootstrapIter: if "id" in i.attrib: bootstrap[i.attrib["id"]] = i.text else: bootstrap["0"] = i.text parse = urlparse(manifest) querystring = parse.query url = "{}://{}{}".format(parse.scheme, parse.netloc, parse.path) for i in mediaIter: bootstrapid = bootstrap[i.attrib["bootstrapInfoId"]] streams[int(i.attrib["bitrate"])] = HDS( copy.copy(config), url, i.attrib["bitrate"], url_id=i.attrib["url"], bootstrap=bootstrapid, metadata=i.find("{http://ns.adobe.com/f4m/1.0}metadata").text, querystring=querystring, cookies=res.cookies, output=output, ) return streams class HDS(VideoRetriever): @property def name(self): return "hds" def download(self): self.output_extention = "flv" if self.config.get("live") and not self.config.get("force"): raise LiveHDSException(self.url) querystring = self.kwargs["querystring"] cookies = self.kwargs["cookies"] bootstrap = base64.b64decode(self.kwargs["bootstrap"]) box = readboxtype(bootstrap, 0) antal = None if box[2] == b"abst": antal = readbox(bootstrap, box[0]) baseurl = self.url[0 : self.url.rfind("/")] file_d = output(self.output, self.config, "flv") if file_d is None: return metasize = struct.pack(">L", len(base64.b64decode(self.kwargs["metadata"])))[1:] file_d.write(binascii.a2b_hex(b"464c560105000000090000000012")) file_d.write(metasize) file_d.write(binascii.a2b_hex(b"00000000000000")) file_d.write(base64.b64decode(self.kwargs["metadata"])) file_d.write(binascii.a2b_hex(b"00000000")) i = 1 start = antal[1]["first"] total = antal[1]["total"] eta = ETA(total) while i <= total: url = "{}/{}Seg1-Frag{}?{}".format(baseurl, self.kwargs["url_id"], start, querystring) if not 
self.config.get("silent"): eta.update(i) progressbar(total, i, "".join(["ETA: ", str(eta)])) data = self.http.request("get", url, cookies=cookies) if data.status_code == 404: break data = data.content number = decode_f4f(i, data) file_d.write(data[number:]) i += 1 start += 1 file_d.close() if not self.config.get("silent"): progress_stream.write("\n") self.finished = True def readbyte(data, pos): return struct.unpack("B", bytes(_chr(data[pos]), "ascii"))[0] def read16(data, pos): endpos = pos + 2 return struct.unpack(">H", data[pos:endpos])[0] def read24(data, pos): end = pos + 3 return struct.unpack(">L", "\x00" + data[pos:end])[0] def read32(data, pos): end = pos + 4 return struct.unpack(">i", data[pos:end])[0] def readu32(data, pos): end = pos + 4 return struct.unpack(">I", data[pos:end])[0] def read64(data, pos): end = pos + 8 return struct.unpack(">Q", data[pos:end])[0] def readstring(data, pos): length = 0 while bytes(_chr(data[pos + length]), "ascii") != b"\x00": length += 1 endpos = pos + length string = data[pos:endpos] pos += length + 1 return pos, string def readboxtype(data, pos): boxsize = read32(data, pos) tpos = pos + 4 endpos = tpos + 4 boxtype = data[tpos:endpos] if boxsize > 1: boxsize -= 8 pos += 8 return pos, boxsize, boxtype # Note! A lot of variable assignments are commented out. These are # accessible values that we currently don't use. 
def readbox(data, pos): # version = readbyte(data, pos) pos += 1 # flags = read24(data, pos) pos += 3 # bootstrapversion = read32(data, pos) pos += 4 # byte = readbyte(data, pos) pos += 1 # profile = (byte & 0xC0) >> 6 # live = (byte & 0x20) >> 5 # update = (byte & 0x10) >> 4 # timescale = read32(data, pos) pos += 4 # currentmediatime = read64(data, pos) pos += 8 # smptetimecodeoffset = read64(data, pos) pos += 8 temp = readstring(data, pos) # movieidentifier = temp[1] pos = temp[0] serverentrycount = readbyte(data, pos) pos += 1 serverentrytable = [] i = 0 while i < serverentrycount: temp = readstring(data, pos) serverentrytable.append(temp[1]) pos = temp[0] i += 1 qualityentrycount = readbyte(data, pos) pos += 1 qualityentrytable = [] i = 0 while i < qualityentrycount: temp = readstring(data, pos) qualityentrytable.append(temp[1]) pos = temp[0] i += 1 tmp = readstring(data, pos) # drm = tmp[1] pos = tmp[0] tmp = readstring(data, pos) # metadata = tmp[1] pos = tmp[0] segmentruntable = readbyte(data, pos) pos += 1 if segmentruntable > 0: tmp = readboxtype(data, pos) boxtype = tmp[2] boxsize = tmp[1] pos = tmp[0] if boxtype == b"asrt": antal = readasrtbox(data, pos) pos += boxsize fragRunTableCount = readbyte(data, pos) pos += 1 i = 0 first = 1 while i < fragRunTableCount: tmp = readboxtype(data, pos) boxtype = tmp[2] boxsize = tmp[1] pos = tmp[0] if boxtype == b"afrt": first = readafrtbox(data, pos) pos += boxsize i += 1 antal[1]["first"] = first return antal # Note! A lot of variable assignments are commented out. These are # accessible values that we currently don't use. 
def readafrtbox(data, pos): # version = readbyte(data, pos) pos += 1 # flags = read24(data, pos) pos += 3 # timescale = read32(data, pos) pos += 4 qualityentry = readbyte(data, pos) pos += 1 i = 0 while i < qualityentry: temp = readstring(data, pos) # qualitysegmulti = temp[1] pos = temp[0] i += 1 fragrunentrycount = read32(data, pos) pos += 4 i = 0 first = 1 skip = False while i < fragrunentrycount: firstfragment = readu32(data, pos) if not skip: first = firstfragment skip = True pos += 4 # timestamp = read64(data, pos) pos += 8 # duration = read32(data, pos) pos += 4 i += 1 return first # Note! A lot of variable assignments are commented out. These are # accessible values that we currently don't use. def readasrtbox(data, pos): # version = readbyte(data, pos) pos += 1 # flags = read24(data, pos) pos += 3 qualityentrycount = readbyte(data, pos) pos += 1 qualitysegmentmodifers = [] i = 0 while i < qualityentrycount: temp = readstring(data, pos) qualitysegmentmodifers.append(temp[1]) pos = temp[0] i += 1 seqCount = read32(data, pos) pos += 4 ret = {} i = 0 while i < seqCount: firstseg = read32(data, pos) pos += 4 fragPerSeg = read32(data, pos) pos += 4 tmp = i + 1 ret[tmp] = {"first": firstseg, "total": fragPerSeg} i += 1 return ret def decode_f4f(fragID, fragData): start = fragData.find(b"mdat") + 4 if fragID > 1: (tagLen,) = struct.unpack_from(">L", fragData, start) tagLen &= 0x00FFFFFF start += tagLen + 11 + 4 return start svtplay-dl-3.0/lib/svtplay_dl/fetcher/hls.py000066400000000000000000000451601401224433100211620ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import binascii import copy import os import random import re import time from datetime import datetime from datetime import timedelta from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers import Cipher from 
cryptography.hazmat.primitives.ciphers import modes from svtplay_dl.error import ServiceError from svtplay_dl.error import UIException from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.subtitle import subtitle from svtplay_dl.utils.http import get_full_url from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progress_stream from svtplay_dl.utils.output import progressbar class HLSException(UIException): def __init__(self, url, message): self.url = url super().__init__(message) class LiveHLSException(HLSException): def __init__(self, url): super().__init__(url, "This is a live HLS stream, and they are not supported.") def hlsparse(config, res, url, **kwargs): streams = {} if not res: return streams if res.status_code > 400: streams[0] = ServiceError("Can't read HLS playlist. {}".format(res.status_code)) return streams m3u8 = M3U8(res.text) keycookie = kwargs.pop("keycookie", None) authorization = kwargs.pop("authorization", None) httpobject = kwargs.pop("httpobject", None) output = kwargs.pop("output", None) channels = kwargs.pop("channels", None) codec = kwargs.pop("codec", "h264") media = {} subtitles = {} segments = None if m3u8.master_playlist: for i in m3u8.master_playlist: audio_url = None vcodec = None chans = None if i["TAG"] == "EXT-X-MEDIA": if "AUTOSELECT" in i and (i["AUTOSELECT"].upper() == "YES"): if i["TYPE"] and i["TYPE"] != "SUBTITLES": if "URI" in i: if segments is None: segments = True if i["GROUP-ID"] not in media: media[i["GROUP-ID"]] = [] if "CHANNELS" in i: if i["CHANNELS"] == "6": chans = "51" media[i["GROUP-ID"]].append([i["URI"], chans]) else: segments = False if i["TYPE"] == "SUBTITLES": if "URI" in i: if i["GROUP-ID"] not in subtitles: subtitles[i["GROUP-ID"]] = [] item = [i["URI"], i["LANGUAGE"]] if item not in subtitles[i["GROUP-ID"]]: subtitles[i["GROUP-ID"]].append(item) continue elif i["TAG"] == "EXT-X-STREAM-INF": if "AVERAGE-BANDWIDTH" in i: bit_rate = 
float(i["AVERAGE-BANDWIDTH"]) / 1000 else: bit_rate = float(i["BANDWIDTH"]) / 1000 if "CODECS" in i: if i["CODECS"][:3] == "hvc": vcodec = "hevc" if i["CODECS"][:3] == "avc": vcodec = "h264" if "AUDIO" in i and (i["AUDIO"] in media): chans = media[i["AUDIO"]][0][1] audio_url = get_full_url(media[i["AUDIO"]][0][0], url) urls = get_full_url(i["URI"], url) else: continue # Needs to be changed to utilise other tags. chans = chans if audio_url else channels codec = vcodec if vcodec else codec streams[int(bit_rate)] = HLS( copy.copy(config), urls, bit_rate, cookies=res.cookies, keycookie=keycookie, authorization=authorization, audio=audio_url, output=output, segments=bool(segments), channels=chans, codec=codec, **kwargs, ) if subtitles and httpobject: for sub in list(subtitles.keys()): for n in subtitles[sub]: m3u8s = M3U8(httpobject.request("get", get_full_url(n[0], url), cookies=res.cookies).text) if "cmore" in url: subtype = "wrstsegment" # this have been seen in tv4play else: subtype = "wrst" streams[int(random.randint(1, 40))] = subtitle( copy.copy(config), subtype, get_full_url(m3u8s.media_segment[0]["URI"], url), subfix=n[1], output=copy.copy(output), m3u8=m3u8s, ) elif m3u8.media_segment: config.set("segments", False) streams[0] = HLS( copy.copy(config), url, 0, cookies=res.cookies, keycookie=keycookie, authorization=authorization, output=output, segments=False, ) else: streams[0] = ServiceError("Can't find HLS playlist in m3u8 file.") return streams class HLS(VideoRetriever): @property def name(self): return "hls" def download(self): self.output_extention = "ts" if self.segments: if self.audio and not self.config.get("only_video"): self._download(self.audio, file_name=(copy.copy(self.output), "audio.ts")) if not self.config.get("only_audio"): self._download(self.url, file_name=(self.output, "ts")) else: # Ignore audio self.audio = None self._download(self.url, file_name=(self.output, "ts")) def _download(self, url, file_name): cookies = 
self.kwargs.get("cookies", None) start_time = time.time() m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text) key = None def random_iv(): return os.urandom(16) file_d = output(file_name[0], self.config, file_name[1]) if file_d is None: return hls_time_stamp = self.kwargs.pop("hls_time_stamp", False) decryptor = None size_media = len(m3u8.media_segment) eta = ETA(size_media) total_duration = 0 duration = 0 max_duration = 0 for index, i in enumerate(m3u8.media_segment): if "duration" in i["EXTINF"]: duration = i["EXTINF"]["duration"] max_duration = max(max_duration, duration) total_duration += duration item = get_full_url(i["URI"], url) if not self.config.get("silent"): if self.config.get("live"): progressbar(size_media, index + 1, "".join(["DU: ", str(timedelta(seconds=int(total_duration)))])) else: eta.increment() progressbar(size_media, index + 1, "".join(["ETA: ", str(eta)])) data = self.http.request("get", item, cookies=cookies) if data.status_code == 404: break data = data.content if m3u8.encrypted: headers = {} if self.keycookie: keycookies = self.keycookie else: keycookies = cookies if self.authorization: headers["authorization"] = self.authorization # Update key/decryptor if "EXT-X-KEY" in i: keyurl = get_full_url(i["EXT-X-KEY"]["URI"], url) if keyurl and keyurl[:4] == "skd:": raise HLSException(keyurl, "Can't decrypt beacuse of DRM") key = self.http.request("get", keyurl, cookies=keycookies, headers=headers).content iv = binascii.unhexlify(i["EXT-X-KEY"]["IV"][2:].zfill(32)) if "IV" in i["EXT-X-KEY"] else random_iv() backend = default_backend() cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) decryptor = cipher.decryptor() # In some cases the playlist say its encrypted but the files is not. # This happen on svtplay 5.1ch stream where it started with ID3.. # Adding the other ones is header for mpeg-ts files. third byte is 10 or 11.. 
if data[:3] != b"ID3" and data[:3] != b"\x47\x40\x11" and data[:3] != b"\x47\x40\x10": if decryptor: data = decryptor.update(data) else: raise ValueError("No decryptor found for encrypted hls steam.") file_d.write(data) if self.config.get("capture_time") > 0 and total_duration >= self.config.get("capture_time") * 60: break if (size_media == (index + 1)) and self.config.get("live"): sleep_int = (start_time + max_duration * 2) - time.time() if sleep_int > 0: time.sleep(sleep_int) size_media_old = size_media while size_media_old == size_media: start_time = time.time() if hls_time_stamp: end_time_stamp = (datetime.utcnow() - timedelta(minutes=1, seconds=max_duration * 2)).replace(microsecond=0) start_time_stamp = end_time_stamp - timedelta(minutes=1) base_url = url.split(".m3u8")[0] url = "{}.m3u8?in={}&out={}?".format(base_url, start_time_stamp.isoformat(), end_time_stamp.isoformat()) new_m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text) for n_m3u in new_m3u8.media_segment: if not any(d["URI"] == n_m3u["URI"] for d in m3u8.media_segment): m3u8.media_segment.append(n_m3u) size_media = len(m3u8.media_segment) if size_media_old == size_media: time.sleep(max_duration) file_d.close() if not self.config.get("silent"): progress_stream.write("\n") self.finished = True class M3U8: # Created for hls version <=7 # https://tools.ietf.org/html/rfc8216 MEDIA_SEGMENT_TAGS = ("EXTINF", "EXT-X-BYTERANGE", "EXT-X-DISCONTINUITY", "EXT-X-KEY", "EXT-X-MAP", "EXT-X-PROGRAM-DATE-TIME", "EXT-X-DATERANGE") MEDIA_PLAYLIST_TAGS = ( "EXT-X-TARGETDURATION", "EXT-X-MEDIA-SEQUENCE", "EXT-X-DISCONTINUITY-SEQUENCE", "EXT-X-ENDLIST", "EXT-X-PLAYLIST-TYPE", "EXT-X-I-FRAMES-ONLY", ) MASTER_PLAYLIST_TAGS = ("EXT-X-MEDIA", "EXT-X-STREAM-INF", "EXT-X-I-FRAME-STREAM-INF", "EXT-X-SESSION-DATA", "EXT-X-SESSION-KEY") MEDIA_OR_MASTER_PLAYLIST_TAGS = ("EXT-X-INDEPENDENT-SEGMENTS", "EXT-X-START") TAG_TYPES = {"MEDIA_SEGMENT": 0, "MEDIA_PLAYLIST": 1, "MASTER_PLAYLIST": 2} def __init__(self, data): 
self.version = None self.media_segment = [] self.media_playlist = {} self.master_playlist = [] self.encrypted = False self.independent_segments = False self.parse_m3u(data) def __str__(self): return "Version: {}\nMedia Segment: {}\nMedia Playlist: {}\nMaster Playlist: {}\nEncrypted: {}\tIndependent_segments: {}".format( self.version, self.media_segment, self.media_playlist, self.master_playlist, self.encrypted, self.independent_segments, ) def parse_m3u(self, data): if not data.startswith("#EXTM3U"): raise ValueError("Does not appear to be an 'EXTM3U' file.") data = data.replace("\r\n", "\n") lines = data.split("\n")[1:] last_tag_type = None tag_type = None media_segment_info = {} for index, l in enumerate(lines): if not l: continue elif l.startswith("#EXT"): info = {} tag, attr = _get_tag_attribute(l) if tag == "EXT-X-VERSION": self.version = int(attr) # 4.3.2. Media Segment Tags elif tag in M3U8.MEDIA_SEGMENT_TAGS: tag_type = M3U8.TAG_TYPES["MEDIA_SEGMENT"] # 4.3.2.1. EXTINF if tag == "EXTINF": if "," in attr: dur, title = attr.split(",", 1) else: dur = attr title = None info["duration"] = float(dur) info["title"] = title # 4.3.2.2. EXT-X-BYTERANGE elif tag == "EXT-X-BYTERANGE": if "@" in attr: n, o = attr.split("@", 1) info["n"], info["o"] = (int(n), int(o)) else: info["n"] = int(attr) info["o"] = 0 # 4.3.2.3. EXT-X-DISCONTINUITY elif tag == "EXT-X-DISCONTINUITY": pass # 4.3.2.4. EXT-X-KEY elif tag == "EXT-X-KEY": self.encrypted = True info = _get_tuple_attribute(attr) # 4.3.2.5. EXT-X-MAP elif tag == "EXT-X-MAP": info = _get_tuple_attribute(attr) # 4.3.2.6. EXT-X-PROGRAM-DATE-TIME" elif tag == "EXT-X-PROGRAM-DATE-TIME": info = attr # 4.3.2.7. EXT-X-DATERANGE elif tag == "EXT-X-DATERANGE": info = _get_tuple_attribute(attr) media_segment_info[tag] = info # 4.3.3. Media Playlist Tags elif tag in M3U8.MEDIA_PLAYLIST_TAGS: tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"] # 4.3.3.1. EXT-X-TARGETDURATION if tag == "EXT-X-TARGETDURATION": info = int(attr) # 4.3.3.2. 
EXT-X-MEDIA-SEQUENCE elif tag == "EXT-X-MEDIA-SEQUENCE": info = int(attr) # 4.3.3.3. EXT-X-DISCONTINUITY-SEQUENCE elif tag == "EXT-X-DISCONTINUITY-SEQUENCE": info = int(attr) # 4.3.3.4. EXT-X-ENDLIST elif tag == "EXT-X-ENDLIST": break # 4.3.3.5. EXT-X-PLAYLIST-TYPE elif tag == "EXT-X-PLAYLIST-TYPE": info = attr # 4.3.3.6. EXT-X-I-FRAMES-ONLY elif tag == "EXT-X-I-FRAMES-ONLY": pass self.media_playlist[tag] = info # 4.3.4. Master Playlist Tags elif tag in M3U8.MASTER_PLAYLIST_TAGS: tag_type = M3U8.TAG_TYPES["MASTER_PLAYLIST"] # 4.3.4.1. EXT-X-MEDIA if tag == "EXT-X-MEDIA": info = _get_tuple_attribute(attr) # 4.3.4.2. EXT-X-STREAM-INF elif tag == "EXT-X-STREAM-INF": info = _get_tuple_attribute(attr) if "BANDWIDTH" not in info: raise ValueError("Can't find 'BANDWIDTH' in 'EXT-X-STREAM-INF'") info["URI"] = lines[index + 1] # 4.3.4.3. EXT-X-I-FRAME-STREAM-INF elif tag == "EXT-X-I-FRAME-STREAM-INF": info = _get_tuple_attribute(attr) # 4.3.4.4. EXT-X-SESSION-DATA elif tag == "EXT-X-SESSION-DATA": info = _get_tuple_attribute(attr) # 4.3.4.5. EXT-X-SESSION-KEY elif tag == "EXT-X-SESSION-KEY": self.encrypted = True info = _get_tuple_attribute(attr) info["TAG"] = tag self.master_playlist.append(info) # 4.3.5. Media or Master Playlist Tags elif tag in M3U8.MEDIA_OR_MASTER_PLAYLIST_TAGS: tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"] # 4.3.5.1. EXT-X-INDEPENDENT-SEGMENTS if tag == "EXT-X-INDEPENDENT-SEGMENTS": self.independent_segments = True # 4.3.5.2. 
EXT-X-START elif tag == "EXT-X-START": info = _get_tuple_attribute(attr) self.media_playlist[tag] = info # Unused tags else: pass # This is a comment elif l.startswith("#"): pass # This must be a url/uri else: tag_type = None if last_tag_type is M3U8.TAG_TYPES["MEDIA_SEGMENT"]: media_segment_info["URI"] = l self.media_segment.append(media_segment_info) media_segment_info = {} last_tag_type = tag_type if self.media_segment and self.master_playlist: raise ValueError("This 'M3U8' file contains data for both 'Media Segment' and 'Master Playlist'. This is not allowed.") def _get_tag_attribute(line): line = line[1:] try: search_line = re.search(r"^([A-Z\-]*):(.*)", line) return search_line.group(1), search_line.group(2) except Exception: return line, None def _get_tuple_attribute(attribute): attr_tuple = {} for art_l in re.split(""",(?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", attribute): if art_l: name, value = art_l.split("=", 1) name = name.strip() # Checks for attribute name if not re.match(r"^[A-Z0-9\-]*$", name): raise ValueError("Not a valid attribute name.") # Remove extra quotes of string if value.startswith('"') and value.endswith('"'): value = value[1:-1] attr_tuple[name] = value return attr_tuple svtplay-dl-3.0/lib/svtplay_dl/fetcher/http.py000066400000000000000000000025041401224433100213460ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import os from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progressbar class HTTP(VideoRetriever): @property def name(self): return "http" def download(self): """ Get the stream from HTTP """ _, ext = os.path.splitext(self.url) if ext == ".mp3": self.output_extention = "mp3" else: self.output_extention = "mp4" # this might be wrong.. 
data = self.http.request("get", self.url, stream=True) try: total_size = data.headers["content-length"] except KeyError: total_size = 0 total_size = int(total_size) bytes_so_far = 0 file_d = output(self.output, self.config, self.output_extention) if file_d is None: return eta = ETA(total_size) for i in data.iter_content(8192): bytes_so_far += len(i) file_d.write(i) if not self.config.get("silent"): eta.update(bytes_so_far) progressbar(total_size, bytes_so_far, "".join(["ETA: ", str(eta)])) file_d.close() self.finished = True svtplay-dl-3.0/lib/svtplay_dl/log.py000066400000000000000000000002671401224433100175340ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import logging import sys log = logging.getLogger("svtplay_dl") progress_stream = sys.stderr svtplay-dl-3.0/lib/svtplay_dl/postprocess/000077500000000000000000000000001401224433100207605ustar00rootroot00000000000000svtplay-dl-3.0/lib/svtplay_dl/postprocess/__init__.py000066400000000000000000000251631401224433100231000ustar00rootroot00000000000000import logging import os import platform import re from json import dumps from random import sample from re import match from shutil import which from requests import codes from requests import post from requests import Timeout from svtplay_dl.utils.output import formatname from svtplay_dl.utils.proc import run_program class postprocess: def __init__(self, stream, config, subfixes=None): self.stream = stream self.config = config self.subfixes = subfixes self.detect = None for i in ["ffmpeg", "avconv"]: self.detect = which(i) if self.detect: break def remux(self): if self.detect is None: logging.error("Cant detect ffmpeg or avconv. 
Cant mux files without it.") return if self.stream.finished is False: return if formatname(self.stream.output, self.config, self.stream.output_extention).endswith(".mp4") is False: orig_filename = formatname(self.stream.output, self.config, self.stream.output_extention) name, ext = os.path.splitext(orig_filename) new_name = "{}.mp4".format(name) cmd = [self.detect, "-i", orig_filename] _, stdout, stderr = run_program(cmd, False) # return 1 is good here. streams = _streams(stderr) videotrack, audiotrack = _checktracks(streams) if self.config.get("merge_subtitle"): logging.info("Muxing {} and merging its subtitle into {}".format(orig_filename, new_name)) else: logging.info("Muxing {} into {}".format(orig_filename, new_name)) tempfile = "{}.temp".format(orig_filename) arguments = [] if videotrack: arguments += ["-map", "{}".format(videotrack)] if audiotrack: arguments += ["-map", "{}".format(audiotrack)] arguments += ["-c", "copy", "-f", "mp4"] if ext == ".ts" and "aac" in _getcodec(streams, audiotrack): arguments += ["-bsf:a", "aac_adtstoasc"] if self.config.get("merge_subtitle"): langs = _sublanguage(self.stream, self.config, self.subfixes) for stream_num, language in enumerate(langs): arguments += [ "-map", str(stream_num + 1), "-c:s:" + str(stream_num), "mov_text", "-metadata:s:s:" + str(stream_num), "language=" + language, ] if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) cmd += ["-i", subfile] else: subfile = "{}.srt".format(name) cmd += ["-i", subfile] arguments += ["-y", tempfile] cmd += arguments returncode, stdout, stderr = run_program(cmd) if returncode != 0: return if self.config.get("merge_subtitle") and not self.config.get("subtitle"): logging.info("Muxing done, removing the old files.") if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) os.remove(subfile) else: os.remove(subfile) else: logging.info("Muxing done, removing 
the old file.") os.remove(orig_filename) os.rename(tempfile, new_name) def merge(self): if self.detect is None: logging.error("Cant detect ffmpeg or avconv. Cant mux files without it.") return if self.stream.finished is False: return orig_filename = formatname(self.stream.output, self.config, self.stream.output_extention) name, ext = os.path.splitext(orig_filename) if ext == ".ts": audio_filename = "{}.audio.ts".format(name) else: audio_filename = "{}.m4a".format(name) cmd = [self.detect] if self.config.get("only_video") or not self.config.get("only_audio"): cmd += ["-i", orig_filename] if self.config.get("only_audio") or not self.config.get("only_video"): cmd += ["-i", audio_filename] _, stdout, stderr = run_program(cmd, False) # return 1 is good here. streams = _streams(stderr) videotrack, audiotrack = _checktracks(streams) if self.config.get("merge_subtitle"): logging.info("Merge audio, video and subtitle into {}".format(orig_filename)) else: logging.info("Merge audio and video into {}".format(orig_filename)) tempfile = "{}.temp".format(orig_filename) arguments = ["-c:v", "copy", "-c:a", "copy", "-f", "mp4"] if ext == ".ts": if audiotrack and "aac" in _getcodec(streams, audiotrack): arguments += ["-bsf:a", "aac_adtstoasc"] cmd = [self.detect] if self.config.get("only_video") or not self.config.get("only_audio"): cmd += ["-i", orig_filename] if self.config.get("only_audio") or not self.config.get("only_video"): cmd += ["-i", audio_filename] if videotrack: arguments += ["-map", "{}".format(videotrack)] if audiotrack: arguments += ["-map", "{}".format(audiotrack)] if self.config.get("merge_subtitle"): langs = _sublanguage(self.stream, self.config, self.subfixes) tracks = [x for x in [videotrack, audiotrack] if x] for stream_num, language in enumerate(langs, start=len(tracks)): arguments += [ "-map", str(stream_num), "-c:s:" + str(stream_num - 2), "mov_text", "-metadata:s:s:" + str(stream_num - 2), "language=" + language, ] if self.subfixes and len(self.subfixes) >= 
2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) cmd += ["-i", subfile] else: subfile = "{}.srt".format(name) cmd += ["-i", subfile] arguments += ["-y", tempfile] cmd += arguments returncode, stdout, stderr = run_program(cmd) if returncode != 0: return logging.info("Merging done, removing old files.") if self.config.get("only_video") or not self.config.get("only_audio"): os.remove(orig_filename) if self.config.get("only_audio") or not self.config.get("only_video"): os.remove(audio_filename) if self.config.get("merge_subtitle") and not self.config.get("subtitle"): if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) os.remove(subfile) else: os.remove(subfile) os.rename(tempfile, orig_filename) def _streams(output): return re.findall(r"Stream \#(\d:\d)([\[\(][^\[]+[\]\)])?([\(\)\w]+)?: (Video|Audio): (.*)", output) def _getcodec(streams, number): for stream in streams: if stream[0] == number: return stream[4] return None def _checktracks(streams): videotrack = None audiotrack = None for stream in streams: if stream[3] == "Video": videotrack = stream[0] if stream[3] == "Audio": if stream[4] == "mp3, 0 channels": continue audiotrack = stream[0] return videotrack, audiotrack def _sublanguage(stream, config, subfixes): # parse() function partly borrowed from a guy on github. /thanks! 
# https://github.com/riobard/srt.py/blob/master/srt.py def parse(self): def parse_block(block): lines = block.strip("-").split("\n") txt = "\r\n".join(lines[2:]) return txt if platform.system() == "Windows": fd = open(self, encoding="utf8") else: fd = open(self) return list(map(parse_block, fd.read().strip().replace("\r", "").split("\n\n"))) def query(self): _ = parse(self) random_sentences = " ".join(sample(_, len(_) if len(_) < 8 else 8)).replace("\r\n", "") url = "https://whatlanguage.herokuapp.com" payload = {"query": random_sentences} # Note: requests handles json from version 2.4.2 and onwards so i use json.dumps for now. headers = {"content-type": "application/json"} try: # Note: reasonable timeout i guess? svtplay-dl is mainly used while multitasking i presume, # and it is heroku after all (fast enough) r = post(url, data=dumps(payload), headers=headers, timeout=30) if r.status_code == codes.ok: try: response = r.json() return response["language"] except TypeError: return "und" else: logging.error("Server error appeared. Setting language as undetermined.") return "und" except Timeout: logging.error("30 seconds server timeout reached. 
Setting language as undetermined.") return "und" langs = [] exceptions = {"lulesamiska": "smj", "meankieli": "fit", "jiddisch": "yid"} if subfixes and len(subfixes) >= 2: logging.info("Determining the languages of the subtitles.") else: logging.info("Determining the language of the subtitle.") if config.get("get_all_subtitles"): for subfix in subfixes: if [exceptions[key] for key in exceptions.keys() if match(key, subfix.strip("-"))]: if "oversattning" in subfix.strip("-"): subfix = subfix.strip("-").split(".")[0] else: subfix = subfix.strip("-") langs += [exceptions[subfix]] continue subfile = "{}.srt".format(os.path.splitext(formatname(stream.output, config, stream.output_extention))[0] + subfix) langs += [query(subfile)] else: subfile = "{}.srt".format(os.path.splitext(formatname(stream.output, config, stream.output_extention))[0]) langs += [query(subfile)] if len(langs) >= 2: logging.info("Language codes: " + ", ".join(langs)) else: logging.info("Language code: " + langs[0]) return langs svtplay-dl-3.0/lib/svtplay_dl/service/000077500000000000000000000000001401224433100200345ustar00rootroot00000000000000svtplay-dl-3.0/lib/svtplay_dl/service/__init__.py000066400000000000000000000165031401224433100221520ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import logging import os import re from urllib.parse import urlparse from svtplay_dl.utils.http import download_thumbnails from svtplay_dl.utils.http import HTTP from svtplay_dl.utils.parser import merge from svtplay_dl.utils.parser import readconfig from svtplay_dl.utils.parser import setup_defaults class Service: supported_domains = [] supported_domains_re = [] def __init__(self, config, _url, http=None): self._url = _url self._urldata = None self._error = False self.subtitle = None self.cookies = {} self.auto_name = None self.output = { "title": None, "season": None, "episode": None, "episodename": None, "id": None, "service": 
self.__class__.__name__.lower(), "tvshow": None, "title_nice": None, "showdescription": None, "episodedescription": None, "showthumbnailurl": None, "episodethumbnailurl": None, "publishing_datetime": None, } # Config if config.get("configfile") and os.path.isfile(config.get("configfile")): self.config = merge( readconfig(setup_defaults(), config.get("configfile"), service=self.__class__.__name__.lower()).get_variable(), config.get_variable(), ) else: self.config = config if not http: self.http = HTTP(self.config) else: self.http = http logging.debug("service: {}".format(self.__class__.__name__.lower())) @property def url(self): return self._url def get_urldata(self): if self._urldata is None: self._urldata = self.http.request("get", self.url).text return self._urldata @classmethod def handles(cls, url): urlp = urlparse(url) # Apply supported_domains_re regexp to the netloc. This # is meant for 'dynamic' domains, e.g. containing country # information etc. for domain_re in [re.compile(x) for x in cls.supported_domains_re]: if domain_re.match(urlp.netloc): return True if urlp.netloc in cls.supported_domains: return True # For every listed domain, try with www.subdomain as well. if urlp.netloc in ["www." + x for x in cls.supported_domains]: return True return False def get_subtitle(self, options): pass # the options parameter is unused, but is part of the # interface, so we don't want to remove it. Thus, the # pylint ignore. def find_all_episodes(self, options): # pylint: disable-msg=unused-argument logging.warning("--all-episodes not implemented for this service") return [self.url] def opengraph_get(html, prop): """ Extract specified OpenGraph property from html. 
>>> opengraph_get('>> opengraph_get('>> opengraph_get(']*property="og:' + prop + '" content="([^"]*)"', html) if match is None: match = re.search(']*content="([^"]*)" property="og:' + prop + '"', html) if match is None: return None return match.group(1) class OpenGraphThumbMixin: """ Mix this into the service class to grab thumbnail from OpenGraph properties. """ def get_thumbnail(self, options): url = opengraph_get(self.get_urldata(), "image") if url is None: return download_thumbnails(self.output, options, [(False, url)]) class MetadataThumbMixin: """ Mix this into the service class to grab thumbnail from extracted metadata. """ def get_thumbnail(self, options): urls = [] if self.output["showthumbnailurl"] is not None: urls.append((True, self.output["showthumbnailurl"])) if self.output["episodethumbnailurl"] is not None: urls.append((False, self.output["episodethumbnailurl"])) if urls: download_thumbnails(self.output, options, urls) class Generic(Service): """ Videos embed in sites """ def get(self, sites): data = self.http.request("get", self.url).text return self._match(data, sites) def _match(self, data, sites): match = re.search(r"src=(\"|\')(http://www.svt.se/wd[^\'\"]+)(\"|\')", data) stream = None if match: url = match.group(2) for i in sites: if i.handles(url): url = url.replace("&", "&").replace("&", "&") return url, i(self.config, url) matchlist = [ r"src=\"(https://player.vimeo.com/video/[0-9]+)\" ", r'src="(http://tv.aftonbladet[^"]*)"', r'a href="(http://tv.aftonbladet[^"]*)" class="abVi', r"iframe src='(http://www.svtplay[^']*)'", 'src="(http://mm-resource-service.herokuapp.com[^"]*)"', r'src="([^.]+\.solidtango.com[^"+]+)"', 's.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', ] for i in matchlist: match = re.search(i, data) if match: url = match.group(1) for n in sites: if n.handles(match.group(1)): return match.group(1), n(self.config, url) match = re.search(r"tv4play.se/iframe/video/(\d+)?", data) if match: url = 
"http://www.tv4play.se/?video_id=%s" % match.group(1) for i in sites: if i.handles(url): return url, i(self.config, url) match = re.search("(lemonwhale|lwcdn.com)", data) if match: url = "http://lemonwhale.com" for i in sites: if i.handles(url): return self.url, i(self.config, self.url) match = re.search("(picsearch_ajax_auth|screen9-ajax-auth)", data) if match: url = "http://csp.picsearch.com" for i in sites: if i.handles(url): return self.url, i(self.config, self.url) match = re.search('iframe src="(//csp.screen9.com[^"]+)"', data) if match: url = "http:%s" % match.group(1) for i in sites: if i.handles(url): return self.url, i(self.config, self.url) match = re.search('source src="([^"]+)" type="application/x-mpegURL"', data) if match: for i in sites: if i.__name__ == "Raw": return self.url, i(self.config, match.group(1)) return self.url, stream def service_handler(sites, options, url): handler = None for i in sites: if i.handles(url): handler = i(options, url) break return handler svtplay-dl-3.0/lib/svtplay_dl/service/aftonbladet.py000066400000000000000000000043231401224433100226730ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import json import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service from svtplay_dl.utils.text import decode_html_entities class Aftonbladettv(Service): supported_domains = ["svd.se"] def get(self): data = self.get_urldata() match = re.search('data-player-config="([^"]+)"', data) if not match: match = re.search('data-svpPlayer-video="([^"]+)"', data) if not match: yield ServiceError("Can't find video info") return data = json.loads(decode_html_entities(match.group(1))) streams = hlsparse(self.config, self.http.request("get", data["streamUrls"]["hls"]), data["streamUrls"]["hls"], output=self.output) for n in list(streams.keys()): yield streams[n] class Aftonbladet(Service): supported_domains = 
["aftonbladet.se", "tv.aftonbladet.se"] def get(self): data = self.get_urldata() match = re.search("window.FLUX_STATE = ({.*})", data) if not match: yield ServiceError("Can't find video info") return try: janson = json.loads(match.group(1)) except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {}".format(match.group(1))) return videos = self._get_video(janson) yield from videos def _get_video(self, janson): collections = janson["collections"] for n in list(collections.keys()): contents = collections[n]["contents"]["items"] for i in list(contents.keys()): if "type" in contents[i] and contents[i]["type"] == "video": streams = hlsparse( self.config, self.http.request("get", contents[i]["videoAsset"]["streamUrls"]["hls"]), contents[i]["videoAsset"]["streamUrls"]["hls"], output=self.output, ) for key in list(streams.keys()): yield streams[key] svtplay-dl-3.0/lib/svtplay_dl/service/atg.py000066400000000000000000000026561401224433100211720ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import json from datetime import datetime from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service class Atg(Service): supported_domains = ["atgplay.se"] def get(self): parse = urlparse(self.url) if not parse.path.startswith("/video"): yield ServiceError("Can't find video info") return wanted_id = parse.path[7:] current_time = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000) api_url = "https://www.atgplay.se/api/{}/video/{}".format(current_time, wanted_id) video_assets = self.http.request("get", api_url) try: janson = json.loads(video_assets.text) except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {}".format(video_assets.text)) return if "title" in janson: self.output["title"] = janson["title"] if "urls" in janson: for i in 
janson["urls"]: if "m3u" == i: stream = hlsparse(self.config, self.http.request("get", janson["urls"]["m3u"]), janson["urls"]["m3u"], output=self.output) for key in list(stream.keys()): yield stream[key] svtplay-dl-3.0/lib/svtplay_dl/service/barnkanalen.py000066400000000000000000000161111401224433100226620ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import hashlib import json import logging import re from urllib.parse import parse_qs from urllib.parse import urljoin from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.service.svtplay import Svtplay from svtplay_dl.utils.text import filenamify class Barnkanalen(Svtplay): supported_domains = ["svt.se"] supported_path = "/barnkanalen" @classmethod def handles(cls, url): urlp = urlparse(url) correctpath = urlp.path.startswith(cls.supported_path) if urlp.netloc in cls.supported_domains and correctpath: return True # For every listed domain, try with www. subdomain as well. if urlp.netloc in ["www." 
+ x for x in cls.supported_domains] and correctpath: return True return False def get(self): parse = urlparse(self.url) query = parse_qs(parse.query) self.access = None if "accessService" in query: self.access = query["accessService"] match = re.search("__barnplay'] = ({.*});", self.get_urldata()) if not match: yield ServiceError("Can't find video info.") return janson = json.loads(match.group(1))["context"]["dispatcher"]["stores"]["ApplicationStateStore"]["data"] if "episodeModel" not in janson["categoryStateCache"]["karaktarer"]: yield ServiceError("No videos found") return janson["video"] = janson["categoryStateCache"]["karaktarer"]["episodeModel"] if "title" not in janson["video"]: yield ServiceError("Can't find any video on that page.") return if "live" in janson["video"]: self.config.set("live", janson["video"]["live"]) self.outputfilename(janson["video"]) self.extrametadata(janson) if "programVersionId" in janson["video"]: vid = janson["video"]["programVersionId"] else: vid = janson["video"]["id"] res = self.http.get("http://api.svt.se/videoplayer-api/video/{}".format(vid)) try: janson = res.json() except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {}".format(res.request.url)) return videos = self._get_video(janson) yield from videos def find_all_episodes(self, config): videos = [] match = re.search("__barnplay'] = ({.*});", self.get_urldata()) if not match: logging.error("Couldn't retrieve episode list.") return else: dataj = json.loads(match.group(1)) dataj = dataj["context"]["dispatcher"]["stores"]["EpisodesStore"] showId = list(dataj["data"].keys())[0] items = dataj["data"][showId]["episodes"] for i in items: program = i videos = self.videos_to_list(program, videos) videos.reverse() episodes = [urljoin("http://www.svt.se", x) for x in videos] if config.get("all_last") > 0: return episodes[-config.get("all_last") :] return episodes def videos_to_list(self, lvideos, videos): url = self.url + "/" + str(lvideos["id"]) parse = 
urlparse(url) if parse.path not in videos: videos.append(parse.path) return videos def outputfilename(self, data): name = None desc = None if "programTitle" in data and data["programTitle"]: name = filenamify(data["programTitle"]) elif "titleSlug" in data and data["titleSlug"]: name = filenamify(data["titleSlug"]) other = data["title"] if "programVersionId" in data: vid = str(data["programVersionId"]) else: vid = str(data["id"]) id = hashlib.sha256(vid.encode("utf-8")).hexdigest()[:7] if name == other: other = None elif name is None: name = other other = None season, episode = self.seasoninfo(data) if "accessService" in data: if data["accessService"] == "audioDescription": desc = "syntolkat" if data["accessService"] == "signInterpretation": desc = "teckentolkat" if not other: other = desc elif desc: other += "-{}".format(desc) self.output["title"] = name self.output["id"] = id self.output["season"] = season self.output["episode"] = episode self.output["episodename"] = other def seasoninfo(self, data): season, episode = None, None if "season" in data and data["season"]: season = "{:02d}".format(data["season"]) if int(season) == 0: season = None if "episodeNumber" in data and data["episodeNumber"]: episode = "{:02d}".format(data["episodeNumber"]) if int(episode) == 0: episode = None if episode is not None and season is None: # Missing season, happens for some barnkanalen shows assume first and only season = "01" return season, episode def extrametadata(self, data): self.output["tvshow"] = self.output["season"] is not None and self.output["episode"] is not None try: self.output["publishing_datetime"] = data["video"]["broadcastDate"] / 1000 except KeyError: pass try: title = data["video"]["programTitle"] self.output["title_nice"] = title except KeyError: title = data["video"]["titleSlug"] self.output["title_nice"] = title try: t = data["state"]["titleModel"]["thumbnail"] except KeyError: t = "" if isinstance(t, dict): url = 
"https://www.svtstatic.se/image/original/default/{id}/{changed}?format=auto&quality=100".format(**t) self.output["showthumbnailurl"] = url elif t: # Get the image if size/format is not specified in the URL set it to large url = t.format(format="large") self.output["showthumbnailurl"] = url try: t = data["video"]["thumbnailXL"] except KeyError: try: t = data["video"]["thumbnail"] except KeyError: t = "" if isinstance(t, dict): url = "https://www.svtstatic.se/image/original/default/{id}/{changed}?format=auto&quality=100".format(**t) self.output["episodethumbnailurl"] = url elif t: # Get the image if size/format is not specified in the URL set it to large url = t.format(format="large") self.output["episodethumbnailurl"] = url try: self.output["showdescription"] = data["state"]["titleModel"]["description"] except KeyError: pass try: self.output["episodedescription"] = data["video"]["description"] except KeyError: pass svtplay-dl-3.0/lib/svtplay_dl/service/bigbrother.py000066400000000000000000000056061401224433100225440ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import copy import json import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hds import hdsparse from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.fetcher.http import HTTP from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Bigbrother(Service, OpenGraphThumbMixin): supported_domains = ["bigbrother.se"] def get(self): data = self.get_urldata() match = re.search(r'id="(bcPl[^"]+)"', data) if not match: yield ServiceError("Can't find flash id.") return flashid = match.group(1) match = re.search(r'playerID" value="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Can't find playerID") return playerid = match.group(1) match = re.search(r'playerKey" value="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Can't find playerKey") return 
playerkey = match.group(1) match = re.search(r'videoPlayer" value="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Can't find videoPlayer info") return videoplayer = match.group(1) dataurl = ( "http://c.brightcove.com/services/viewer/htmlFederated?flashID={}&playerID={}&playerKey={}" "&isVid=true&isUI=true&dynamicStreaming=true&@videoPlayer={}".format(flashid, playerid, playerkey, videoplayer) ) data = self.http.request("get", dataurl).content match = re.search(r"experienceJSON = ({.*});", data) if not match: yield ServiceError("Can't find json data") return jsondata = json.loads(match.group(1)) renditions = jsondata["data"]["programmedContent"]["videoPlayer"]["mediaDTO"]["renditions"] if jsondata["data"]["publisherType"] == "PREMIUM": yield ServiceError("Premium content") return for i in renditions: if i["defaultURL"].endswith("f4m"): streams = hdsparse( copy.copy(self.config), self.http.request("get", i["defaultURL"], params={"hdcore": "3.7.0"}), i["defaultURL"], output=self.output, ) for n in list(streams.keys()): yield streams[n] if i["defaultURL"].endswith("m3u8"): streams = hlsparse(self.config, self.http.request("get", i["defaultURL"]), i["defaultURL"], output=self.output) for n in list(streams.keys()): yield streams[n] if i["defaultURL"].endswith("mp4"): yield HTTP(copy.copy(self.config), i["defaultURL"], i["encodingRate"] / 1024, output=self.output) svtplay-dl-3.0/lib/svtplay_dl/service/cmore.py000066400000000000000000000124771401224433100215260ustar00rootroot00000000000000import logging import re from urllib.parse import urljoin from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service class Cmore(Service): supported_domains = ["www.cmore.se", "www.cmore.dk", "www.cmore.no", "www.cmore.fi"] def get(self): if not self.config.get("username") or not self.config.get("password"): yield ServiceError("You need username and password to download 
things from this site.") return token, message = self._login() if not token: yield ServiceError(message) return vid = self._get_vid() if not vid: yield ServiceError("Can't find video id") return tld = self._gettld() self.output["id"] = vid metaurl = "https://playback-api.b17g.net/asset/{}?service=cmore.{}" "&device=browser&drm=widevine&protocol=dash%2Chls".format( self.output["id"], tld, ) res = self.http.get(metaurl) janson = res.json() self._autoname(janson) if janson["metadata"]["isDrmProtected"]: yield ServiceError("Can't play this because the video got drm.") return url = "https://playback-api.b17g.net/media/{}?service=cmore.{}&device=browser&protocol=hls%2Cdash&drm=widevine".format(self.output["id"], tld) res = self.http.request("get", url, cookies=self.cookies, headers={"authorization": "Bearer {}".format(token)}) if res.status_code > 200: yield ServiceError("Can't play this because the video is geoblocked.") return if res.json()["playbackItem"]["type"] == "hls": streams = hlsparse( self.config, self.http.request("get", res.json()["playbackItem"]["manifestUrl"]), res.json()["playbackItem"]["manifestUrl"], output=self.output, ) for n in list(streams.keys()): yield streams[n] def find_all_episodes(self, config): episodes = [] token, message = self._login() if not token: logging.error(message) return res = self.http.get(self.url) tags = re.findall(' 0: return sorted(episodes[-config.get("all_last") :]) return sorted(episodes) def _gettld(self): if isinstance(self.url, list): parse = urlparse(self.url[0]) else: parse = urlparse(self.url) return re.search(r"\.(\w{2})$", parse.netloc).group(1) def _login(self): tld = self._gettld() if self.config.get("cmoreoperator"): url = "https://tve.cmore.se/country/{}/operator/{}/user/{}/exists?client=cmore-web-prod".format( tld, self.config.get("cmoreoperator"), self.config.get("username"), ) post = { "password": self.config.get("password"), } else: url = "https://account-delta.b17g.services/api?client=cmore-web" post = { 
"query": "mutation($username: String, $password: String, $site: String) { login(credentials:" "{username: $username, password: $password}, site: $site) { user { ...UserFields } session { token vimondToken } }} " "fragment UserFields on User { acceptedCmoreTerms acceptedPlayTerms countryCode email firstName genericAds " "lastName tv4UserDataComplete userId username yearOfBirth zipCode type}", "variables": {"username": self.config.get("username"), "password": self.config.get("password"), "site": "CMORE_SE"}, } res = self.http.post(url, json=post, cookies=self.cookies) if res.status_code >= 400: return None, "Wrong username or password" janson = res.json() token = janson["data"]["login"]["session"]["vimondToken"] return token, None def operatorlist(self): res = self.http.get("https://tve.cmore.se/country/{}/operator?client=cmore-web-prod".format(self._gettld())) for i in res.json()["data"]["operators"]: print("operator: '{}'".format(i["name"].lower())) def _get_vid(self): res = self.http.get(self.url) match = re.search('data-asset-id="([^"]+)"', res.text) if match: return match.group(1) parse = urlparse(self.url) match = re.search(r"/(\d+)-[\w-]+$", parse.path) if match: return match.group(1) return None def _autoname(self, janson): if "seriesTitle" in janson["metadata"]: self.output["title"] = janson["metadata"]["seriesTitle"] self.output["episodename"] = janson["metadata"]["episodeTitle"] else: self.output["title"] = janson["metadata"]["title"] self.output["season"] = janson["metadata"]["seasonNumber"] self.output["episode"] = janson["metadata"]["episodeNumber"] self.config.set("live", janson["metadata"]["isLive"]) svtplay-dl-3.0/lib/svtplay_dl/service/disney.py000066400000000000000000000077411401224433100217120ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import copy import json import re from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls 
import hlsparse from svtplay_dl.fetcher.http import HTTP from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Disney(Service, OpenGraphThumbMixin): supported_domains = ["disney.se", "video.disney.se", "disneyjunior.disney.se"] def get(self): parse = urlparse(self.url) if parse.hostname == "video.disney.se" or parse.hostname == "disneyjunior.disney.se": data = self.get_urldata() match = re.search(r"Grill.burger=({.*}):", data) if not match: yield ServiceError("Can't find video info") return jsondata = json.loads(match.group(1)) for n in jsondata["stack"]: if len(n["data"]) > 0: for x in n["data"]: if "flavors" in x: for i in x["flavors"]: if i["format"] == "mp4": res = self.http.get(i["url"]) match = re.search('button primary" href="([^"]+)"', res.text) if match: yield HTTP(copy.copy(self.config), match.group(1), i["bitrate"], output=self.output) else: data = self.get_urldata() match = re.search(r"uniqueId : '([^']+)'", data) if not match: yield ServiceError("Can't find video info") return uniq = match.group(1) match = re.search("entryId : '([^']+)'", self.get_urldata()) entryid = match.group(1) match = re.search("partnerId : '([^']+)'", self.get_urldata()) partnerid = match.group(1) match = re.search("uiConfId : '([^']+)'", self.get_urldata()) uiconfid = match.group(1) match = re.search("json : ({.*}}),", self.get_urldata()) jsondata = json.loads(match.group(1)) parse = urlparse(self.url) if len(parse.fragment) > 0: entry = parse.fragment[parse.fragment.rindex("/") + 1 :] if entry in jsondata["idlist"]: entryid = jsondata["idlist"][entry] else: yield ServiceError("Cant find video info") return for i in jsondata["playlists"][0]["playlist"]: if entryid in i["id"]: title = i["longId"] break self.output["title"] = title url = ( "http://cdnapi.kaltura.com/html5/html5lib/v1.9.7.6/mwEmbedFrame.php?&wid={}&uiconf_id={}&entry_id={}" "&playerId={}&forceMobileHTML5=true&urid=1.9.7.6&callback=mwi".format(partnerid, uiconfid, entryid, 
uniq) ) data = self.http.request("get", url).text match = re.search(r"mwi\(({.*})\);", data) jsondata = json.loads(match.group(1)) data = jsondata["content"] match = re.search(r"window.kalturaIframePackageData = ({.*});", data) jsondata = json.loads(match.group(1)) ks = jsondata["enviornmentConfig"]["ks"] name = jsondata["entryResult"]["meta"]["name"] self.output["title"] = name url = ( "http://cdnapi.kaltura.com/p/{}/sp/{}00/playManifest/entryId/{}/format/applehttp/protocol/http/a.m3u8" "?ks={}&referrer=aHR0cDovL3d3dy5kaXNuZXkuc2U=&".format(partnerid[1:], partnerid[1:], entryid, ks) ) redirect = self.http.check_redirect(url) streams = hlsparse(self.config, self.http.request("get", redirect), redirect, output=self.output) for n in list(streams.keys()): yield streams[n] svtplay-dl-3.0/lib/svtplay_dl/service/dplay.py000066400000000000000000000231521401224433100215220ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import datetime import hashlib import logging import random import re from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle country = {"sv": ".se", "da": ".dk", "no": ".no"} REALMS = {"discoveryplus.se": "dplayse", "discoveryplus.no": "dplayno", "discoveryplus.dk": "dplaydk"} class Dplay(Service): supported_domains = ["discoveryplus.se", "discoveryplus.no", "discoveryplus.dk"] packages = [] def get(self): parse = urlparse(self.url) self.domain = re.search(r"(discoveryplus\.\w\w)", parse.netloc).group(1) if not self._token(): logging.error("Something went wrong getting token for requests") if not self._login(): yield ServiceError("You need the 'st' cookie from your web brower for the site to make it work") return channel = False if "kanaler" in parse.path: match = re.search("kanaler/([^/]+)$", parse.path) if not match: yield ServiceError("Can't 
detect 'kanaler'") return path = "/channels/{}".format(match.group(1)) url = "https://disco-api.{}/content{}".format(self.domain, path) channel = True self.config.set("live", True) elif "program" in parse.path: match = re.search("(programmer|program)/([^/]+)$", parse.path) if not match: yield ServiceError("Can't find program url") return path = "/shows/{}".format(match.group(2)) url = "https://disco-api.{}/content{}".format(self.domain, path) res = self.http.get(url, headers={"x-disco-client": "WEB:UNKNOWN:dplay-client:0.0.1"}) programid = res.json()["data"]["id"] qyerystring = ( "include=primaryChannel,show&filter[videoType]=EPISODE&filter[show.id]={}&" "page[size]=100&sort=seasonNumber,episodeNumber,-earliestPlayableStart".format(programid) ) res = self.http.get("https://disco-api.{}/content/videos?{}".format(self.domain, qyerystring)) janson = res.json() vid = 0 slug = None for i in janson["data"]: if int(i["id"]) > vid: vid = int(i["id"]) slug = i["attributes"]["path"] if slug: url = "https://disco-api.{}/content/videos/{}".format(self.domain, slug) else: yield ServiceError("Cant find latest video on program url") return else: match = re.search("(videos|videoer)/(.*)$", parse.path) url = "https://disco-api.{}/content/videos/{}".format(self.domain, match.group(2)) res = self.http.get(url, headers={"x-disco-client": "WEB:UNKNOWN:dplay-client:0.0.1"}) janson = res.json() if "errors" in janson: yield ServiceError("Cant find any videos on this url") return if channel: name = janson["data"]["attributes"]["name"] self.output["title"] = name else: name = self._autoname(janson) if name is None: yield ServiceError("Cant find vid id for autonaming") return self.output["id"] = janson["data"]["id"] api = "https://disco-api.{}/playback/videoPlaybackInfo/{}?usePreAuth=true".format(self.domain, janson["data"]["id"]) res = self.http.get(api) if res.status_code > 400: yield ServiceError("You dont have permission to watch this") return streams = hlsparse( self.config, 
self.http.request("get", res.json()["data"]["attributes"]["streaming"]["hls"]["url"]), res.json()["data"]["attributes"]["streaming"]["hls"]["url"], httpobject=self.http, output=self.output, ) for n in list(streams.keys()): if isinstance(streams[n], subtitle): # we get the subtitles from the hls playlist. if self.config.get("get_all_subtitles"): yield streams[n] else: if streams[n].subfix in country and country[streams[n].subfix] in self.domain: yield streams[n] else: yield streams[n] def _autoname(self, jsondata): match = re.search("^([^/]+)/", jsondata["data"]["attributes"]["path"]) self.output["title"] = match.group(1) self.output["season"] = int(jsondata["data"]["attributes"]["seasonNumber"]) self.output["episode"] = int(jsondata["data"]["attributes"]["episodeNumber"]) self.output["episodename"] = jsondata["data"]["attributes"]["name"] return self.output["title"] def find_all_episodes(self, config): parse = urlparse(self.url) self.domain = re.search(r"(discoveryplus\.\w\w)", parse.netloc).group(1) programid = None seasons = [] episodes = [] match = re.search("^/(program|programmer|videos|videoer)/([^/]+)", parse.path) if not match: logging.error("Can't find show name") return None if not self._login(): logging.error("Need the 'st' cookie to work") return None if not self._token(): logging.error("Something went wrong getting token for requests") self._getpackages() urllocal = "" if self.domain in ["dplay.dk", "dplay.no"]: urllocal = "mer" url = "http://disco-api.{}/cms/routes/program{}/{}?decorators=viewingHistory&include=default".format(self.domain, urllocal, match.group(2)) res = self.http.get(url) if res.status_code > 400: logging.error("Cant find any videos. 
wrong url?") return episodes showid = None for what in res.json()["included"]: if "attributes" in what and "alias" in what["attributes"] and "season" in what["attributes"]["alias"]: programid = what["id"] for ses in what["attributes"]["component"]["filters"]: if ses["id"] == "seasonNumber": for opt in ses["options"]: seasons.append(opt["value"]) if "mandatoryParams" in what["attributes"]["component"]: showid = what["attributes"]["component"]["mandatoryParams"] if programid: for season in seasons: page = 1 totalpages = 1 while page <= totalpages: querystring = "decorators=viewingHistory&include=default&page[items.number]={}&pf[seasonNumber]={}".format( page, season, ) if showid: querystring += "&{}".format(showid) res = self.http.get("https://disco-api.{}/cms/collections/{}?{}".format(self.domain, programid, querystring)) janson = res.json() totalpages = janson["data"]["meta"]["itemsTotalPages"] for i in janson["included"]: if i["type"] != "video": continue if i["attributes"]["videoType"] == "EPISODE": if not self._playablefile(i["attributes"]["availabilityWindows"]): continue episodes.append("https://www.{}/videos/{}".format(self.domain, i["attributes"]["path"])) page += 1 if not episodes: logging.error("Cant find any playable files") if config.get("all_last") > 0: return episodes[: config.get("all_last")] return episodes def _login(self): res = self.http.get("https://disco-api.{}/users/me".format(self.domain), headers={"authority": "disco-api.{}".format(self.domain)}) if res.status_code >= 400: return False if not res.json()["data"]["attributes"]["anonymous"]: return True return False def _token(self) -> bool: # random device id for cookietoken deviceid = hashlib.sha256(bytes(int(random.random() * 1000))).hexdigest() url = "https://disco-api.{}/token?realm={}&deviceId={}&shortlived=true".format(self.domain, REALMS[self.domain], deviceid) res = self.http.get(url) if res.status_code >= 400: return False return True def _getpackages(self): res = 
self.http.get("https://disco-api.{}/users/me".format(self.domain), headers={"authority": "disco-api.{}".format(self.domain)}) if res.status_code < 400: self.packages.extend(res.json()["data"]["attributes"]["packages"]) def _playablefile(self, needs): playable = False now = datetime.datetime.utcnow() for package in self.packages: for need in needs: if package != need["package"]: continue start = datetime.datetime.strptime(need["playableStart"], "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=None) if now > start: if "playableEnd" in need: end = datetime.datetime.strptime(need["playableEnd"], "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=None) if now < end: playable = True else: playable = True return playable svtplay-dl-3.0/lib/svtplay_dl/service/dr.py000066400000000000000000000060571401224433100210230ustar00rootroot00000000000000import copy import json import logging import re import uuid from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle class Dr(Service, OpenGraphThumbMixin): supported_domains = ["dr.dk"] def get(self): data = self.get_urldata() match = re.search("__data = ([^<]+)", data) if not match: yield ServiceError("Cant find info for this video") return janson = json.loads(match.group(1)) page = janson["cache"]["page"][list(janson["cache"]["page"].keys())[0]] offers = page["entries"][0]["item"]["offers"] resolution = None vid = None for i in offers: if i["deliveryType"] == "Stream": vid = i["scopes"][0] resolution = i["resolution"] deviceid = uuid.uuid1() res = self.http.request( "post", "https://isl.dr-massive.com/api/authorization/anonymous-sso?device=web_browser&ff=idp%2Cldp&lang=da", json={"deviceId": str(deviceid), "scopes": ["Catalog"], "optout": True}, ) token = res.json()[0]["value"] url = 
"https://isl.dr-massive.com/api/account/items/{}/videos?delivery=stream&device=web_browser&ff=idp%2Cldp&lang=da&resolution={}&sub=Anonymous".format( vid, resolution, ) res = self.http.request("get", url, headers={"authorization": "Bearer {}".format(token)}) for video in res.json(): if video["accessService"] == "StandardVideo": if video["format"] == "video/hls": res = self.http.request("get", video["url"]) if res.status_code > 400: yield ServiceError("Can't play this because the video is geoblocked or not available.") else: streams = hlsparse(self.config, res, video["url"], output=self.output) for n in list(streams.keys()): yield streams[n] yield subtitle(copy.copy(self.config), "wrst", video["subtitles"][0]["link"], output=self.output) def find_all_episodes(self, config): episodes = [] data = self.get_urldata() match = re.search("__data = ([^<]+)", data) if not match: logging.error("Can't find video info.") return episodes janson = json.loads(match.group(1)) page = janson["cache"]["page"][list(janson["cache"]["page"].keys())[0]] item = page["entries"][0]["item"] if "season" in item: entries = item["season"]["episodes"]["items"] for i in entries: episodes.append("https://www.dr.dk/drtv{}".format(i["watchPath"])) if config.get("all_last") != -1: episodes = episodes[: config.get("all_last")] else: episodes.reverse() return episodes svtplay-dl-3.0/lib/svtplay_dl/service/efn.py000066400000000000000000000012031401224433100211520ustar00rootroot00000000000000import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Efn(Service, OpenGraphThumbMixin): supported_domains_re = ["www.efn.se"] def get(self): match = re.search('data-hls="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Cant find video info") return streams = hlsparse(self.config, self.http.request("get", match.group(1)), match.group(1), output=self.output) for n in 
list(streams.keys()): yield streams[n] svtplay-dl-3.0/lib/svtplay_dl/service/eurosport.py000066400000000000000000000121061401224433100224500ustar00rootroot00000000000000import json import re from urllib.parse import quote from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service class Eurosport(Service): supported_domains_re = [r"^([^.]+\.)*eurosportplayer.com"] def get(self): parse = urlparse(self.url) match = re.search("window.server_path = ({.*});", self.get_urldata()) if not match: yield ServiceError("Cant find api key") return janson = json.loads(match.group(1)) clientapikey = janson["sdk"]["clientApiKey"] devices = "https://eu.edge.bamgrid.com/devices" postdata = {"deviceFamily": "browser", "applicationRuntime": "firefox", "deviceProfile": "macosx", "attributes": {}} header = {"authorization": "Bearer {}".format(clientapikey)} res = self.http.post(devices, headers=header, json=postdata) assertion = res.json()["assertion"] token = "https://eu.edge.bamgrid.com/token" data = { "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange", "latitude": 0, "longitude": 0, "platform": "browser", "subject_token": assertion, "subject_token_type": "urn:bamtech:params:oauth:token-type:device", } res = self.http.post(token, headers=header, data=data) access_token = res.json()["access_token"] login = "https://eu.edge.bamgrid.com/idp/login" header = {"authorization": "Bearer {}".format(access_token)} res = self.http.post(login, headers=header, json={"email": self.config.get("username"), "password": self.config.get("password")}) if res.status_code > 400: yield ServiceError("Wrong username or password") return id_token = res.json()["id_token"] grant = "https://eu.edge.bamgrid.com/accounts/grant" res = self.http.post(grant, headers=header, json={"id_token": id_token}) assertion = res.json()["assertion"] token = "https://eu.edge.bamgrid.com/token" data = { "grant_type": 
"urn:ietf:params:oauth:grant-type:token-exchange", "latitude": 0, "longitude": 0, "platform": "browser", "subject_token": assertion, "subject_token_type": "urn:bamtech:params:oauth:token-type:account", } header = {"authorization": "Bearer {}".format(clientapikey)} res = self.http.post(token, headers=header, data=data) access_token = res.json()["access_token"] query = {"preferredLanguages": ["en"], "mediaRights": ["GeoMediaRight"], "uiLang": "en", "include_images": True} if parse.path[:11] == "/en/channel": pagetype = "channel" match = re.search("/([^/]+)$", parse.path) if not match: yield ServiceError("Cant find channel") return (vid,) = match.groups() query["pageType"] = pagetype query["channelCallsign"] = vid query["channelCallsigns"] = vid query["onAir"] = True self.config.set("live", True) # lets override to true url = ( "https://search-api.svcs.eurosportplayer.com/svc/search/v2/graphql/persisted/" "query/eurosport/web/Airings/onAir?variables={}".format(quote(json.dumps(query))) ) res = self.http.get(url, headers={"authorization": access_token}) vid2 = res.json()["data"]["Airings"][0]["channel"]["id"] url = "https://global-api.svcs.eurosportplayer.com/channels/{}/scenarios/browser".format(vid2) res = self.http.get(url, headers={"authorization": access_token, "Accept": "application/vnd.media-service+json; version=1"}) hls_url = res.json()["stream"]["slide"] else: pagetype = "event" match = re.search("/([^/]+)/([^/]+)$", parse.path) if not match: yield ServiceError("Cant fint event id") return query["title"], query["contentId"] = match.groups() query["pageType"] = pagetype url = "https://search-api.svcs.eurosportplayer.com/svc/search/v2/graphql/" "persisted/query/eurosport/Airings?variables={}".format( quote(json.dumps(query)), ) res = self.http.get(url, headers={"authorization": access_token}) programid = res.json()["data"]["Airings"][0]["programId"] mediaid = res.json()["data"]["Airings"][0]["mediaId"] url = 
"https://global-api.svcs.eurosportplayer.com/programs/{}/media/{}/scenarios/browser".format(programid, mediaid) res = self.http.get(url, headers={"authorization": access_token, "Accept": "application/vnd.media-service+json; version=1"}) hls_url = res.json()["stream"]["complete"] streams = hlsparse(self.config, self.http.request("get", hls_url), hls_url, authorization=access_token, output=self.output) for n in list(streams.keys()): yield streams[n] svtplay-dl-3.0/lib/svtplay_dl/service/expressen.py000066400000000000000000000016131401224433100224230ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import json import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service from svtplay_dl.utils.text import decode_html_entities class Expressen(Service): supported_domains = ["expressen.se"] def get(self): data = self.get_urldata() match = re.search('data-article-data="([^"]+)"', data) if not match: yield ServiceError("Cant find video file info") return data = decode_html_entities(match.group(1)) janson = json.loads(data) self.config.set("live", janson["isLive"]) streams = hlsparse(self.config, self.http.request("get", janson["stream"]), janson["stream"], output=self.output) for n in list(streams.keys()): yield streams[n] svtplay-dl-3.0/lib/svtplay_dl/service/facebook.py000066400000000000000000000026241401224433100221630ustar00rootroot00000000000000import copy import json import re from urllib.parse import unquote_plus from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.http import HTTP from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Facebook(Service, OpenGraphThumbMixin): supported_domains_re = ["www.facebook.com"] def get(self): data = self.get_urldata() match = re.search('params","([^"]+)"', data) if not match: yield ServiceError("Cant find params info. 
video need to be public.") return data2 = json.loads('["{}"]'.format(match.group(1))) data2 = json.loads(unquote_plus(data2[0])) if "sd_src_no_ratelimit" in data2["video_data"]["progressive"][0]: yield HTTP(copy.copy(self.config), data2["video_data"]["progressive"][0]["sd_src_no_ratelimit"], "240", output=self.output) else: yield HTTP(copy.copy(self.config), data2["video_data"]["progressive"][0]["sd_src"], "240") if "hd_src_no_ratelimit" in data2["video_data"]["progressive"][0]: yield HTTP(copy.copy(self.config), data2["video_data"]["progressive"][0]["hd_src_no_ratelimit"], "720", output=self.output) else: if data2["video_data"]["progressive"][0]["hd_src"]: yield HTTP(copy.copy(self.config), data2["video_data"]["progressive"][0]["hd_src"], "720", output=self.output) svtplay-dl-3.0/lib/svtplay_dl/service/filmarkivet.py000066400000000000000000000012211401224433100227170ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import copy import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.http import HTTP from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Filmarkivet(Service, OpenGraphThumbMixin): supported_domains = ["filmarkivet.se"] def get(self): match = re.search(r'[^/]file: "(http[^"]+)', self.get_urldata()) if not match: yield ServiceError("Can't find the video file") return yield HTTP(copy.copy(self.config), match.group(1), 480, output=self.output) svtplay-dl-3.0/lib/svtplay_dl/service/flowonline.py000066400000000000000000000025001401224433100225570ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import copy import re from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle 
class Flowonline(Service, OpenGraphThumbMixin): supported_domains_re = [r"^([a-z]{1,4}\.|www\.)?flowonline\.tv$"] def get(self): match = re.search('iframe src="(/embed/[^"]+)"', self.get_urldata()) if not match: yield ServiceError("Cant find video") return parse = urlparse(self.url) url = "{}://{}{}".format(parse.scheme, parse.netloc, match.group(1)) data = self.http.get(url) match = re.search('src="([^"]+vtt)"', data.text) if match: yield subtitle(copy.copy(self.config), "wrst", match.group(1)) match = re.search('source src="([^"]+)" type="application/x-mpegURL"', data.text) if not match: yield ServiceError("Cant find video file") return streams = hlsparse(self.config, self.http.request("get", match.group(1)), match.group(1), output=self.output) for n in list(streams.keys()): yield streams[n] svtplay-dl-3.0/lib/svtplay_dl/service/koket.py000066400000000000000000000060721401224433100215300ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service def findCourse(data, courseSlug): for c in data["content"]["coursePages"]: if c["slug"] == courseSlug: return c return None def findLesson(course, lessonSlug): for l in course["lessons"]: if l["slug"] == lessonSlug: return l return None class Koket(Service, OpenGraphThumbMixin): supported_domains = ["koket.se"] supported_path = "/kurser" def __init__(self, config, _url, http=None): Service.__init__(self, config, _url, http) self._data = None def get(self): urlp = urlparse(self.url) slugs = urlp.path.split("/") courseSlug = slugs[2] lessonSlug = slugs[3] login = self._login() if not login: yield ServiceError("Could not login") return data = self._getData() if data is None: yield ServiceError("Could not fetch data") return course = findCourse(data, courseSlug) 
if course is None: yield ServiceError("Could not find course") return lesson = findLesson(course, lessonSlug) if lesson is None: yield ServiceError("Could not find lesson") return self.output["id"] = lesson["videoAssetId"] self.output["title"] = lesson["title"] url = "https://playback-api.b17g.net/media/{}?service=tv4&device=browser&protocol=hls%2Cdash&drm=widevine".format(self.output["id"]) videoDataRes = self.http.get(url) if videoDataRes.json()["playbackItem"]["type"] == "hls": streams = hlsparse( self.config, self.http.get(videoDataRes.json()["playbackItem"]["manifestUrl"]), videoDataRes.json()["playbackItem"]["manifestUrl"], output=self.output, ) for n in list(streams.keys()): yield streams[n] def _login(self): if self._getAuthToken() is None: username = self.config.get("username") password = self.config.get("password") if (not username) or (not password): return False url = "https://www.koket.se/account/login" login = {"username": username, "password": password} self.http.get(url) self.http.post(url, data=login) if self._getAuthToken() is None: return False return True def _getAuthToken(self): return self.http.cookies.get("authToken") def _getData(self): auth_token = self._getAuthToken() if auth_token is None: return None if self._data is None: self._data = self.http.get("https://www.koket.se/kurser/api/data/{}".format(auth_token)).json() return self._data svtplay-dl-3.0/lib/svtplay_dl/service/lemonwhale.py000066400000000000000000000042201401224433100225370ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import json import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service from svtplay_dl.utils.text import decode_html_entities class Lemonwhale(Service): # lemonwhale.com is just bogus for generic supported_domains = ["vk.se", "lemonwhale.com"] def get(self): vid = self.get_vid() if not vid: yield ServiceError("Can't 
find video id") return url = "http://ljsp.lwcdn.com/web/public/item.json?type=video&%s" % decode_html_entities(vid) data = self.http.request("get", url).text jdata = json.loads(data) if "videos" in jdata: streams = self.get_video(jdata) if streams: for n in list(streams.keys()): yield streams[n] url = "http://ljsp.lwcdn.com/web/public/video.json?id={}&delivery=hls".format(decode_html_entities(vid)) data = self.http.request("get", url).text jdata = json.loads(data) if "videos" in jdata: streams = self.get_video(jdata) for n in list(streams.keys()): yield streams[n] def get_vid(self): match = re.search(r'video url-([^"]+)', self.get_urldata()) if match: return match.group(1) match = re.search(r"__INITIAL_STATE__ = ({.*})", self.get_urldata()) if match: janson = json.loads(match.group(1)) vid = janson["content"]["current"]["data"]["templateData"]["pageData"]["video"]["id"] return vid match = re.search(r'embed.jsp\?([^"]+)"', self.get_urldata()) if match: return match.group(1) return None def get_video(self, janson): videos = janson["videos"][0]["media"]["streams"] for i in videos: if i["name"] == "auto": hls = "{}{}".format(janson["videos"][0]["media"]["base"], i["url"]) streams = hlsparse(self.config, self.http.request("get", hls), hls, output=self.output) return streams svtplay-dl-3.0/lib/svtplay_dl/service/mtvnn.py000066400000000000000000000145251401224433100215570ustar00rootroot00000000000000import json import logging import re import xml.etree.ElementTree as ET from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service # This is _very_ similar to mtvservices.. 
class Mtvnn(Service, OpenGraphThumbMixin): supported_domains = ["nickelodeon.se", "nickelodeon.nl", "nickelodeon.no", "www.comedycentral.se", "nickelodeon.dk"] def get(self): data = self.get_urldata() parse = urlparse(self.url) if parse.netloc.endswith("se"): match = re.search(r'
', data) if not match: yield ServiceError("Can't find video info") return match_id = re.search(r'data-id="([0-9a-fA-F|\-]+)" ', match.group(1)) if not match_id: yield ServiceError("Can't find video info") return wanted_id = match_id.group(1) url_service = ( "http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed?mgid=mgid:arc:episode:nick.intl:{}" "&arcEp=nickelodeon.se&imageEp=nickelodeon.se&stage=staging&accountOverride=intl.mtvi.com&ep=a9cc543c".format(wanted_id) ) service_asset = self.http.request("get", url_service) match_guid = re.search('(.*)', service_asset.text) if not match_guid: yield ServiceError("Can't find video info") return hls_url = ( "https://mediautilssvcs-a.akamaihd.net/services/MediaGenerator/{}?arcStage=staging&accountOverride=intl.mtvi.com&" "billingSection=intl&ep=a9cc543c&acceptMethods=hls".format(match_guid.group(1)) ) hls_asset = self.http.request("get", hls_url) xml = ET.XML(hls_asset.text) if ( xml.find("./video") is not None and xml.find("./video").find("item") is not None and xml.find("./video").find("item").find("rendition") is not None and xml.find("./video").find("item").find("rendition").find("src") is not None ): hls_url = xml.find("./video").find("item").find("rendition").find("src").text stream = hlsparse(self.config, self.http.request("get", hls_url), hls_url, output=self.output) for key in list(stream.keys()): yield stream[key] return match = re.search(r'data-mrss=[\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\'"]+)[\'"]', data) if not match: yield ServiceError("Can't find id for the video") return mrssxmlurl = match.group(1) data = self.http.request("get", mrssxmlurl).content xml = ET.XML(data) title = xml.find("channel").find("item").find("title").text self.output["title"] = title match = re.search("gon.viacom_config=([^;]+);", self.get_urldata()) if match: countrycode = json.loads(match.group(1))["country_code"].replace("_", "/") match = re.search("mtvnn.com:([^&]+)", mrssxmlurl) if match: urlpart = 
match.group(1).replace("-", "/").replace("playlist", "playlists") # it use playlists dunno from where it gets it hlsapi = "http://api.mtvnn.com/v2/{}/{}.json?video_format=m3u8&callback=&".format(countrycode, urlpart) data = self.http.request("get", hlsapi).text dataj = json.loads(data) for i in dataj["local_playlist_videos"]: streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output) for n in list(streams.keys()): yield streams[n] def find_all_episodes(self, config): match = re.search(r"data-franchise='([^']+)'", self.get_urldata()) if match is None: logging.error("Couldn't program id") return programid = match.group(1) match = re.findall(r"
  • 0: alt = self.http.get(query["alt"][0]) if alt: streams = hlsparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output) for n in list(streams.keys()): yield streams[n] if i["format"] == "hds" or i["format"] == "flash": match = re.search(r"\/se\/secure\/", i["url"]) if not match: streams = hdsparse(self.config, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}), i["url"], output=self.output) for n in list(streams.keys()): yield streams[n] if "alt" in query and len(query["alt"]) > 0: alt = self.http.get(query["alt"][0]) if alt: streams = hdsparse( self.config, self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}), alt.request.url, output=self.output, ) for n in list(streams.keys()): yield streams[n] if i["format"] == "dash264" or i["format"] == "dashhbbtv": streams = dashparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output) for n in list(streams.keys()): yield streams[n] if "alt" in query and len(query["alt"]) > 0: alt = self.http.get(query["alt"][0]) if alt: streams = dashparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output) for n in list(streams.keys()): yield streams[n] def find_video_id(self): match = re.search('data-video-id="([^"]+)"', self.get_urldata()) if match: return match.group(1) return None def find_all_episodes(self, config): page = 1 data = self.get_urldata() match = re.search(r'"/etikett/titel/([^"/]+)', data) if match is None: match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url) if match is None: logging.error("Couldn't find title") return program = match.group(1) episodes = [] n = 0 if self.config.get("all_last") > 0: sort = "tid_fallande" else: sort = "tid_stigande" while True: url = "http://www.oppetarkiv.se/etikett/titel/{}/?sida={}&sort={}&embed=true".format(program, page, sort) data = self.http.request("get", url) if data.status_code == 404: break data = data.text regex 
def name(self, data):
    """Strip the season/episode suffix from a title string.

    Titles look like "Show - S.song 1 - Avsnitt 2" or "Show - Avsnitt 2";
    everything from the first such marker onward is dropped. A title with
    no marker (or a marker at position 0) is returned unchanged.
    """
    for marker in (" - S.song", " - Avsnitt"):
        cut = data.find(marker)
        if cut > 0:
            return data[:cut]
    return data
self.http.request( "get", "http://csp.screen9.com/player?eventParam=1&" "ajaxauth={}&method=embed&mediaid={}".format(ajax_auth.group(1), mediaid), ).text jsondata = json.loads(jsondata) if "data" in jsondata: if "live" in jsondata["data"]["publishing_status"]: self.config.set("live", jsondata["data"]["publishing_status"]["live"]) playlist = jsondata["data"]["streams"] for i in playlist: if "application/x-mpegurl" in i: streams = hlsparse( self.config, self.http.request("get", i["application/x-mpegurl"]), i["application/x-mpegurl"], output=self.output, ) if streams: for n in list(streams.keys()): yield streams[n] if "video/mp4" in i: yield HTTP(copy.copy(self.config), i["video/mp4"], 800, output=self.output) if self.backupapi: res = self.http.get(self.backupapi.replace("i=", ""), params={"i": "object"}) data = res.text.replace("ps.embedHandler(", "").replace('"");', "") data = data[: data.rfind(",")] jansson = json.loads(data) for i in jansson["media"]["playerconfig"]["playlist"]: if "provider" in i and i["provider"] == "httpstreaming": streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output) for n in list(streams.keys()): yield streams[n] def get_auth(self): match = re.search(r"picsearch_ajax_auth[ ]*=[ ]*['\"]([^'\"]+)['\"]", self.get_urldata()) if not match: match = re.search(r'screen9-ajax-auth="([^"]+)"', self.get_urldata()) if not match: match = re.search('screen9"[ ]*:[ ]*"([^"]+)"', self.get_urldata()) if not match: match = re.search('data-auth="([^"]+)"', self.get_urldata()) if not match: match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', self.get_urldata()) if match: data = self.http.request("get", match.group(1)) self.backupapi = match.group(1) match = re.search(r'ajaxAuth": "([^"]+)"', data.text) if not match: match = re.search('iframe src="(//csp.screen9.com[^"]+)"', self.get_urldata()) if match: url = "http:{}".format(match.group(1)) data = 
def get_mediaid(self):
    """Locate the media id for the embedded player.

    Tries a series of inline markup patterns first, then falls back to
    fetching the picsearch loader script or the screen9 iframe, and
    finally to the URL fragment. Returns a regex match object for the
    markup cases, or the raw fragment string for the last resort.
    """
    page = self.get_urldata()
    inline_patterns = (
        r"mediaId = '([^']+)';",
        r'media-id="([^"]+)"',
        r'screen9-mid="([^"]+)"',
        r'data-id="([^"]+)"',
        r"data-id=([^ ]+) ",
        r'data-videoid="([^"]+)"',
    )
    for pattern in inline_patterns:
        match = re.search(pattern, page)
        if match:
            return match
    # Fallback 1: the picsearch loader script embeds the id in its body.
    match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', page)
    if match:
        script = self.http.request("get", match.group(1))
        match = re.search(r'mediaid": "([^"]+)"', script.text)
    if not match:
        # Fallback 2: a screen9 iframe embed (protocol-relative URL).
        match = re.search('iframe src="(//csp.screen9.com[^"]+)"', page)
        if match:
            frame = self.http.request("get", "http:{}".format(match.group(1)))
            match = re.search(r"mediaid: '([^']+)'", frame.text)
    if not match:
        # Last resort: the URL fragment is taken as the id itself.
        match = urlparse(self.url).fragment
    return match
class Radioplay(Service):
    """Service for radioplay.se: yields one HTTP audio stream per quality."""

    supported_domains = ["radioplay.se"]

    def get(self):
        """Yield HTTP streams parsed from the page's RP.vcdData blob."""
        page = self.get_urldata()
        found = re.search(r"RP.vcdData = ({.*});", page)
        if not found:
            yield ServiceError("Can't find stream info")
            return
        vcd = json.loads(found.group(1))
        streams = vcd["station"]["streams"]
        # Keys are the stream identifiers and double as the quality label.
        for quality in list(streams.keys()):
            yield HTTP(copy.copy(self.config), streams[quality], quality)
class Riksdagen(Service, OpenGraphThumbMixin):
    """Service for riksdagen.se: yields HLS and/or MP4 streams for a video."""

    supported_domains_re = ["riksdagen.se", "www.riksdagen.se"]

    def get(self):
        """Yield streams for the video id found at the end of the URL."""
        id_match = re.search("_([a-zA-Z0-9]+)$", self.url)
        if not id_match:
            yield ServiceError("Cant find video id.")
            return
        res = self.http.get("http://www.riksdagen.se/api/videostream/get/%s" % id_match.group(1))
        data = res.json()
        try:
            files = data["videodata"][0]["streams"]["files"]
        except TypeError:
            # Presumably the API returns null videodata for unknown ids,
            # which makes the subscript raise TypeError — TODO confirm.
            yield ServiceError("Cant find video.")
            return
        for entry in files:
            if entry["mimetype"] == "application/x-mpegurl":
                # The entry's URL points at a second JSON document that
                # holds the actual playlist URL.
                playlist = self.http.get(entry["url"]).json()
                streams = hlsparse(self.config, self.http.request("get", playlist["url"]), playlist["url"], output=self.output)
                for key in list(streams.keys()):
                    yield streams[key]
            if entry["mimetype"] == "video/mp4":
                for variant in entry["bandwidth"]:
                    yield HTTP(copy.copy(self.config), variant["url"], variant["quality"], output=self.output)
self.get_urldata() match = re.search(r'"([^"]+geo.php)"', data) if match: data = self.http.request("get", match.group(1)).content match = re.search(r"punktur=\(([^ ]+)\)", data) if match: janson = json.loads(match.group(1)) self.config.get("live", checklive(janson["result"][1])) streams = hlsparse(self.config, self.http.request("get", janson["result"][1]), janson["result"][1], output=self.output) for n in list(streams.keys()): yield streams[n] else: yield ServiceError("Can't find json info") else: match = re.search(r'(http[^<]+)", data) if match: data = self.http.request("get", match.group(1)).text match = re.search("is_livestream: true", data) if match: self.config.set("live", True) match = re.search("isLivestream: true", data) if match: self.config.set("live", True) match = re.search('html5_source: "([^"]+)"', data) match2 = re.search('hlsURI: "([^"]+)"', data) if match: streams = hlsparse(self.config, self.http.request("get", match.group(1)), match.group(1), output=self.output) for n in list(streams.keys()): yield streams[n] elif match2: streams = hlsparse(self.config, self.http.request("get", match2.group(1)), match2.group(1), output=self.output) for n in list(streams.keys()): yield streams[n] else: parse = urlparse(self.url) url2 = "https://{}/api/v1/play/{}.xml".format(parse.netloc, parse.path[parse.path.rfind("/") + 1 :]) data = self.http.request("get", url2) if data.status_code != 200: yield ServiceError("Can't find video info. if there is a video on the page. 
its a bug.") return xmldoc = data.text xml = ET.XML(xmldoc) elements = xml.findall(".//manifest") streams = hlsparse(self.config, self.http.request("get", elements[0].text), elements[0].text, output=self.output) for n in list(streams.keys()): yield streams[n] svtplay-dl-3.0/lib/svtplay_dl/service/sportlib.py000066400000000000000000000057201401224433100222500ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import re from urllib.parse import urljoin from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Sportlib(Service, OpenGraphThumbMixin): supported_domains = ["sportlib.se"] def get(self): data = self.http.get("https://www.sportlib.se/sportlib/login").text match = re.search('src="(/app[^"]+)">', data) if not match: yield ServiceError("Can't find url for login info") return url = urljoin("https://www.sportlib.se", match.group(1)) data = self.http.get(url).text match = re.search('CLIENT_SECRET:"([^"]+)"', data) if not match: yield ServiceError("Cant fint login info") return cs = match.group(1) match = re.search('CLIENT_ID:"([^"]+)"', data) if not match: yield ServiceError("Cant fint login info") return cid = match.group(1) res = self.http.get("https://core.oz.com/channels?slug=sportlib&org=www.sportlib.se") janson = res.json() sid = janson["data"][0]["id"] data = { "client_id": cid, "client_secret": cs, "grant_type": "password", "username": self.config.get("username"), "password": self.config.get("password"), } res = self.http.post("https://core.oz.com/oauth2/token?channelId={}".format(sid), data=data) if res.status_code > 200: yield ServiceError("Wrong username / password?") return janson = res.json() token_type = janson["token_type"].title() access_token = janson["access_token"] parse = urlparse(self.url) match = 
class Sr(Service, OpenGraphThumbMixin):
    """Service for sverigesradio.se audio clips.

    Scrapes the audio id and type out of the page markup, asks the
    player ajax endpoint for the real audio URL, and yields it as a
    single HTTP stream.
    """

    supported_domains = ["sverigesradio.se"]

    def get(self):
        """Yield the audio stream, or a ServiceError when the page has no audio info."""
        data = self.get_urldata()
        match_id = re.search(r'data-audio-id="(\d+)"', data)
        match_type = re.search(r'data-audio-type="(\w+)"', data)
        # Guard clause instead of the success-branch-first if/else.
        if not (match_id and match_type):
            yield ServiceError("Can't find audio info")
            return
        aid = match_id.group(1)
        # Renamed from `type`, which shadowed the builtin.
        audio_type = match_type.group(1)
        dataurl = "https://sverigesradio.se/sida/playerajax/" "getaudiourl?id={}&type={}&quality=high&format=iis".format(aid, audio_type)
        playerinfo = json.loads(self.http.request("get", dataurl).text)
        # 128 is the bitrate label passed to the fetcher; the endpoint is
        # queried with quality=high — TODO confirm the actual bitrate.
        yield HTTP(copy.copy(self.config), playerinfo["audioUrl"], 128, output=self.output)
class Svt(Svtplay):
    """svt.se news/article pages; stream selection is delegated to Svtplay."""

    supported_domains = ["svt.se", "www.svt.se"]

    def get(self):
        """Yield subtitle and video streams for an svt.se article page.

        Handles two page variants: a redux state blob
        (``n.reduxState=...``) and an escaped JSON blob passed to
        ``JSON.parse`` (``stateData``).
        """
        data = self.get_urldata()
        match = re.search("n.reduxState=(.*);", data)
        if not match:
            # Alternate page variant: state is an escaped JS string literal.
            match = re.search(r"stateData = JSON.parse\(\"(.*)\"\)\<\/script", data)
            if not match:
                yield ServiceError("Cant find video info.")
                return
            # codecs.escape_decode un-escapes the \"...\" literal before JSON
            # parsing. NOTE(review): escape_decode is an undocumented CPython API.
            janson = json.loads(codecs.escape_decode(match.group(1))[0].decode("utf-8"))
            if janson["recipe"]["content"]["data"]["videoClips"]:
                vid = janson["recipe"]["content"]["data"]["videoClips"][0]["id"]
            else:
                # No clips — fall back to the first full episode.
                vid = janson["recipe"]["content"]["data"]["videoEpisodes"][0]["id"]
            res = self.http.get("https://api.svt.se/videoplayer-api/video/{}".format(vid))
        else:
            # Redux variant: the id hangs off the first article's image.
            janson = json.loads(match.group(1))
            vid = janson["areaData"]["articles"][list(janson["areaData"]["articles"].keys())[0]]["media"][0]["image"]["svtId"]
            res = self.http.get("https://api.svt.se/video/{}".format(vid))
        janson = res.json()
        if "subtitleReferences" in janson:
            for i in janson["subtitleReferences"]:
                if i["format"] == "websrt" and "url" in i:
                    yield subtitle(copy.copy(self.config), "wrst", i["url"], output=self.output)
        # Video stream selection is shared with the Svtplay base class.
        videos = self._get_video(janson)
        yield from videos
import hlsparse from svtplay_dl.service import MetadataThumbMixin from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle from svtplay_dl.utils.text import filenamify URL_VIDEO_API = "https://api.svt.se/video/" class Svtplay(Service, MetadataThumbMixin): supported_domains = ["svtplay.se", "svt.se", "beta.svtplay.se", "svtflow.se"] info_search_expr = r"