pax_global_header00006660000000000000000000000064135432032140014507gustar00rootroot0000000000000052 comment=65df9eb15c15171ee5e93a7e8b0d8ace098667b3 svtplay-dl-2.4/000077500000000000000000000000001354320321400134535ustar00rootroot00000000000000svtplay-dl-2.4/.coveragerc000066400000000000000000000001421354320321400155710ustar00rootroot00000000000000[run] branch = true include = lib/svtplay_dl/* omit = lib/svtplay_dl/__version__.py */tests/* svtplay-dl-2.4/.gitattributes000066400000000000000000000000531354320321400163440ustar00rootroot00000000000000lib/svtplay_dl/__version__.py export-subst svtplay-dl-2.4/.github/000077500000000000000000000000001354320321400150135ustar00rootroot00000000000000svtplay-dl-2.4/.github/ISSUE_TEMPLATE.md000066400000000000000000000010441354320321400175170ustar00rootroot00000000000000 ### svtplay-dl versions: Run `svtplay-dl --version` ### Operating system and Python version: Name and version of the operating system and python version (run `python --version`) ### What is the issue: Always include the URL you want to download and all switches you are using. 
You should also add `--verbose` because it makes it much easier for use to find the issue :) svtplay-dl --verbose https://www.example.com svtplay-dl-2.4/.gitignore000066400000000000000000000055271354320321400154540ustar00rootroot00000000000000cover/ svtplay-dl svtplay-dl.1 svtplay-dl.1.gz github.com/* # Windows thumbnail cache files Thumbs.db ehthumbs.db ehthumbs_vista.db # Dump file *.stackdump # Folder config file [Dd]esktop.ini # Recycle Bin used on file shares $RECYCLE.BIN/ # Windows Installer files *.cab *.msi *.msix *.msm *.msp # Windows shortcuts *.lnk # General .DS_Store .AppleDouble .LSOverride # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ # Swap [._]*.s[a-v][a-z] [._]*.sw[a-p] [._]s[a-v][a-z] [._]sw[a-p] # Session Session.vim # Temporary .netrwhist *~ # Auto-generated tag files tags # Persistent undo [._]*.un~ # User-specific stuff .idea/**/workspace.xml .idea/**/tasks.xml .idea/**/dictionaries .idea/**/shelf # Sensitive or high-churn files .idea/**/dataSources/ .idea/**/dataSources.ids .idea/**/dataSources.local.xml .idea/**/sqlDataSources.xml .idea/**/dynamic.xml .idea/**/uiDesigner.xml .idea/**/dbnavigator.xml # Gradle .idea/**/gradle.xml .idea/**/libraries # Mongo Explorer plugin .idea/**/mongoSettings.xml # File-based project format *.iws # IntelliJ out/ # mpeltonen/sbt-idea plugin .idea_modules/ # JIRA plugin atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties # Editor-based Rest Client .idea/httpRequests # media *.ts *.mp4 *.srt svtplay-dl-2.4/.pre-commit-config.yaml000066400000000000000000000014121354320321400177320ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: 
https://github.com/pre-commit/pre-commit-hooks rev: v2.3.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: check-added-large-files - repo: https://github.com/ambv/black rev: 19.3b0 hooks: - id: black language_version: python3 - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.3.0 hooks: - id: flake8 - repo: https://github.com/asottile/pyupgrade rev: v1.23.0 hooks: - id: pyupgrade args: [--py3-plus] - repo: https://github.com/asottile/reorder_python_imports rev: v1.6.1 hooks: - id: reorder-python-imports svtplay-dl-2.4/.travis.yml000066400000000000000000000033351354320321400155700ustar00rootroot00000000000000sudo: false language: python matrix: fast_finish: true include: - python: 3.5 env: PRECOMMIT=0 - python: 3.6 - python: 3.7 dist: xenial sudo: true # required workaround for https://github.com/travis-ci/travis-ci/issues/9815 env: CIBUILD=1 cache: pip install: - pip install -r requirements-dev.txt -r requirements.txt awscli script: - python --version - | if [[ -z "$PRECOMMIT" ]] then pre-commit run --all-files --show-diff-on-failure fi - pytest -v --cov - make - | if [[ "$CIBUILD" == "1" && "$TRAVIS_PULL_REQUEST" == "false" ]] then python scripts/cibuild.py fi env: global: - secure: NqhAYBLkalelOFcSNdTaBuqXtQ2r0m9Lt5/V2tDZdtqPjur0H12DLsDIjt/X0jID6ci6LmdbXNZ97wz3D3KjBBeAuw5VRKiiZMPZTuOB5GdFRP/H6PmZYazGPHU2idFJLwG9JUhy+LMyzhpwyw/hruV1a2Udd7VT5fiPkf2hZ7A= - secure: bmNNj7iM/SFKROFVh6s9q877a2z+VtieCkmtdV8SzYfR1KwG27TwxRckSx9JKWfKMri+PvcF9jz9SFt7IMP3SJxM22pYVKKPJa164ze0IuQLAlhsrdNISe2CPOMJrXAq2SP6RTn/YfZnyv3vjxuLJDuG4IPD1Yosln+gBZgCvjk= - secure: AVwpii9rbPfEZ2HpanR9DMRf+0ica2BMui454XL4sgdXkfzkocOvw46ztT+XNpb25fpcfTC/hDOtyFeobNFH8E2DreM8h8kn7pqFCJeGAS8OkbDESAS2HApe1VqJj8xMQCKdZz/B25Ye1Lannqamxrt5Nai6lgQaaBeaXifb6/Q= - secure: ZUK56NpCC5CVUVTY6mWwFlcJxK8/WFf+0g/x71cp6eO7j8+yW78UahcT2ismphVgzvGgYRZCksC2Y/qyl1HzwuKE73gPIcvcD9MFOnGmrTedln78+CLRjZh6+YG4VhIgWhRikI3tGMZ7iChj04Qz6yBBG1hOY4T2oO/Uiv96NOY= - secure: 
keZjlq/Op5xD6ku2rAfvxr7waSCOkHgOJw0d14vBYmWyFtivdzUHeIliQlgsdmzKpGn1qOUXP9l52pYmCkSIwV1xZ6+gyxB5KH52rzs505whtvQOLsMXJeSxhpkI1WcX+4Rc3VUPnirbNyfkImghs07wSGAAMIGs8rbSsbHj/2U= - secure: b4m5le6Q494cPzXHMmBKjyF7UN+cxLs1cn7CnzpHh7dHsAqH0rP+GtuhOj23eTSpFrkUFnA7ofyDF/3SRi1Qij14KeQEj9Ri/fTGGPoOYgvVnds9mbqqWeqzMbVEguxp0gz8F6g8TfUTgt9eFAYkWKnQS5Ar3o56Zh2jT7gBHNo= svtplay-dl-2.4/LICENSE000066400000000000000000000021121354320321400144540ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2011-2015 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
svtplay-dl-2.4/MANIFEST.in000066400000000000000000000001361354320321400152110ustar00rootroot00000000000000include README.md include LICENSE include versioneer.py include lib/svtplay_dl/__version__.py svtplay-dl-2.4/Makefile000066400000000000000000000030051354320321400151110ustar00rootroot00000000000000all: svtplay-dl .PHONY: test cover doctest pylint svtplay-dl \ release clean_releasedir $(RELEASE_DIR) # These variables describe the latest release: VERSION = 1.9.11 LATEST_RELEASE = $(VERSION) # Compress the manual if MAN_GZIP is set to y, ifeq ($(MAN_GZIP),y) MANFILE_EXT = .gz endif MANFILE = svtplay-dl.1$(MANFILE_EXT) # As pod2man is a perl tool, we have to jump through some hoops # to remove references to perl.. :-) POD2MAN ?= pod2man --section 1 --utf8 \ --center "svtplay-dl manual" \ --release "svtplay-dl $(VERSION)" \ --date "$(LATEST_RELEASE_DATE)" PREFIX ?= /usr/local BINDIR = $(PREFIX)/bin PYTHON ?= /usr/bin/env python3 export PYTHONPATH=lib # If you don't have a python3 environment (e.g. mock for py3 and # nosetests3), you can remove the -3 flag. TEST_OPTS ?= -2 -3 svtplay-dl: $(PYFILES) $(MAKE) -C lib mv -f lib/svtplay-dl . 
svtplay-dl.1: svtplay-dl.pod rm -f $@ $(POD2MAN) $< $@ svtplay-dl.1.gz: svtplay-dl.1 rm -f $@ gzip -9 svtplay-dl.1 test: sh scripts/run-tests.sh $(TEST_OPTS) install: svtplay-dl install -d $(DESTDIR)$(BINDIR) install -m 755 svtplay-dl $(DESTDIR)$(BINDIR) cover: sh scripts/run-tests.sh -C pylint: $(MAKE) -C lib pylint doctest: svtplay-dl sh scripts/diff_man_help.sh release: git tag -m "New version $(NEW_RELEASE)" \ -m "$$(git log --oneline $$(git describe --tags --abbrev=0 HEAD^)..HEAD^)" \ $(NEW_RELEASE) clean: $(MAKE) -C lib clean rm -f svtplay-dl rm -f $(MANFILE) rm -rf .tox svtplay-dl-2.4/README.md000066400000000000000000000105371354320321400147400ustar00rootroot00000000000000# svtplay-dl [![Build Status Appveyor](https://ci.appveyor.com/api/projects/status/github/spaam/svtplay-dl?svg=true)](https://ci.appveyor.com/project/spaam/svtplay-dl) [![Build Status Travis](https://travis-ci.org/spaam/svtplay-dl.svg)](https://travis-ci.org/spaam/svtplay-dl/) ## Installation ### MacOS If you have [Homebrew](https://brew.sh/) on your machine you can install by running: ``` brew install svtplay-dl ``` You will need to run `brew install ffmpeg` or `brew install libav` afterwards, if you don't already have one of these packages. ### Debian and Ubuntu svtplay-dl is available in Debian strech and later and on Ubuntu 16.04 and later, which means you can install it straight away using apt. The version in their repo is often old and thus we **strongly** recommend using our own apt repo, which always include the latest version. The svtplay-dl repo for Debian / Ubuntu can be found at [apt.svtplay-dl.se](https://apt.svtplay-dl.se/). 
##### Add the release PGP keys: ``` curl -s https://svtplay-dl.se/release-key.txt | sudo apt-key add - ``` ##### Add the "release" channel to your APT sources: ``` echo "deb https://apt.svtplay-dl.se/ svtplay-dl release" | sudo tee /etc/apt/sources.list.d/svtplay-dl.list ``` ##### Update and install svtplay-dl: ``` sudo apt-get update sudo apt-get install svtplay-dl ``` ### Solus svtplay-dl is avaliable in the [Solus](https://getsol.us.com/) repository and can be installed by simply running: ``` sudo eopkg it svtplay-dl ``` ### Windows You can download the Windows binaries from [svtplay-dl.se](https://svtplay-dl.se/) If you want to build your own Windows binaries: 1. Install [cx_freeze](https://anthony-tuininga.github.io/cx_Freeze/) 3. Follow the steps listed under [From source](#from-source) 4. cd path\to\svtplay-dl && mkdir build 5. `python setversion.py` # this will change the version string to a more useful one 5. `python %PYTHON%\\Scripts\\cxfreeze --include-modules=cffi,queue,idna.idnadata --target-dir=build bin/svtplay-dl` 6. Find binary in build folder. you need `svtplay-dl.exe` and `pythonXX.dll` from that folder to run `svtplay-dl.exe` ### Other systems with python ``` pip3 install svtplay-dl ``` ### Any UNIX (Linux, BSD, macOS, etc.) ##### Download with curl ``` sudo curl -L https://svtplay-dl.se/download/latest/svtplay-dl -o /usr/local/bin/svtplay-dl ``` ##### Make it executable ``` sudo chmod a+rx /usr/local/bin/svtplay-dl ``` ### From source If packaging isn’t available for your operating system, or you want to use a non-released version, you’ll want to install from source. Use git to download the sources: ``` git clone https://github.com/spaam/svtplay-dl ``` svtplay-dl requires the following additional tools and libraries. They are usually available from your distribution’s package repositories. If you don’t have them, some features will not be working. 
- [Python](https://www.python.org) 3.4 or higher - [cryptography](https://cryptography.io/en/latest) to download encrypted HLS streams - [PyYaml](https://github.com/yaml/pyyaml) for configure file - [Requests](https://2.python-requests.org) - [PySocks](https://github.com/Anorov/PySocks) to enable proxy support - [ffmpeg](https://ffmpeg.org) or [avconv](https://libav.org) for postprocessing and/or for DASH streams ([ffmpeg](https://ffmpeg.zeranoe.com) for Windows) ##### To install it, run: ``` sudo python3 setup.py install ``` ## After install ``` svtplay-dl [options] URL ``` If you encounter any bugs or problems, don’t hesitate to open an issue [on github](https://github.com/spaam/svtplay-dl/issues). Or why not join the ``#svtplay-dl`` IRC channel on Freenode? ## Supported services This script works for: - aftonbladet.se - bambuser.com - comedycentral.se - di.se - dn.se - dplay.se - dr.dk - efn.se - expressen.se - hbo.com - kanal9play.se - nickelodeon.nl - nickelodeon.no - nickelodeon.se - nrk.no - oppetarkiv.se - ruv.is - svd.se - sverigesradio.se - svtplay.se - viafree.se (former tv3play.se, tv6play.se, tv8play.se, tv10play.se) - viafree.dk (former tv3play.dk) - viafree.no (former tv3play.no, viasat4play.no) - tv3play.ee - tv3play.lt - tv3play.lv - tv4.se - tv4play.se - twitch.tv - ur.se - urplay.se - vg.no - viagame.com ## License This project is licensed under [The MIT License (MIT)](LICENSE) Homepage: [svtplay-dl.se](https://svtplay-dl.se/) svtplay-dl-2.4/appveyor.yml000066400000000000000000000023701354320321400160450ustar00rootroot00000000000000environment: PYTHON: "C:\\Python35" PYTHON_VERSION: 3.5 PYTHON_ARCH: "32" TWINE_USERNAME: secure: yig0ualP0M39xIahEXhxZA== TWINE_PASSWORD: secure: 79wxWyPcqzj6131wKAuWBg== DOCKER_USERNAME: secure: yig0ualP0M39xIahEXhxZA== DOCKER_PASSWORD: secure: uiu9FbvYYz89WrBtF6BN0w== AWS_ACCESS_KEY_ID: secure: ED96t0LSsl1VnMu5CEQddJYdsIwIdsomF6gmY7r49w8= AWS_SECRET_ACCESS_KEY: secure: 
r/GpKgKWeTtq2k2J+Wy4qwOvLa8auzCgSYvEb88NqzenKZh04wUiWyxwXk6xc7UV init: - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%" install: - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - "python -m pip install -U pip setuptools" - "pip install -r requirements-dev.txt -r requirements.txt" - "pip install cx_freeze awscli backports-datetime-fromisoformat" - "git describe --tags --dirty --always" test_script: - "pip install -e ." after_test: - "mkdir svtplay-dl" - "python setversion.py" - "python %PYTHON%\\Scripts\\cxfreeze --include-modules=cffi,queue,idna.idnadata --target-dir=svtplay-dl bin/svtplay-dl" - "svtplay-dl\\svtplay-dl.exe --version" - "7z.exe a -tzip svtplay-dl.zip svtplay-dl" - IF "%APPVEYOR_PULL_REQUEST_NUMBER%"=="" python scripts/cibuild.py artifacts: - path: svtplay-dl.zip name: svtplay-dl build: off svtplay-dl-2.4/bin/000077500000000000000000000000001354320321400142235ustar00rootroot00000000000000svtplay-dl-2.4/bin/svtplay-dl000077500000000000000000000002641354320321400162520ustar00rootroot00000000000000#!/usr/bin/env python3 # ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import svtplay_dl if __name__ == "__main__": svtplay_dl.main() svtplay-dl-2.4/dockerfile/000077500000000000000000000000001354320321400155625ustar00rootroot00000000000000svtplay-dl-2.4/dockerfile/Dockerfile000066400000000000000000000005741354320321400175620ustar00rootroot00000000000000# using edge to get ffmpeg-4.x FROM alpine:edge MAINTAINER spaam COPY dist/*.whl . 
RUN set -xe \ && apk add --no-cache \ ca-certificates \ python3 \ py3-pip \ rtmpdump \ py3-cryptography \ ffmpeg \ && python3 -m pip install *.whl \ && rm -f *.whl WORKDIR /data ENTRYPOINT ["python3", "/usr/bin/svtplay-dl"] CMD ["--help"] svtplay-dl-2.4/docs/000077500000000000000000000000001354320321400144035ustar00rootroot00000000000000svtplay-dl-2.4/docs/README.docker.md000066400000000000000000000007411354320321400171320ustar00rootroot00000000000000# svtplay-dl container version of the script. # usage ```sh docker run -it --rm -u $(id -u):$(id -g) -v "$(pwd):/data" spaam/svtplay-dl ``` or create an alias: ##### bash (~/.bashrc) ``` alias svtplay-dl='docker run -it --rm -u $(id -u):$(id -g) -v "$(pwd):/data" spaam/svtplay-dl' ``` ##### zsh (~/.zshrc) ``` alias svtplay-dl='docker run -it --rm -u $(id -u):$(id -g) -v "$(pwd):/data" spaam/svtplay-dl' ``` # build example ```sh docker build -t svtplay-dl . ``` svtplay-dl-2.4/lib/000077500000000000000000000000001354320321400142215ustar00rootroot00000000000000svtplay-dl-2.4/lib/Makefile000066400000000000000000000025551354320321400156700ustar00rootroot00000000000000all: svtplay-dl clean: find . -name '*.pyc' -exec rm {} \; rm -f svtplay-dl pylint: pylint $(PYLINT_OPTS) svtplay_dl export PACKAGES = svtplay_dl \ svtplay_dl.fetcher \ svtplay_dl.utils \ svtplay_dl.service \ svtplay_dl.subtitle \ svtplay_dl.postprocess export PYFILES = $(sort $(addsuffix /*.py,$(subst .,/,$(PACKAGES)))) PYTHON ?= /usr/bin/env python3 VERSION = $(shell git describe 2>/dev/null || echo $(LATEST_RELEASE)-unknown) svtplay-dl: $(PYFILES) @# Verify that there's no .build already \ ! [ -d .build ] || { \ echo "ERROR: build already in progress? 
(or remove $(PWD)/.build/)"; \ exit 1; \ }; \ mkdir -p .build @# Stage the files in .build for postprocessing for py in $(PYFILES); do \ install -d ".build/$${py%/*}"; \ install $$py .build/$$py; \ done # Add git version info to __version__, seen in --version sed -i -e 's/^__version__ = \(.*\)$$/__version__ = "$(VERSION)"/' \ .build/svtplay_dl/__init__.py @# reset timestamps, to avoid non-determinism in zip file find .build/ -exec touch -m -t 198001010000 {} \; (cd .build && zip -X --quiet svtplay-dl $(PYFILES)) (cd .build && zip -X --quiet --junk-paths svtplay-dl svtplay_dl/__main__.py) echo '#!$(PYTHON)' > svtplay-dl cat .build/svtplay-dl.zip >> svtplay-dl rm -rf .build chmod a+x svtplay-dl svtplay-dl-2.4/lib/svtplay_dl/000077500000000000000000000000001354320321400164025ustar00rootroot00000000000000svtplay-dl-2.4/lib/svtplay_dl/__init__.py000066400000000000000000000042341354320321400205160ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import from __future__ import unicode_literals import logging import sys import yaml from svtplay_dl.service.cmore import Cmore from svtplay_dl.utils.getmedia import get_media from svtplay_dl.utils.getmedia import get_multiple_media from svtplay_dl.utils.parser import parser from svtplay_dl.utils.parser import parsertoconfig from svtplay_dl.utils.parser import setup_defaults from .__version__ import get_versions __version__ = get_versions()["version"] del get_versions log = logging.getLogger("svtplay_dl") def setup_log(silent, verbose=False): logging.addLevelName(25, "INFO") fmt = "%(levelname)s: %(message)s" if silent: stream = sys.stderr level = 25 elif verbose: stream = sys.stderr level = logging.DEBUG fmt = "%(levelname)s [%(created)s] %(pathname)s/%(funcName)s: %(message)s" else: stream = sys.stdout level = logging.INFO logging.basicConfig(level=level, format=fmt) hdlr = logging.StreamHandler(stream) log.addHandler(hdlr) def 
main(): """ Main program """ parse, options = parser(__version__) if options.flexibleq and not options.quality: logging.error("flexible-quality requires a quality") if len(options.urls) == 0: parse.print_help() sys.exit(0) urls = options.urls config = parsertoconfig(setup_defaults(), options) if len(urls) < 1: parse.error("Incorrect number of arguments") setup_log(config.get("silent"), config.get("verbose")) if options.cmoreoperatorlist: config = parsertoconfig(setup_defaults(), options) c = Cmore(config, urls) c.operatorlist() sys.exit(0) try: if len(urls) == 1: get_media(urls[0], config, __version__) else: get_multiple_media(urls, config) except KeyboardInterrupt: print("") except (yaml.YAMLError, yaml.MarkedYAMLError) as e: logging.error("Your settings file(s) contain invalid YAML syntax! Please fix and restart!, {}".format(str(e))) sys.exit(2) svtplay-dl-2.4/lib/svtplay_dl/__main__.py000066400000000000000000000005751354320321400205030ustar00rootroot00000000000000#!/usr/bin/env python # ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- import sys if __package__ is None and not hasattr(sys, "frozen"): # direct call of __main__.py import os.path sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import svtplay_dl if __name__ == "__main__": svtplay_dl.main() svtplay-dl-2.4/lib/svtplay_dl/__version__.py000066400000000000000000000424561354320321400212500ustar00rootroot00000000000000# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. 
Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = " (tag: 2.4)" git_full = "32323ee28f78042a0566161abcf5b582d763ce79" git_date = "2019-09-07 12:31:05 +0200" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "None" cfg.parentdir_prefix = "None" cfg.versionfile_source = "lib/svtplay_dl/__version__.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except OSError: e = 
sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried {}".format(commands)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories {} but none started with prefix {}".format(str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. 
By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(full_tag, tag_prefix) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} svtplay-dl-2.4/lib/svtplay_dl/error.py000066400000000000000000000020201354320321400200770ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import class UIException(Exception): pass class ServiceError(Exception): pass class NoRequestedProtocols(UIException): """ This excpetion is thrown when the service provides streams, but not using any accepted protocol (as decided by options.stream_prio). """ def __init__(self, requested, found): """ The constructor takes two mandatory parameters, requested and found. Both should be lists. requested is the protocols we want and found is the protocols that can be used to access the stream. 
""" self.requested = requested self.found = found super().__init__("None of the provided protocols (%s) are in " "the current list of accepted protocols (%s)" % (self.found, self.requested)) def __repr__(self): return "NoRequestedProtocols(requested={}, found={})".format(self.requested, self.found) svtplay-dl-2.4/lib/svtplay_dl/fetcher/000077500000000000000000000000001354320321400200225ustar00rootroot00000000000000svtplay-dl-2.4/lib/svtplay_dl/fetcher/__init__.py000066400000000000000000000046371354320321400221450ustar00rootroot00000000000000from __future__ import absolute_import import copy from svtplay_dl.utils.http import HTTP from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progressbar class VideoRetriever: def __init__(self, config, url, bitrate=0, **kwargs): self.config = config self.url = url self.bitrate = int(bitrate) self.kwargs = kwargs self.http = HTTP(config) self.finished = False self.audio = kwargs.pop("audio", None) self.files = kwargs.pop("files", None) self.keycookie = kwargs.pop("keycookie", None) self.authorization = kwargs.pop("authorization", None) self.output = kwargs.pop("output", None) self.segments = kwargs.pop("segments", None) self.output_extention = None def __repr__(self): return "".format(self.__class__.__name__, self.bitrate) @property def name(self): pass def _download_url(self, url, audio=False, total_size=None): cookies = self.kwargs["cookies"] data = self.http.request("get", url, cookies=cookies, headers={"Range": "bytes=0-8192"}) if not total_size: try: total_size = data.headers["Content-Range"] total_size = total_size[total_size.find("/") + 1 :] total_size = int(total_size) except KeyError: raise KeyError("Can't get the total size.") bytes_so_far = 8192 if audio: file_d = output(copy.copy(self.output), self.config, "m4a") else: file_d = output(self.output, self.config, "mp4") if file_d is None: return file_d.write(data.content) eta = ETA(total_size) while 
bytes_so_far < total_size: if not self.config.get("silent"): eta.update(bytes_so_far) progressbar(total_size, bytes_so_far, "".join(["ETA: ", str(eta)])) old = bytes_so_far + 1 bytes_so_far = total_size bytes_range = "bytes={}-{}".format(old, bytes_so_far) data = self.http.request("get", url, cookies=cookies, headers={"Range": bytes_range}) file_d.write(data.content) file_d.close() progressbar(bytes_so_far, total_size, "ETA: complete") # progress_stream.write('\n') self.finished = True svtplay-dl-2.4/lib/svtplay_dl/fetcher/dash.py000066400000000000000000000246101354320321400213160ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import copy import math import os import re import time import xml.etree.ElementTree as ET from datetime import datetime from urllib.parse import urljoin from svtplay_dl.error import ServiceError from svtplay_dl.error import UIException from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progress_stream from svtplay_dl.utils.output import progressbar class DASHException(UIException): def __init__(self, url, message): self.url = url super().__init__(message) class LiveDASHException(DASHException): def __init__(self, url): super().__init__(url, "This is a live DASH stream, and they are not supported.") class DASHattibutes: def __init__(self): self.default = {} def set(self, key, value): self.default[key] = value def get(self, key): if key in self.default: return self.default[key] return 0 def templateelemt(attributes, element, filename, idnumber): files = [] init = element.attrib["initialization"] media = element.attrib["media"] if "startNumber" in element.attrib: start = int(element.attrib["startNumber"]) else: start = 1 if "timescale" in element.attrib: attributes.set("timescale", float(element.attrib["timescale"])) else: 
attributes.set("timescale", 1) if "duration" in element.attrib: attributes.set("duration", float(element.attrib["duration"])) segments = [] timeline = element.findall("{urn:mpeg:dash:schema:mpd:2011}SegmentTimeline/{urn:mpeg:dash:schema:mpd:2011}S") if timeline: t = -1 for s in timeline: duration = int(s.attrib["d"]) repeat = int(s.attrib["r"]) if "r" in s.attrib else 0 segmenttime = int(s.attrib["t"]) if "t" in s.attrib else 0 if t < 0: t = segmenttime count = repeat + 1 end = start + len(segments) + count number = start + len(segments) while number < end: segments.append({"number": number, "duration": math.ceil(duration / attributes.get("timescale")), "time": t}) t += duration number += 1 else: # Saw this on dynamic live content start = 0 now = time.time() periodStartWC = time.mktime(attributes.get("availabilityStartTime").timetuple()) + start periodEndWC = now + attributes.get("minimumUpdatePeriod") periodDuration = periodEndWC - periodStartWC segmentCount = math.ceil(periodDuration * attributes.get("timescale") / attributes.get("duration")) availableStart = math.floor( (now - periodStartWC - attributes.get("timeShiftBufferDepth")) * attributes.get("timescale") / attributes.get("duration") ) availableEnd = math.floor((now - periodStartWC) * attributes.get("timescale") / attributes.get("duration")) start = max(0, availableStart) end = min(segmentCount, availableEnd) for number in range(start, end): segments.append({"number": number, "duration": int(attributes.get("duration") / attributes.get("timescale"))}) name = media.replace("$RepresentationID$", idnumber).replace("$Bandwidth$", attributes.get("bandwidth")) files.append(urljoin(filename, init.replace("$RepresentationID$", idnumber).replace("$Bandwidth$", attributes.get("bandwidth")))) for segment in segments: if "$Time$" in media: new = name.replace("$Time$", str(segment["time"])) if "$Number" in name: if re.search(r"\$Number(\%\d+)d\$", name): vname = name.replace("$Number", "").replace("$", "") new = vname % 
segment["number"] else: new = name.replace("$Number$", str(segment["number"])) files.append(urljoin(filename, new)) return files def adaptionset(attributes, element, url, baseurl=None): streams = {} dirname = os.path.dirname(url) + "/" if baseurl: dirname = urljoin(dirname, baseurl) template = element[0].find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate") represtation = element[0].findall(".//{urn:mpeg:dash:schema:mpd:2011}Representation") for i in represtation: files = [] segments = False filename = dirname attributes.set("bandwidth", i.attrib["bandwidth"]) bitrate = int(i.attrib["bandwidth"]) / 1000 idnumber = i.attrib["id"] if i.find("{urn:mpeg:dash:schema:mpd:2011}BaseURL") is not None: filename = urljoin(filename, i.find("{urn:mpeg:dash:schema:mpd:2011}BaseURL").text) if i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentBase") is not None: segments = True files.append(filename) if template is not None: segments = True files = templateelemt(attributes, template, filename, idnumber) elif i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate") is not None: segments = True files = templateelemt(attributes, i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate"), filename, idnumber) if files: streams[bitrate] = {"segments": segments, "files": files} return streams def dashparse(config, res, url, output=None): streams = {} if not res: return streams if res.status_code >= 400: streams[0] = ServiceError("Can't read DASH playlist. {}".format(res.status_code)) return streams if len(res.text) < 1: streams[0] = ServiceError("Can't read DASH playlist. 
{}, size: {}".format(res.status_code, len(res.text))) return streams return _dashparse(config, res.text, url, output, res.cookies) def _dashparse(config, text, url, output, cookies): streams = {} baseurl = None attributes = DASHattibutes() xml = ET.XML(text) if xml.find("./{urn:mpeg:dash:schema:mpd:2011}BaseURL") is not None: baseurl = xml.find("./{urn:mpeg:dash:schema:mpd:2011}BaseURL").text if "availabilityStartTime" in xml.attrib: attributes.set("availabilityStartTime", parse_dates(xml.attrib["availabilityStartTime"])) attributes.set("publishTime", parse_dates(xml.attrib["publishTime"])) if "mediaPresentationDuration" in xml.attrib: attributes.set("mediaPresentationDuration", parse_duration(xml.attrib["mediaPresentationDuration"])) if "timeShiftBufferDepth" in xml.attrib: attributes.set("timeShiftBufferDepth", parse_duration(xml.attrib["timeShiftBufferDepth"])) if "minimumUpdatePeriod" in xml.attrib: attributes.set("minimumUpdatePeriod", parse_duration(xml.attrib["minimumUpdatePeriod"])) attributes.set("type", xml.attrib["type"]) temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="audio/mp4"]') if len(temp) == 0: temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="audio"]') audiofiles = adaptionset(attributes, temp, url, baseurl) temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="video/mp4"]') if len(temp) == 0: temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="video"]') videofiles = adaptionset(attributes, temp, url, baseurl) if not audiofiles or not videofiles: streams[0] = ServiceError("Found no Audiofiles or Videofiles to download.") return streams for i in videofiles.keys(): bitrate = i + list(audiofiles.keys())[0] streams[bitrate] = DASH( copy.copy(config), url, bitrate, cookies=cookies, audio=audiofiles[list(audiofiles.keys())[0]]["files"], files=videofiles[i]["files"], output=output, segments=videofiles[i]["segments"], ) return streams def 
parse_duration(duration): match = re.search(r"P(?:(\d*)Y)?(?:(\d*)M)?(?:(\d*)D)?(?:T(?:(\d*)H)?(?:(\d*)M)?(?:([\d.]*)S)?)?", duration) if not match: return 0 year = int(match.group(1)) * 365 * 24 * 60 * 60 if match.group(1) else 0 month = int(match.group(2)) * 30 * 24 * 60 * 60 if match.group(2) else 0 day = int(match.group(3)) * 24 * 60 * 60 if match.group(3) else 0 hour = int(match.group(4)) * 60 * 60 if match.group(4) else 0 minute = int(match.group(5)) * 60 if match.group(5) else 0 second = float(match.group(6)) if match.group(6) else 0 return year + month + day + hour + minute + second def parse_dates(date_str): date_patterns = ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%SZ"] dt = None for pattern in date_patterns: try: dt = datetime.strptime(date_str, pattern) break except Exception: pass if not dt: raise ValueError("Can't parse date format: {}".format(date_str)) return dt class DASH(VideoRetriever): @property def name(self): return "dash" def download(self): self.output_extention = "mp4" if self.config.get("live") and not self.config.get("force"): raise LiveDASHException(self.url) if self.segments: if self.audio: self._download2(self.audio, audio=True) self._download2(self.files) else: if self.audio: self._download_url(self.audio, audio=True) self._download_url(self.url) def _download2(self, files, audio=False): cookies = self.kwargs["cookies"] if audio: file_d = output(copy.copy(self.output), self.config, extension="m4a") else: file_d = output(self.output, self.config, extension="mp4") if file_d is None: return eta = ETA(len(files)) n = 1 for i in files: if not self.config.get("silent"): eta.increment() progressbar(len(files), n, "".join(["ETA: ", str(eta)])) n += 1 data = self.http.request("get", i, cookies=cookies) if data.status_code == 404: break data = data.content file_d.write(data) file_d.close() if not self.config.get("silent"): progress_stream.write("\n") self.finished = True 
svtplay-dl-2.4/lib/svtplay_dl/fetcher/hds.py000066400000000000000000000216331354320321400211570ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import base64 import binascii import copy import struct import xml.etree.ElementTree as ET from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.error import UIException from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progress_stream from svtplay_dl.utils.output import progressbar def _chr(temp): return chr(temp) class HDSException(UIException): def __init__(self, url, message): self.url = url super().__init__(message) class LiveHDSException(HDSException): def __init__(self, url): super().__init__(url, "This is a live HDS stream, and they are not supported.") def hdsparse(config, res, manifest, output=None): streams = {} bootstrap = {} if not res: return streams if res.status_code >= 400: streams[0] = ServiceError("Can't read HDS playlist. 
{}".format(res.status_code)) return streams data = res.text xml = ET.XML(data) bootstrapIter = xml.iter("{http://ns.adobe.com/f4m/1.0}bootstrapInfo") mediaIter = xml.iter("{http://ns.adobe.com/f4m/1.0}media") if xml.find("{http://ns.adobe.com/f4m/1.0}drmAdditionalHeader") is not None: streams[0] = ServiceError("HDS DRM protected content.") return streams for i in bootstrapIter: if "id" in i.attrib: bootstrap[i.attrib["id"]] = i.text else: bootstrap["0"] = i.text parse = urlparse(manifest) querystring = parse.query url = "{}://{}{}".format(parse.scheme, parse.netloc, parse.path) for i in mediaIter: bootstrapid = bootstrap[i.attrib["bootstrapInfoId"]] streams[int(i.attrib["bitrate"])] = HDS( copy.copy(config), url, i.attrib["bitrate"], url_id=i.attrib["url"], bootstrap=bootstrapid, metadata=i.find("{http://ns.adobe.com/f4m/1.0}metadata").text, querystring=querystring, cookies=res.cookies, output=output, ) return streams class HDS(VideoRetriever): @property def name(self): return "hds" def download(self): self.output_extention = "flv" if self.config.get("live") and not self.config.get("force"): raise LiveHDSException(self.url) querystring = self.kwargs["querystring"] cookies = self.kwargs["cookies"] bootstrap = base64.b64decode(self.kwargs["bootstrap"]) box = readboxtype(bootstrap, 0) antal = None if box[2] == b"abst": antal = readbox(bootstrap, box[0]) baseurl = self.url[0 : self.url.rfind("/")] file_d = output(self.output, self.config, "flv") if file_d is None: return metasize = struct.pack(">L", len(base64.b64decode(self.kwargs["metadata"])))[1:] file_d.write(binascii.a2b_hex(b"464c560105000000090000000012")) file_d.write(metasize) file_d.write(binascii.a2b_hex(b"00000000000000")) file_d.write(base64.b64decode(self.kwargs["metadata"])) file_d.write(binascii.a2b_hex(b"00000000")) i = 1 start = antal[1]["first"] total = antal[1]["total"] eta = ETA(total) while i <= total: url = "{}/{}Seg1-Frag{}?{}".format(baseurl, self.kwargs["url_id"], start, querystring) if not 
self.config.get("silent"): eta.update(i) progressbar(total, i, "".join(["ETA: ", str(eta)])) data = self.http.request("get", url, cookies=cookies) if data.status_code == 404: break data = data.content number = decode_f4f(i, data) file_d.write(data[number:]) i += 1 start += 1 file_d.close() if not self.config.get("silent"): progress_stream.write("\n") self.finished = True def readbyte(data, pos): return struct.unpack("B", bytes(_chr(data[pos]), "ascii"))[0] def read16(data, pos): endpos = pos + 2 return struct.unpack(">H", data[pos:endpos])[0] def read24(data, pos): end = pos + 3 return struct.unpack(">L", "\x00" + data[pos:end])[0] def read32(data, pos): end = pos + 4 return struct.unpack(">i", data[pos:end])[0] def readu32(data, pos): end = pos + 4 return struct.unpack(">I", data[pos:end])[0] def read64(data, pos): end = pos + 8 return struct.unpack(">Q", data[pos:end])[0] def readstring(data, pos): length = 0 while bytes(_chr(data[pos + length]), "ascii") != b"\x00": length += 1 endpos = pos + length string = data[pos:endpos] pos += length + 1 return pos, string def readboxtype(data, pos): boxsize = read32(data, pos) tpos = pos + 4 endpos = tpos + 4 boxtype = data[tpos:endpos] if boxsize > 1: boxsize -= 8 pos += 8 return pos, boxsize, boxtype # Note! A lot of variable assignments are commented out. These are # accessible values that we currently don't use. 
def readbox(data, pos): # version = readbyte(data, pos) pos += 1 # flags = read24(data, pos) pos += 3 # bootstrapversion = read32(data, pos) pos += 4 # byte = readbyte(data, pos) pos += 1 # profile = (byte & 0xC0) >> 6 # live = (byte & 0x20) >> 5 # update = (byte & 0x10) >> 4 # timescale = read32(data, pos) pos += 4 # currentmediatime = read64(data, pos) pos += 8 # smptetimecodeoffset = read64(data, pos) pos += 8 temp = readstring(data, pos) # movieidentifier = temp[1] pos = temp[0] serverentrycount = readbyte(data, pos) pos += 1 serverentrytable = [] i = 0 while i < serverentrycount: temp = readstring(data, pos) serverentrytable.append(temp[1]) pos = temp[0] i += 1 qualityentrycount = readbyte(data, pos) pos += 1 qualityentrytable = [] i = 0 while i < qualityentrycount: temp = readstring(data, pos) qualityentrytable.append(temp[1]) pos = temp[0] i += 1 tmp = readstring(data, pos) # drm = tmp[1] pos = tmp[0] tmp = readstring(data, pos) # metadata = tmp[1] pos = tmp[0] segmentruntable = readbyte(data, pos) pos += 1 if segmentruntable > 0: tmp = readboxtype(data, pos) boxtype = tmp[2] boxsize = tmp[1] pos = tmp[0] if boxtype == b"asrt": antal = readasrtbox(data, pos) pos += boxsize fragRunTableCount = readbyte(data, pos) pos += 1 i = 0 first = 1 while i < fragRunTableCount: tmp = readboxtype(data, pos) boxtype = tmp[2] boxsize = tmp[1] pos = tmp[0] if boxtype == b"afrt": first = readafrtbox(data, pos) pos += boxsize i += 1 antal[1]["first"] = first return antal # Note! A lot of variable assignments are commented out. These are # accessible values that we currently don't use. 
def readafrtbox(data, pos): # version = readbyte(data, pos) pos += 1 # flags = read24(data, pos) pos += 3 # timescale = read32(data, pos) pos += 4 qualityentry = readbyte(data, pos) pos += 1 i = 0 while i < qualityentry: temp = readstring(data, pos) # qualitysegmulti = temp[1] pos = temp[0] i += 1 fragrunentrycount = read32(data, pos) pos += 4 i = 0 first = 1 skip = False while i < fragrunentrycount: firstfragment = readu32(data, pos) if not skip: first = firstfragment skip = True pos += 4 # timestamp = read64(data, pos) pos += 8 # duration = read32(data, pos) pos += 4 i += 1 return first # Note! A lot of variable assignments are commented out. These are # accessible values that we currently don't use. def readasrtbox(data, pos): # version = readbyte(data, pos) pos += 1 # flags = read24(data, pos) pos += 3 qualityentrycount = readbyte(data, pos) pos += 1 qualitysegmentmodifers = [] i = 0 while i < qualityentrycount: temp = readstring(data, pos) qualitysegmentmodifers.append(temp[1]) pos = temp[0] i += 1 seqCount = read32(data, pos) pos += 4 ret = {} i = 0 while i < seqCount: firstseg = read32(data, pos) pos += 4 fragPerSeg = read32(data, pos) pos += 4 tmp = i + 1 ret[tmp] = {"first": firstseg, "total": fragPerSeg} i += 1 return ret def decode_f4f(fragID, fragData): start = fragData.find(b"mdat") + 4 if fragID > 1: tagLen, = struct.unpack_from(">L", fragData, start) tagLen &= 0x00FFFFFF start += tagLen + 11 + 4 return start svtplay-dl-2.4/lib/svtplay_dl/fetcher/hls.py000066400000000000000000000423211354320321400211640ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import binascii import copy import os import random import re import time from datetime import datetime from datetime import timedelta from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers 
import Cipher from cryptography.hazmat.primitives.ciphers import modes from svtplay_dl.error import ServiceError from svtplay_dl.error import UIException from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.subtitle import subtitle from svtplay_dl.utils.http import get_full_url from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progress_stream from svtplay_dl.utils.output import progressbar class HLSException(UIException): def __init__(self, url, message): self.url = url super().__init__(message) class LiveHLSException(HLSException): def __init__(self, url): super().__init__(url, "This is a live HLS stream, and they are not supported.") def hlsparse(config, res, url, **kwargs): streams = {} if not res: return streams if res.status_code > 400: streams[0] = ServiceError("Can't read HLS playlist. {}".format(res.status_code)) return streams m3u8 = M3U8(res.text) keycookie = kwargs.pop("keycookie", None) authorization = kwargs.pop("authorization", None) httpobject = kwargs.pop("httpobject", None) output = kwargs.pop("output", None) media = {} subtitles = {} segments = None if m3u8.master_playlist: for i in m3u8.master_playlist: audio_url = None if i["TAG"] == "EXT-X-MEDIA": if "AUTOSELECT" in i and (i["AUTOSELECT"].upper() == "YES"): if i["TYPE"] and i["TYPE"] != "SUBTITLES": if "URI" in i: if segments is None: segments = True if i["GROUP-ID"] not in media: media[i["GROUP-ID"]] = [] media[i["GROUP-ID"]].append(i["URI"]) else: segments = False if i["TYPE"] == "SUBTITLES": if "URI" in i: if i["GROUP-ID"] not in subtitles: subtitles[i["GROUP-ID"]] = [] item = [i["URI"], i["LANGUAGE"]] if item not in subtitles[i["GROUP-ID"]]: subtitles[i["GROUP-ID"]].append(item) continue elif i["TAG"] == "EXT-X-STREAM-INF": bit_rate = float(i["BANDWIDTH"]) / 1000 if "AUDIO" in i and (i["AUDIO"] in media): audio_url = get_full_url(media[i["AUDIO"]][0], url) urls = get_full_url(i["URI"], url) else: continue # Needs 
to be changed to utilise other tags. streams[int(bit_rate)] = HLS( copy.copy(config), urls, bit_rate, cookies=res.cookies, keycookie=keycookie, authorization=authorization, audio=audio_url, output=output, segments=bool(segments), kwargs=kwargs, ) if subtitles and httpobject: for sub in list(subtitles.keys()): for n in subtitles[sub]: m3u8s = M3U8(httpobject.request("get", get_full_url(n[0], url), cookies=res.cookies).text) if "cmore" in url: subtype = "wrstsegment" # this have been seen in tv4play else: subtype = "wrst" streams[int(random.randint(1, 40))] = subtitle( copy.copy(config), subtype, get_full_url(m3u8s.media_segment[0]["URI"], url), subfix=n[1], output=copy.copy(output), m3u8=m3u8s, ) elif m3u8.media_segment: config.set("segments", False) streams[0] = HLS( copy.copy(config), url, 0, cookies=res.cookies, keycookie=keycookie, authorization=authorization, output=output, segments=False ) else: streams[0] = ServiceError("Can't find HLS playlist in m3u8 file.") return streams class HLS(VideoRetriever): @property def name(self): return "hls" def download(self): self.output_extention = "ts" if self.segments: if self.audio: self._download(self.audio, file_name=(copy.copy(self.output), "audio.ts")) self._download(self.url, file_name=(self.output, "ts")) else: # Ignore audio self.audio = None self._download(self.url, file_name=(self.output, "ts")) def _download(self, url, file_name): cookies = self.kwargs.get("cookies", None) start_time = time.time() m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text) key = None def random_iv(): return os.urandom(16) file_d = output(file_name[0], self.config, file_name[1]) if file_d is None: return hls_time_stamp = self.kwargs.pop("hls_time_stamp", False) decryptor = None size_media = len(m3u8.media_segment) eta = ETA(size_media) total_duration = 0 duration = 0 max_duration = 0 for index, i in enumerate(m3u8.media_segment): if "duration" in i["EXTINF"]: duration = i["EXTINF"]["duration"] max_duration = 
max(max_duration, duration) total_duration += duration item = get_full_url(i["URI"], url) if not self.config.get("silent"): if self.config.get("live"): progressbar(size_media, index + 1, "".join(["DU: ", str(timedelta(seconds=int(total_duration)))])) else: eta.increment() progressbar(size_media, index + 1, "".join(["ETA: ", str(eta)])) data = self.http.request("get", item, cookies=cookies) if data.status_code == 404: break data = data.content if m3u8.encrypted: headers = {} if self.keycookie: keycookies = self.keycookie else: keycookies = cookies if self.authorization: headers["authorization"] = self.authorization # Update key/decryptor if "EXT-X-KEY" in i: keyurl = get_full_url(i["EXT-X-KEY"]["URI"], url) if keyurl and keyurl[:4] == "skd:": raise HLSException(keyurl, "Can't decrypt beacuse of DRM") key = self.http.request("get", keyurl, cookies=keycookies, headers=headers).content iv = binascii.unhexlify(i["EXT-X-KEY"]["IV"][2:].zfill(32)) if "IV" in i["EXT-X-KEY"] else random_iv() backend = default_backend() cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) decryptor = cipher.decryptor() if decryptor: data = decryptor.update(data) else: raise ValueError("No decryptor found for encrypted hls steam.") file_d.write(data) if self.config.get("capture_time") > 0 and total_duration >= self.config.get("capture_time") * 60: break if (size_media == (index + 1)) and self.config.get("live"): sleep_int = (start_time + max_duration * 2) - time.time() if sleep_int > 0: time.sleep(sleep_int) size_media_old = size_media while size_media_old == size_media: start_time = time.time() if hls_time_stamp: end_time_stamp = (datetime.utcnow() - timedelta(minutes=1, seconds=max_duration * 2)).replace(microsecond=0) start_time_stamp = end_time_stamp - timedelta(minutes=1) base_url = url.split(".m3u8")[0] url = "{}.m3u8?in={}&out={}?".format(base_url, start_time_stamp.isoformat(), end_time_stamp.isoformat()) new_m3u8 = M3U8(self.http.request("get", url, 
class M3U8:
    """Minimal parser for HLS playlists (versions <= 7).

    Parses a playlist document into three views:
      * ``media_segment``   -- list of dicts, one per segment URI, keyed by the
                               EXT-X tags that preceded the URI (RFC 8216 4.3.2)
      * ``media_playlist``  -- dict of media-playlist-level tags (4.3.3)
      * ``master_playlist`` -- list of dicts for master-playlist tags (4.3.4)

    A playlist is either a media playlist or a master playlist; having
    data for both is rejected.
    """

    # Created for hls version <=7
    # https://tools.ietf.org/html/rfc8216

    # Tags that describe the *next* media segment URI (RFC 8216 section 4.3.2).
    MEDIA_SEGMENT_TAGS = ("EXTINF", "EXT-X-BYTERANGE", "EXT-X-DISCONTINUITY", "EXT-X-KEY", "EXT-X-MAP", "EXT-X-PROGRAM-DATE-TIME", "EXT-X-DATERANGE")
    # Tags that apply to the whole media playlist (section 4.3.3).
    MEDIA_PLAYLIST_TAGS = (
        "EXT-X-TARGETDURATION",
        "EXT-X-MEDIA-SEQUENCE",
        "EXT-X-DISCONTINUITY-SEQUENCE",
        "EXT-X-ENDLIST",
        "EXT-X-PLAYLIST-TYPE",
        "EXT-X-I-FRAMES-ONLY",
    )
    # Master-playlist tags (section 4.3.4).
    MASTER_PLAYLIST_TAGS = ("EXT-X-MEDIA", "EXT-X-STREAM-INF", "EXT-X-I-FRAME-STREAM-INF", "EXT-X-SESSION-DATA", "EXT-X-SESSION-KEY")
    # Tags valid in either playlist kind (section 4.3.5).
    MEDIA_OR_MASTER_PLAYLIST_TAGS = ("EXT-X-INDEPENDENT-SEGMENTS", "EXT-X-START")

    TAG_TYPES = {"MEDIA_SEGMENT": 0, "MEDIA_PLAYLIST": 1, "MASTER_PLAYLIST": 2}

    def __init__(self, data):
        """Parse *data* (the playlist text) immediately on construction."""
        self.version = None

        self.media_segment = []    # one dict per segment URI
        self.media_playlist = {}   # playlist-wide tags
        self.master_playlist = []  # variant/rendition entries

        self.encrypted = False             # set when EXT-X-KEY / EXT-X-SESSION-KEY seen
        self.independent_segments = False  # set by EXT-X-INDEPENDENT-SEGMENTS

        self.parse_m3u(data)

    def __str__(self):
        return "Version: {}\nMedia Segment: {}\nMedia Playlist: {}\nMaster Playlist: {}\nEncrypted: {}\tIndependent_segments: {}".format(
            self.version, self.media_segment, self.media_playlist, self.master_playlist, self.encrypted, self.independent_segments
        )

    def parse_m3u(self, data):
        """Populate the instance from playlist text.

        Raises ValueError if the document lacks the #EXTM3U magic or mixes
        media-segment and master-playlist data.
        """
        if not data.startswith("#EXTM3U"):
            raise ValueError("Does not appear to be an 'EXTM3U' file.")

        data = data.replace("\r\n", "\n")
        lines = data.split("\n")[1:]

        last_tag_type = None
        tag_type = None

        # Accumulates segment tags until the segment's URI line flushes it.
        media_segment_info = {}

        for index, l in enumerate(lines):
            if not l:
                continue
            elif l.startswith("#EXT"):
                info = {}
                tag, attr = _get_tag_attribute(l)
                if tag == "EXT-X-VERSION":
                    self.version = int(attr)

                # 4.3.2. Media Segment Tags
                elif tag in M3U8.MEDIA_SEGMENT_TAGS:
                    tag_type = M3U8.TAG_TYPES["MEDIA_SEGMENT"]
                    # 4.3.2.1. EXTINF
                    if tag == "EXTINF":
                        if "," in attr:
                            dur, title = attr.split(",", 1)
                        else:
                            dur = attr
                            title = None
                        info["duration"] = float(dur)
                        info["title"] = title
                    # 4.3.2.2. EXT-X-BYTERANGE
                    elif tag == "EXT-X-BYTERANGE":
                        if "@" in attr:
                            n, o = attr.split("@", 1)
                            info["n"], info["o"] = (int(n), int(o))
                        else:
                            info["n"] = int(attr)
                            info["o"] = 0
                    # 4.3.2.3. EXT-X-DISCONTINUITY
                    elif tag == "EXT-X-DISCONTINUITY":
                        pass
                    # 4.3.2.4. EXT-X-KEY
                    elif tag == "EXT-X-KEY":
                        self.encrypted = True
                        info = _get_tuple_attribute(attr)
                    # 4.3.2.5. EXT-X-MAP
                    elif tag == "EXT-X-MAP":
                        info = _get_tuple_attribute(attr)
                    # 4.3.2.6. EXT-X-PROGRAM-DATE-TIME"
                    elif tag == "EXT-X-PROGRAM-DATE-TIME":
                        info = attr
                    # 4.3.2.7. EXT-X-DATERANGE
                    elif tag == "EXT-X-DATERANGE":
                        info = _get_tuple_attribute(attr)

                    media_segment_info[tag] = info

                # 4.3.3. Media Playlist Tags
                elif tag in M3U8.MEDIA_PLAYLIST_TAGS:
                    tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"]
                    # 4.3.3.1. EXT-X-TARGETDURATION
                    if tag == "EXT-X-TARGETDURATION":
                        info = int(attr)
                    # 4.3.3.2. EXT-X-MEDIA-SEQUENCE
                    elif tag == "EXT-X-MEDIA-SEQUENCE":
                        info = int(attr)
                    # 4.3.3.3. EXT-X-DISCONTINUITY-SEQUENCE
                    elif tag == "EXT-X-DISCONTINUITY-SEQUENCE":
                        info = int(attr)
                    # 4.3.3.4. EXT-X-ENDLIST
                    elif tag == "EXT-X-ENDLIST":
                        # End of playlist: stop parsing entirely.
                        break
                    # 4.3.3.5. EXT-X-PLAYLIST-TYPE
                    elif tag == "EXT-X-PLAYLIST-TYPE":
                        info = attr
                    # 4.3.3.6. EXT-X-I-FRAMES-ONLY
                    elif tag == "EXT-X-I-FRAMES-ONLY":
                        pass

                    self.media_playlist[tag] = info

                # 4.3.4. Master Playlist Tags
                elif tag in M3U8.MASTER_PLAYLIST_TAGS:
                    tag_type = M3U8.TAG_TYPES["MASTER_PLAYLIST"]
                    # 4.3.4.1. EXT-X-MEDIA
                    if tag == "EXT-X-MEDIA":
                        info = _get_tuple_attribute(attr)
                    # 4.3.4.2. EXT-X-STREAM-INF
                    elif tag == "EXT-X-STREAM-INF":
                        info = _get_tuple_attribute(attr)
                        if "BANDWIDTH" not in info:
                            raise ValueError("Can't find 'BANDWIDTH' in 'EXT-X-STREAM-INF'")
                        # The URI of a variant stream is the line after its tag.
                        info["URI"] = lines[index + 1]
                    # 4.3.4.3. EXT-X-I-FRAME-STREAM-INF
                    elif tag == "EXT-X-I-FRAME-STREAM-INF":
                        info = _get_tuple_attribute(attr)
                    # 4.3.4.4. EXT-X-SESSION-DATA
                    elif tag == "EXT-X-SESSION-DATA":
                        info = _get_tuple_attribute(attr)
                    # 4.3.4.5. EXT-X-SESSION-KEY
                    elif tag == "EXT-X-SESSION-KEY":
                        self.encrypted = True
                        info = _get_tuple_attribute(attr)
                    info["TAG"] = tag

                    self.master_playlist.append(info)

                # 4.3.5. Media or Master Playlist Tags
                elif tag in M3U8.MEDIA_OR_MASTER_PLAYLIST_TAGS:
                    tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"]
                    # 4.3.5.1. EXT-X-INDEPENDENT-SEGMENTS
                    if tag == "EXT-X-INDEPENDENT-SEGMENTS":
                        self.independent_segments = True
                    # 4.3.5.2. EXT-X-START
                    elif tag == "EXT-X-START":
                        info = _get_tuple_attribute(attr)

                    self.media_playlist[tag] = info

                # Unused tags
                else:
                    pass
            # This is a comment
            elif l.startswith("#"):
                pass
            # This must be a url/uri
            else:
                tag_type = None

                if last_tag_type is M3U8.TAG_TYPES["MEDIA_SEGMENT"]:
                    media_segment_info["URI"] = l
                    self.media_segment.append(media_segment_info)
                    media_segment_info = {}

            last_tag_type = tag_type

            if self.media_segment and self.master_playlist:
                raise ValueError("This 'M3U8' file contains data for both 'Media Segment' and 'Master Playlist'. This is not allowed.")
This is not allowed.") def _get_tag_attribute(line): line = line[1:] try: search_line = re.search(r"^([A-Z\-]*):(.*)", line) return search_line.group(1), search_line.group(2) except Exception: return line, None def _get_tuple_attribute(attribute): attr_tuple = {} for art_l in re.split(""",(?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", attribute): if art_l: name, value = art_l.split("=", 1) name = name.strip() # Checks for attribute name if not re.match(r"^[A-Z0-9\-]*$", name): raise ValueError("Not a valid attribute name.") # Remove extra quotes of string if value.startswith('"') and value.endswith('"'): value = value[1:-1] attr_tuple[name] = value return attr_tuple svtplay-dl-2.4/lib/svtplay_dl/fetcher/http.py000066400000000000000000000025541354320321400213610ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import os from svtplay_dl.fetcher import VideoRetriever from svtplay_dl.utils.output import ETA from svtplay_dl.utils.output import output from svtplay_dl.utils.output import progressbar class HTTP(VideoRetriever): @property def name(self): return "http" def download(self): """ Get the stream from HTTP """ _, ext = os.path.splitext(self.url) if ext == ".mp3": self.output_extention = "mp3" else: self.output_extention = "mp4" # this might be wrong.. 
data = self.http.request("get", self.url, stream=True) try: total_size = data.headers["content-length"] except KeyError: total_size = 0 total_size = int(total_size) bytes_so_far = 0 file_d = output(self.output, self.config, self.output_extention) if file_d is None: return eta = ETA(total_size) for i in data.iter_content(8192): bytes_so_far += len(i) file_d.write(i) if not self.config.get("silent"): eta.update(bytes_so_far) progressbar(total_size, bytes_so_far, "".join(["ETA: ", str(eta)])) file_d.close() self.finished = True svtplay-dl-2.4/lib/svtplay_dl/log.py000066400000000000000000000003371354320321400175400ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import logging import sys log = logging.getLogger("svtplay_dl") progress_stream = sys.stderr svtplay-dl-2.4/lib/svtplay_dl/postprocess/000077500000000000000000000000001354320321400207665ustar00rootroot00000000000000svtplay-dl-2.4/lib/svtplay_dl/postprocess/__init__.py000066400000000000000000000232261354320321400231040ustar00rootroot00000000000000import logging import os import platform import re from json import dumps from random import sample from re import match from shutil import which from requests import codes from requests import post from requests import Timeout from svtplay_dl.utils.output import formatname from svtplay_dl.utils.proc import run_program class postprocess: def __init__(self, stream, config, subfixes=None): self.stream = stream self.config = config self.subfixes = subfixes self.detect = None for i in ["ffmpeg", "avconv"]: self.detect = which(i) if self.detect: break def sublanguage(self): # parse() function partly borrowed from a guy on github. /thanks! 
    def sublanguage(self):
        """Guess the ISO language code(s) of the downloaded .srt subtitle(s).

        Reads sample sentences from each subtitle file and queries an
        external language-detection service; falls back to "und"
        (undetermined) on timeout or server error. Returns a list of
        language codes, one per subtitle file.
        """
        # parse() function partly borrowed from a guy on github. /thanks!
        # https://github.com/riobard/srt.py/blob/master/srt.py
        def parse(self):
            # Returns the text lines of every srt block (index and timing dropped).
            def parse_block(block):
                lines = block.strip("-").split("\n")
                txt = "\r\n".join(lines[2:])
                return txt

            if platform.system() == "Windows":
                fd = open(self, encoding="utf8")
            else:
                fd = open(self)
            return list(map(parse_block, fd.read().strip().replace("\r", "").split("\n\n")))

        def query(self):
            # Sample up to 8 random subtitle blocks and ask the remote service.
            _ = parse(self)
            random_sentences = " ".join(sample(_, len(_) if len(_) < 8 else 8)).replace("\r\n", "")
            url = "https://whatlanguage.herokuapp.com"
            payload = {"query": random_sentences}
            # Note: requests handles json from version 2.4.2 and onwards so i use json.dumps for now.
            headers = {"content-type": "application/json"}
            try:
                # Note: reasonable timeout i guess? svtplay-dl is mainly used while multitasking i presume,
                # and it is heroku after all (fast enough)
                r = post(url, data=dumps(payload), headers=headers, timeout=30)
                if r.status_code == codes.ok:
                    try:
                        response = r.json()
                        return response["language"]
                    except TypeError:
                        return "und"
                else:
                    logging.error("Server error appeared. Setting language as undetermined.")
                    return "und"
            except Timeout:
                logging.error("30 seconds server timeout reached. Setting language as undetermined.")
                return "und"

        langs = []
        # Languages the remote detector can't identify; mapped by subtitle suffix.
        exceptions = {"lulesamiska": "smj", "meankieli": "fit", "jiddisch": "yid"}
        if self.subfixes and len(self.subfixes) >= 2:
            logging.info("Determining the languages of the subtitles.")
        else:
            logging.info("Determining the language of the subtitle.")
        if self.config.get("get_all_subtitles"):
            for subfix in self.subfixes:
                # Known-exception suffixes bypass the remote query entirely.
                if [exceptions[key] for key in exceptions.keys() if match(key, subfix.strip("-"))]:
                    if "oversattning" in subfix.strip("-"):
                        subfix = subfix.strip("-").split(".")[0]
                    else:
                        subfix = subfix.strip("-")
                    langs += [exceptions[subfix]]
                    continue
                subfile = "{}.srt".format(os.path.splitext(formatname(self.stream.output, self.config, self.stream.output_extention))[0] + subfix)
                langs += [query(subfile)]
        else:
            subfile = "{}.srt".format(os.path.splitext(formatname(self.stream.output, self.config, self.stream.output_extention))[0])
            langs += [query(subfile)]
        if len(langs) >= 2:
            logging.info("Language codes: " + ", ".join(langs))
        else:
            logging.info("Language code: " + langs[0])
        return langs
videotrack, audiotrack = self._checktracks(stderr) if self.config.get("merge_subtitle"): logging.info("Muxing {} and merging its subtitle into {}".format(orig_filename, new_name)) else: logging.info("Muxing {} into {}".format(orig_filename, new_name)) tempfile = "{}.temp".format(orig_filename) arguments = ["-map", "0:{}".format(videotrack), "-map", "0:{}".format(audiotrack), "-c", "copy", "-f", "mp4"] if ext == ".ts": arguments += ["-bsf:a", "aac_adtstoasc"] if self.config.get("merge_subtitle"): langs = self.sublanguage() for stream_num, language in enumerate(langs): arguments += [ "-map", str(stream_num + 1), "-c:s:" + str(stream_num), "mov_text", "-metadata:s:s:" + str(stream_num), "language=" + language, ] if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) cmd += ["-i", subfile] else: subfile = "{}.srt".format(name) cmd += ["-i", subfile] arguments += ["-y", tempfile] cmd += arguments returncode, stdout, stderr = run_program(cmd) if returncode != 0: return if self.config.get("merge_subtitle") and not self.config.get("subtitle"): logging.info("Muxing done, removing the old files.") if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) os.remove(subfile) else: os.remove(subfile) else: logging.info("Muxing done, removing the old file.") os.remove(orig_filename) os.rename(tempfile, new_name) def merge(self): if self.detect is None: logging.error("Cant detect ffmpeg or avconv. Cant mux files without it.") return if self.stream.finished is False: return orig_filename = formatname(self.stream.output, self.config, self.stream.output_extention) cmd = [self.detect, "-i", orig_filename] _, stdout, stderr = run_program(cmd, False) # return 1 is good here. 
videotrack, audiotrack = self._checktracks(stderr) if self.config.get("merge_subtitle"): logging.info("Merge audio, video and subtitle into {}".format(orig_filename)) else: logging.info("Merge audio and video into {}".format(orig_filename)) tempfile = "{}.temp".format(orig_filename) name, ext = os.path.splitext(orig_filename) arguments = ["-c:v", "copy", "-c:a", "copy", "-f", "mp4"] if ext == ".ts": audio_filename = "{}.audio.ts".format(name) arguments += ["-bsf:a", "aac_adtstoasc"] else: audio_filename = "{}.m4a".format(name) cmd = [self.detect, "-i", orig_filename, "-i", audio_filename] arguments += ["-map", "{}".format(videotrack), "-map", "{}".format(audiotrack)] if self.config.get("merge_subtitle"): langs = self.sublanguage() for stream_num, language in enumerate(langs, start=audiotrack + 1): arguments += [ "-map", str(stream_num), "-c:s:" + str(stream_num - 2), "mov_text", "-metadata:s:s:" + str(stream_num - 2), "language=" + language, ] if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) cmd += ["-i", subfile] else: subfile = "{}.srt".format(name) cmd += ["-i", subfile] arguments += ["-y", tempfile] cmd += arguments returncode, stdout, stderr = run_program(cmd) if returncode != 0: return logging.info("Merging done, removing old files.") os.remove(orig_filename) os.remove(audio_filename) if self.config.get("merge_subtitle") and not self.config.get("subtitle"): if self.subfixes and len(self.subfixes) >= 2: for subfix in self.subfixes: subfile = "{}.srt".format(name + subfix) os.remove(subfile) else: os.remove(subfile) os.rename(tempfile, orig_filename) def _checktracks(self, output): allstuff = re.findall(r"Stream \#\d:(\d)\[[^\[]+\]([\(\)\w]+)?: (Video|Audio): (.*)", output) videotrack = 0 audiotrack = 1 for stream in allstuff: if stream[2] == "Video": videotrack = stream[0] if stream[2] == "Audio": if stream[3] == "mp3, 0 channels": continue audiotrack = stream[0] return videotrack, audiotrack 
svtplay-dl-2.4/lib/svtplay_dl/service/000077500000000000000000000000001354320321400200425ustar00rootroot00000000000000svtplay-dl-2.4/lib/svtplay_dl/service/__init__.py000066400000000000000000000207621354320321400221620ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import logging import os import re from urllib.parse import urlparse from svtplay_dl.utils.http import download_thumbnails from svtplay_dl.utils.http import HTTP from svtplay_dl.utils.parser import merge from svtplay_dl.utils.parser import readconfig from svtplay_dl.utils.parser import setup_defaults class Service: supported_domains = [] supported_domains_re = [] def __init__(self, config, _url, http=None): self._url = _url self._urldata = None self._error = False self.subtitle = None self.cookies = {} self.auto_name = None self.output = { "title": None, "season": None, "episode": None, "episodename": None, "id": None, "service": self.__class__.__name__.lower(), "tvshow": None, "title_nice": None, "showdescription": None, "episodedescription": None, "showthumbnailurl": None, "episodethumbnailurl": None, "publishing_datetime": None, } if not http: self.http = HTTP(config) else: self.http = http # Config if config.get("configfile") and os.path.isfile(config.get("configfile")): self.config = merge( readconfig(setup_defaults(), config.get("configfile"), service=self.__class__.__name__.lower()).get_variable(), config.get_variable() ) else: self.config = config logging.debug("service: {}".format(self.__class__.__name__.lower())) @property def url(self): return self._url def get_urldata(self): if self._urldata is None: self._urldata = self.http.request("get", self.url).text return self._urldata @classmethod def handles(cls, url): urlp = urlparse(url) # Apply supported_domains_re regexp to the netloc. This # is meant for 'dynamic' domains, e.g. containing country # information etc. 
for domain_re in [re.compile(x) for x in cls.supported_domains_re]: if domain_re.match(urlp.netloc): return True if urlp.netloc in cls.supported_domains: return True # For every listed domain, try with www.subdomain as well. if urlp.netloc in ["www." + x for x in cls.supported_domains]: return True return False def get_subtitle(self, options): pass # the options parameter is unused, but is part of the # interface, so we don't want to remove it. Thus, the # pylint ignore. def find_all_episodes(self, options): # pylint: disable-msg=unused-argument logging.warning("--all-episodes not implemented for this service") return [self.url] def opengraph_get(html, prop): """ Extract specified OpenGraph property from html. >>> opengraph_get('>> opengraph_get('>> opengraph_get(']*property="og:' + prop + '" content="([^"]*)"', html) if match is None: match = re.search(']*content="([^"]*)" property="og:' + prop + '"', html) if match is None: return None return match.group(1) class OpenGraphThumbMixin: """ Mix this into the service class to grab thumbnail from OpenGraph properties. """ def get_thumbnail(self, options): url = opengraph_get(self.get_urldata(), "image") if url is None: return download_thumbnails(options, [(False, url)]) class MetadataThumbMixin: """ Mix this into the service class to grab thumbnail from extracted metadata. 
class Generic(Service):
    """ Videos embed in sites

    Scans an arbitrary page for embeds from known providers and hands the
    resolved URL to the matching service class from *sites*.
    """

    def get(self, sites):
        """Return ``(url, service_instance)`` for the first recognized embed,
        or ``(self.url, None)`` when nothing matches.
        """
        data = self.http.request("get", self.url).text
        match = re.search(r"src=(\"|\')(http://www.svt.se/wd[^\'\"]+)(\"|\')", data)
        stream = None
        if match:
            url = match.group(2)
            for i in sites:
                if i.handles(url):
                    # BUGFIX(extraction): the entity-decoding literals were
                    # garbled into no-op replace("&", "&") calls in the
                    # extracted copy; restored to decode HTML-escaped URLs.
                    url = url.replace("&amp;", "&").replace("&#038;", "&")
                    return url, i(self.config, url)

        match = re.search(r"src=\"(http://player.vimeo.com/video/[0-9]+)\" ", data)
        if match:
            for i in sites:
                if i.handles(match.group(1)):
                    return match.group(1), i(self.config, url)

        match = re.search(r"tv4play.se/iframe/video/(\d+)?", data)
        if match:
            url = "http://www.tv4play.se/?video_id=%s" % match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r"embed.bambuser.com/broadcast/(\d+)", data)
        if match:
            url = "http://bambuser.com/v/%s" % match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r'src="(http://tv.aftonbladet[^"]*)"', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r'a href="(http://tv.aftonbladet[^"]*)" class="abVi', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r"iframe src='(http://www.svtplay[^']*)'", data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search('src="(http://mm-resource-service.herokuapp.com[^"]*)"', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search(r'src="([^.]+\.solidtango.com[^"+]+)"', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, url)

        match = re.search("(lemonwhale|lwcdn.com)", data)
        if match:
            url = "http://lemonwhale.com"
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search("(picsearch_ajax_auth|screen9-ajax-auth)", data)
        if match:
            url = "http://csp.picsearch.com"
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search('iframe src="(//csp.screen9.com[^"]+)"', data)
        if match:
            url = "http:%s" % match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search('source src="([^"]+)" type="application/x-mpegURL"', data)
        if match:
            for i in sites:
                if i.__name__ == "Raw":
                    return self.url, i(self.config, match.group(1))

        return self.url, stream


def service_handler(sites, options, url):
    """Return an instance of the first service in *sites* that handles *url*,
    or None when no service matches.
    """
    handler = None

    for i in sites:
        if i.handles(url):
            handler = i(options, url)
            break

    return handler
streams = hlsparse(self.config, self.http.request("get", data["streamUrls"]["hls"]), data["streamUrls"]["hls"], output=self.output) for n in list(streams.keys()): yield streams[n] class Aftonbladet(Service): supported_domains = ["aftonbladet.se", "tv.aftonbladet.se"] def get(self): data = self.get_urldata() match = re.search("window.FLUX_STATE = ({.*})", data) if not match: yield ServiceError("Can't find video info") return try: janson = json.loads(match.group(1)) except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {}".format(match.group(1))) return videos = self._get_video(janson) yield from videos def _get_video(self, janson): collections = janson["collections"] for n in list(collections.keys()): contents = collections[n]["contents"]["items"] for i in list(contents.keys()): if "type" in contents[i] and contents[i]["type"] == "video": streams = hlsparse( self.config, self.http.request("get", contents[i]["videoAsset"]["streamUrls"]["hls"]), contents[i]["videoAsset"]["streamUrls"]["hls"], output=self.output, ) for key in list(streams.keys()): yield streams[key] svtplay-dl-2.4/lib/svtplay_dl/service/atg.py000066400000000000000000000027261354320321400211760ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import json from datetime import datetime from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service class Atg(Service): supported_domains = ["atgplay.se"] def get(self): parse = urlparse(self.url) if not parse.path.startswith("/video"): yield ServiceError("Can't find video info") return wanted_id = parse.path[7:] current_time = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000) api_url = "https://www.atgplay.se/api/{}/video/{}".format(current_time, wanted_id) video_assets = self.http.request("get", 
api_url) try: janson = json.loads(video_assets.text) except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {}".format(video_assets.text)) return if "title" in janson: self.output["title"] = janson["title"] if "urls" in janson: for i in janson["urls"]: if "m3u" == i: stream = hlsparse(self.config, self.http.request("get", janson["urls"]["m3u"]), janson["urls"]["m3u"], output=self.output) for key in list(stream.keys()): yield stream[key] svtplay-dl-2.4/lib/svtplay_dl/service/barnkanalen.py000066400000000000000000000066211354320321400226750ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import json import logging import re from urllib.parse import parse_qs from urllib.parse import urljoin from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.service.svtplay import Svtplay class Barnkanalen(Svtplay): supported_domains = ["svt.se"] supported_path = "/barnkanalen" @classmethod def handles(cls, url): urlp = urlparse(url) correctpath = urlp.path.startswith(cls.supported_path) if urlp.netloc in cls.supported_domains and correctpath: return True # For every listed domain, try with www. subdomain as well. if urlp.netloc in ["www." 
+ x for x in cls.supported_domains] and correctpath: return True return False def get(self): parse = urlparse(self.url) query = parse_qs(parse.query) self.access = None if "accessService" in query: self.access = query["accessService"] match = re.search("__barnplay'] = ({.*});", self.get_urldata()) if not match: yield ServiceError("Can't find video info.") return janson = json.loads(match.group(1))["context"]["dispatcher"]["stores"]["ApplicationStateStore"]["data"] if "episodeModel" not in janson["categoryStateCache"]["karaktarer"]: yield ServiceError("No videos found") return janson["video"] = janson["categoryStateCache"]["karaktarer"]["episodeModel"] if "title" not in janson["video"]: yield ServiceError("Can't find any video on that page.") return if "live" in janson["video"]: self.config.set("live", janson["video"]["live"]) self.outputfilename(janson["video"]) self.extrametadata(janson) if "programVersionId" in janson["video"]: vid = janson["video"]["programVersionId"] else: vid = janson["video"]["id"] res = self.http.get("http://api.svt.se/videoplayer-api/video/{}".format(vid)) try: janson = res.json() except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {}".format(res.request.url)) return videos = self._get_video(janson) yield from videos def find_all_episodes(self, config): videos = [] match = re.search("__barnplay'] = ({.*});", self.get_urldata()) if not match: logging.error("Couldn't retrieve episode list.") return else: dataj = json.loads(match.group(1)) dataj = dataj["context"]["dispatcher"]["stores"]["EpisodesStore"] showId = list(dataj["data"].keys())[0] items = dataj["data"][showId]["episodes"] for i in items: program = i videos = self.videos_to_list(program, videos) videos.reverse() episodes = [urljoin("http://www.svt.se", x) for x in videos] if config.get("all_last") > 0: return episodes[-config.get("all_last") :] return episodes def videos_to_list(self, lvideos, videos): url = self.url + "/" + str(lvideos["id"]) parse = 
urlparse(url) if parse.path not in videos: videos.append(parse.path) return videos svtplay-dl-2.4/lib/svtplay_dl/service/bigbrother.py000066400000000000000000000055611354320321400225520ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import copy import json import re from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hds import hdsparse from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.fetcher.http import HTTP from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Bigbrother(Service, OpenGraphThumbMixin): supported_domains = ["bigbrother.se"] def get(self): data = self.get_urldata() match = re.search(r'id="(bcPl[^"]+)"', data) if not match: yield ServiceError("Can't find flash id.") return flashid = match.group(1) match = re.search(r'playerID" value="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Can't find playerID") return playerid = match.group(1) match = re.search(r'playerKey" value="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Can't find playerKey") return playerkey = match.group(1) match = re.search(r'videoPlayer" value="([^"]+)"', self.get_urldata()) if not match: yield ServiceError("Can't find videoPlayer info") return videoplayer = match.group(1) dataurl = ( "http://c.brightcove.com/services/viewer/htmlFederated?flashID={}&playerID={}&playerKey={}" "&isVid=true&isUI=true&dynamicStreaming=true&@videoPlayer={}".format(flashid, playerid, playerkey, videoplayer) ) data = self.http.request("get", dataurl).content match = re.search(r"experienceJSON = ({.*});", data) if not match: yield ServiceError("Can't find json data") return jsondata = json.loads(match.group(1)) renditions = jsondata["data"]["programmedContent"]["videoPlayer"]["mediaDTO"]["renditions"] if jsondata["data"]["publisherType"] == "PREMIUM": yield ServiceError("Premium content") return for 
i in renditions: if i["defaultURL"].endswith("f4m"): streams = hdsparse( copy.copy(self.config), self.http.request("get", i["defaultURL"], params={"hdcore": "3.7.0"}), i["defaultURL"], output=self.output ) for n in list(streams.keys()): yield streams[n] if i["defaultURL"].endswith("m3u8"): streams = hlsparse(self.config, self.http.request("get", i["defaultURL"]), i["defaultURL"], output=self.output) for n in list(streams.keys()): yield streams[n] if i["defaultURL"].endswith("mp4"): yield HTTP(copy.copy(self.config), i["defaultURL"], i["encodingRate"] / 1024, output=self.output) svtplay-dl-2.4/lib/svtplay_dl/service/cmore.py000066400000000000000000000115531354320321400215260ustar00rootroot00000000000000from __future__ import absolute_import from __future__ import unicode_literals import logging import re from urllib.parse import urljoin from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service class Cmore(Service): supported_domains = ["www.cmore.se", "www.cmore.dk", "www.cmore.no", "www.cmore.fi"] def get(self): if not self.config.get("username") or not self.config.get("password"): yield ServiceError("You need username and password to download things from this site.") return token, message = self._login() if not token: yield ServiceError(message) return vid = self._get_vid() if not vid: yield ServiceError("Can't find video id") return tld = self._gettld() self.output["id"] = vid metaurl = "https://playback-api.b17g.net/asset/{}?service=cmore.{}" "&device=browser&drm=widevine&protocol=dash%2Chls".format( self.output["id"], tld ) res = self.http.get(metaurl) janson = res.json() self._autoname(janson) if janson["metadata"]["isDrmProtected"]: yield ServiceError("Can't play this because the video got drm.") return url = "https://playback-api.b17g.net/media/{}?service=cmore.{}&device=browser&protocol=hls%2Cdash&drm=widevine".format(self.output["id"], tld) res = 
self.http.request("get", url, cookies=self.cookies, headers={"authorization": "Bearer {}".format(token)}) if res.status_code > 200: yield ServiceError("Can't play this because the video is geoblocked.") return if res.json()["playbackItem"]["type"] == "hls": streams = hlsparse( self.config, self.http.request("get", res.json()["playbackItem"]["manifestUrl"]), res.json()["playbackItem"]["manifestUrl"], output=self.output, ) for n in list(streams.keys()): yield streams[n] def find_all_episodes(self, config): episodes = [] token, message = self._login() if not token: logging.error(message) return res = self.http.get(self.url) tags = re.findall(' 0: return sorted(episodes[-config.get("all_last") :]) return sorted(episodes) def _gettld(self): if isinstance(self.url, list): parse = urlparse(self.url[0]) else: parse = urlparse(self.url) return re.search(r"\.(\w{2})$", parse.netloc).group(1) def _login(self): tld = self._gettld() url = "https://www.cmore.{}/login".format(tld) res = self.http.get(url, cookies=self.cookies) if self.config.get("cmoreoperator"): post = { "username": self.config.get("username"), "password": self.config.get("password"), "operator": self.config.get("cmoreoperator"), "country_code": tld, } else: post = {"username": self.config.get("username"), "password": self.config.get("password")} res = self.http.post("https://account.cmore.{}/session?client=cmore-web-prod".format(tld), json=post, cookies=self.cookies) if res.status_code >= 400: return None, "Wrong username or password" janson = res.json() token = janson["data"]["vimond_token"] return token, None def operatorlist(self): res = self.http.get("https://tve.cmore.se/country/{}/operator?client=cmore-web".format(self._gettld())) for i in res.json()["data"]["operators"]: print("operator: '{}'".format(i["name"].lower())) def _get_vid(self): res = self.http.get(self.url) match = re.search('data-asset-id="([^"]+)"', res.text) if match: return match.group(1) parse = urlparse(self.url) match = 
re.search(r"/(\d+)-[\w-]+$", parse.path) if match: return match.group(1) return None def _autoname(self, janson): if "seriesTitle" in janson["metadata"]: self.output["title"] = janson["metadata"]["seriesTitle"] self.output["episodename"] = janson["metadata"]["episodeTitle"] else: self.output["title"] = janson["metadata"]["title"] self.output["season"] = janson["metadata"]["seasonNumber"] self.output["episode"] = janson["metadata"]["episodeNumber"] self.config.set("live", janson["metadata"]["isLive"]) svtplay-dl-2.4/lib/svtplay_dl/service/disney.py000066400000000000000000000100111354320321400217000ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import copy import json import re from urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.fetcher.http import HTTP from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service class Disney(Service, OpenGraphThumbMixin): supported_domains = ["disney.se", "video.disney.se", "disneyjunior.disney.se"] def get(self): parse = urlparse(self.url) if parse.hostname == "video.disney.se" or parse.hostname == "disneyjunior.disney.se": data = self.get_urldata() match = re.search(r"Grill.burger=({.*}):", data) if not match: yield ServiceError("Can't find video info") return jsondata = json.loads(match.group(1)) for n in jsondata["stack"]: if len(n["data"]) > 0: for x in n["data"]: if "flavors" in x: for i in x["flavors"]: if i["format"] == "mp4": res = self.http.get(i["url"]) match = re.search('button primary" href="([^"]+)"', res.text) if match: yield HTTP(copy.copy(self.config), match.group(1), i["bitrate"], output=self.output) else: data = self.get_urldata() match = re.search(r"uniqueId : '([^']+)'", data) if not match: yield ServiceError("Can't find video info") return uniq = match.group(1) match = re.search("entryId : 
'([^']+)'", self.get_urldata()) entryid = match.group(1) match = re.search("partnerId : '([^']+)'", self.get_urldata()) partnerid = match.group(1) match = re.search("uiConfId : '([^']+)'", self.get_urldata()) uiconfid = match.group(1) match = re.search("json : ({.*}}),", self.get_urldata()) jsondata = json.loads(match.group(1)) parse = urlparse(self.url) if len(parse.fragment) > 0: entry = parse.fragment[parse.fragment.rindex("/") + 1 :] if entry in jsondata["idlist"]: entryid = jsondata["idlist"][entry] else: yield ServiceError("Cant find video info") return for i in jsondata["playlists"][0]["playlist"]: if entryid in i["id"]: title = i["longId"] break self.output["title"] = title url = ( "http://cdnapi.kaltura.com/html5/html5lib/v1.9.7.6/mwEmbedFrame.php?&wid={}&uiconf_id={}&entry_id={}" "&playerId={}&forceMobileHTML5=true&urid=1.9.7.6&callback=mwi".format(partnerid, uiconfid, entryid, uniq) ) data = self.http.request("get", url).text match = re.search(r"mwi\(({.*})\);", data) jsondata = json.loads(match.group(1)) data = jsondata["content"] match = re.search(r"window.kalturaIframePackageData = ({.*});", data) jsondata = json.loads(match.group(1)) ks = jsondata["enviornmentConfig"]["ks"] name = jsondata["entryResult"]["meta"]["name"] self.output["title"] = name url = ( "http://cdnapi.kaltura.com/p/{}/sp/{}00/playManifest/entryId/{}/format/applehttp/protocol/http/a.m3u8" "?ks={}&referrer=aHR0cDovL3d3dy5kaXNuZXkuc2U=&".format(partnerid[1:], partnerid[1:], entryid, ks) ) redirect = self.http.check_redirect(url) streams = hlsparse(self.config, self.http.request("get", redirect), redirect, output=self.output) for n in list(streams.keys()): yield streams[n] svtplay-dl-2.4/lib/svtplay_dl/service/dplay.py000066400000000000000000000161061354320321400215310ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import hashlib import logging import random import re from 
urllib.parse import urlparse from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle country = {"sv": ".se", "da": ".dk", "no": ".no"} class Dplay(Service): supported_domains = ["dplay.se", "dplay.dk", "dplay.no"] def get(self): parse = urlparse(self.url) self.domain = re.search(r"(dplay\.\w\w)", parse.netloc).group(1) if not self._token(): logging.error("Something went wrong getting token for requests") if self.config.get("username") and self.config.get("password"): premium = self._login() if not premium: logging.warning("Wrong username/password.") channel = False if "kanaler" in parse.path: match = re.search("kanaler/([^/]+)$", parse.path) path = "/channels/{}".format(match.group(1)) url = "https://disco-api.{}/content{}".format(self.domain, path) channel = True self.config.set("live", True) elif "program" in parse.path: match = re.search("(programmer|program)/([^/]+)$", parse.path) path = "/shows/{}".format(match.group(2)) url = "https://disco-api.{}/content{}".format(self.domain, path) res = self.http.get(url, headers={"x-disco-client": "WEB:UNKNOWN:dplay-client:0.0.1"}) programid = res.json()["data"]["id"] qyerystring = ( "include=primaryChannel,show&filter[videoType]=EPISODE&filter[show.id]={}&" "page[size]=100&sort=seasonNumber,episodeNumber,-earliestPlayableStart".format(programid) ) res = self.http.get("https://disco-api.{}/content/videos?{}".format(self.domain, qyerystring)) janson = res.json() vid = 0 slug = None for i in janson["data"]: if int(i["id"]) > vid: vid = int(i["id"]) slug = i["attributes"]["path"] if slug: url = "https://disco-api.{}/content/videos/{}".format(self.domain, slug) else: yield ServiceError("Cant find latest video on program url") return else: match = re.search("(videos|videoer)/(.*)$", parse.path) url = "https://disco-api.{}/content/videos/{}".format(self.domain, match.group(2)) res = self.http.get(url, 
headers={"x-disco-client": "WEB:UNKNOWN:dplay-client:0.0.1"}) janson = res.json() if "errors" in janson: yield ServiceError("Cant find any videos on this url") return if channel: name = janson["data"]["attributes"]["name"] self.output["title"] = name else: name = self._autoname(janson) if name is None: yield ServiceError("Cant find vid id for autonaming") return self.output["id"] = janson["data"]["id"] api = "https://disco-api.{}/playback/videoPlaybackInfo/{}".format(self.domain, janson["data"]["id"]) res = self.http.get(api) if res.status_code > 400: yield ServiceError("You dont have permission to watch this") return streams = hlsparse( self.config, self.http.request("get", res.json()["data"]["attributes"]["streaming"]["hls"]["url"]), res.json()["data"]["attributes"]["streaming"]["hls"]["url"], httpobject=self.http, output=self.output, ) for n in list(streams.keys()): if isinstance(streams[n], subtitle): # we get the subtitles from the hls playlist. if self.config.get("get_all_subtitles"): yield streams[n] else: if streams[n].subfix in country and country[streams[n].subfix] in self.domain: yield streams[n] else: yield streams[n] def _autoname(self, jsondata): match = re.search("^([^/]+)/", jsondata["data"]["attributes"]["path"]) self.output["title"] = match.group(1) self.output["season"] = int(jsondata["data"]["attributes"]["seasonNumber"]) self.output["episode"] = int(jsondata["data"]["attributes"]["episodeNumber"]) self.output["episodename"] = jsondata["data"]["attributes"]["name"] return self.output["title"] def find_all_episodes(self, config): parse = urlparse(self.url) self.domain = re.search(r"(dplay\.\w\w)", parse.netloc).group(1) match = re.search("^/(program|programmer|videos|videoer)/([^/]+)", parse.path) if not match: logging.error("Can't find show name") return None if not self._token(): logging.error("Something went wrong getting token for requests") premium = False if self.config.get("username") and self.config.get("password"): premium = 
self._login() if not premium: logging.warning("Wrong username/password.") url = "https://disco-api.{}/content/shows/{}".format(self.domain, match.group(2)) res = self.http.get(url) programid = res.json()["data"]["id"] seasons = res.json()["data"]["attributes"]["seasonNumbers"] episodes = [] for season in seasons: qyerystring = ( "include=primaryChannel,show&filter[videoType]=EPISODE&filter[show.id]={}&filter[seasonNumber]={}&" "page[size]=100&sort=seasonNumber,episodeNumber,-earliestPlayableStart".format(programid, season) ) res = self.http.get("https://disco-api.{}/content/videos?{}".format(self.domain, qyerystring)) janson = res.json() for i in janson["data"]: if not premium and "Free" not in i["attributes"]["packages"]: continue episodes.append("https://www.{}/videos/{}".format(self.domain, i["attributes"]["path"])) if len(episodes) == 0: logging.error("Cant find any playable files") if config.get("all_last") > 0: return episodes[: config.get("all_last")] return episodes def _login(self): url = "https://disco-api.{}/login".format(self.domain) login = {"credentials": {"username": self.config.get("username"), "password": self.config.get("password")}} res = self.http.post(url, json=login) if res.status_code > 400: return False return True def _token(self): # random device id for cookietoken deviceid = hashlib.sha256(bytes(int(random.random() * 1000))).hexdigest() url = "https://disco-api.{}/token?realm={}&deviceId={}&shortlived=true".format(self.domain, self.domain.replace(".", ""), deviceid) res = self.http.get(url) if res.status_code > 400: return False return True svtplay-dl-2.4/lib/svtplay_dl/service/dr.py000066400000000000000000000135571354320321400210340ustar00rootroot00000000000000# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import base64 import binascii import copy import hashlib import json import re from urllib.parse import urljoin from urllib.parse import urlparse from 
cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers import Cipher from cryptography.hazmat.primitives.ciphers import modes from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.hds import hdsparse from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.service import OpenGraphThumbMixin from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle class Dr(Service, OpenGraphThumbMixin): supported_domains = ["dr.dk"] def get(self): data = self.get_urldata() match = re.search(r'resource:[ ]*"([^"]*)",', data) if match: resource_url = match.group(1) resource_data = self.http.request("get", resource_url).content resource = json.loads(resource_data) streams = self.find_stream(self.config, resource) yield from streams else: match = re.search(r'resource="([^"]*)"', data) if not match: yield ServiceError("Cant find resource info for this video") return if match.group(1)[:4] != "http": resource_url = "http:{}".format(match.group(1)) else: resource_url = match.group(1) resource_data = self.http.request("get", resource_url).text resource = json.loads(resource_data) if "Links" not in resource: yield ServiceError("Cant access this video. 
its geoblocked.") return if "SubtitlesList" in resource and len(resource["SubtitlesList"]) > 0: suburl = resource["SubtitlesList"][0]["Uri"] yield subtitle(copy.copy(self.config), "wrst", suburl, output=self.output) if "Data" in resource: streams = self.find_stream(self.config, resource) yield from streams else: for stream in resource["Links"]: uri = stream["Uri"] if uri is None: uri = self._decrypt(stream["EncryptedUri"]) if stream["Target"] == "HDS": streams = hdsparse(copy.copy(self.config), self.http.request("get", uri, params={"hdcore": "3.7.0"}), uri, output=self.output) if streams: for n in list(streams.keys()): yield streams[n] if stream["Target"] == "HLS": streams = hlsparse(self.config, self.http.request("get", uri), uri, output=self.output) for n in list(streams.keys()): yield streams[n] def find_all_episodes(self, config): episodes = [] matches = re.findall(r'