pax_global_header00006660000000000000000000000064150106164550014515gustar00rootroot0000000000000052 comment=3331a424c1e65e1e8b6f025bd6a2a90a672e4a63 redis-py-6.1.0/000077500000000000000000000000001501061645500132555ustar00rootroot00000000000000redis-py-6.1.0/.coveragerc000066400000000000000000000000251501061645500153730ustar00rootroot00000000000000[run] source = redis redis-py-6.1.0/.dockerignore000066400000000000000000000000561501061645500157320ustar00rootroot00000000000000**/__pycache__ **/*.pyc .coverage .coverage.* redis-py-6.1.0/.github/000077500000000000000000000000001501061645500146155ustar00rootroot00000000000000redis-py-6.1.0/.github/CODEOWNERS000066400000000000000000000000351501061645500162060ustar00rootroot00000000000000doctests/* @dmaier-redislabs redis-py-6.1.0/.github/ISSUE_TEMPLATE.md000066400000000000000000000010571501061645500173250ustar00rootroot00000000000000Thanks for wanting to report an issue you've found in redis-py. Please delete this text and fill in the template below. It is of course not always possible to reduce your code to a small test case, but it's highly appreciated to have as much data as possible. Thank you! **Version**: What redis-py and what redis version is the issue happening on? **Platform**: What platform / version? (For example Python 3.5.1 on Windows 7 / Ubuntu 15.10 / Azure) **Description**: Description of your issue, stack traces from errors and code that reproduces the issue redis-py-6.1.0/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000013161501061645500204170ustar00rootroot00000000000000### Pull Request check-list _Please make sure to review and check all of these items:_ - [ ] Do tests and lints pass with this change? - [ ] Do the CI tests pass with this change (enable it first in your forked repo and wait for the github action build to finish)? - [ ] Is the new or changed code fully tested? - [ ] Is a documentation update included (if this change modifies existing APIs, or introduces new ones)? 
- [ ] Is there an example added to the examples folder (if applicable)? - [ ] Was the change added to CHANGES file? _NOTE: these things are not required to open a PR and can be done afterwards / while the PR is open._ ### Description of change _Please provide a description of the change here._ redis-py-6.1.0/.github/actions/000077500000000000000000000000001501061645500162555ustar00rootroot00000000000000redis-py-6.1.0/.github/actions/run-tests/000077500000000000000000000000001501061645500202215ustar00rootroot00000000000000redis-py-6.1.0/.github/actions/run-tests/action.yml000066400000000000000000000127731501061645500222330ustar00rootroot00000000000000name: 'Run redis-py tests' description: 'Runs redis-py tests against different Redis versions and configurations' inputs: python-version: description: 'Python version to use for running tests' default: '3.12' parser-backend: description: 'Parser backend to use: plain or hiredis' required: true redis-version: description: 'Redis version to test against' required: true hiredis-version: description: 'hiredis version to test against' required: false default: '>3.0.0' hiredis-branch: description: 'hiredis branch to test against' required: false default: 'master' event-loop: description: 'Event loop to use' required: false default: 'asyncio' runs: using: "composite" steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ inputs.python-version }} cache: 'pip' - uses: actions/checkout@v4 if: ${{ inputs.parser-backend == 'hiredis' && inputs.hiredis-version == 'unstable' }} with: repository: redis/hiredis-py submodules: true path: hiredis-py ref: ${{ inputs.hiredis-branch }} - name: Setup Test environment env: REDIS_VERSION: ${{ inputs.redis-version }} CLIENT_LIBS_TEST_IMAGE_TAG: ${{ inputs.redis-version }} run: | set -e echo "::group::Installing dependencies" pip install -r dev_requirements.txt pip uninstall -y redis # uninstall Redis package installed via redis-entraid pip install -e .[jwt] 
# install the working copy if [ "${{inputs.parser-backend}}" == "hiredis" ]; then if [[ "${{inputs.hiredis-version}}" == "unstable" ]]; then echo "Installing unstable version of hiredis from local directory" pip install -e ./hiredis-py else pip install "hiredis${{inputs.hiredis-version}}" fi echo "PARSER_BACKEND=$(echo "${{inputs.parser-backend}}_${{inputs.hiredis-version}}" | sed 's/[^a-zA-Z0-9]/_/g')" >> $GITHUB_ENV else echo "PARSER_BACKEND=${{inputs.parser-backend}}" >> $GITHUB_ENV fi echo "::endgroup::" echo "::group::Starting Redis servers" redis_major_version=$(echo "$REDIS_VERSION" | grep -oP '^\d+') echo "REDIS_MAJOR_VERSION=${redis_major_version}" >> $GITHUB_ENV if (( redis_major_version < 8 )); then echo "Using redis-stack for module tests" # Mapping of redis version to stack version declare -A redis_stack_version_mapping=( ["7.4.2"]="rs-7.4.0-v2" ["7.2.7"]="rs-7.2.0-v14" ["6.2.17"]="rs-6.2.6-v18" ) if [[ -v redis_stack_version_mapping[$REDIS_VERSION] ]]; then export CLIENT_LIBS_TEST_STACK_IMAGE_TAG=${redis_stack_version_mapping[$REDIS_VERSION]} echo "REDIS_MOD_URL=redis://127.0.0.1:6479/0" >> $GITHUB_ENV else echo "Version not found in the mapping." 
exit 1 fi if (( redis_major_version < 7 )); then export REDIS_STACK_EXTRA_ARGS="--tls-auth-clients optional --save ''" export REDIS_EXTRA_ARGS="--tls-auth-clients optional --save ''" fi invoke devenv --endpoints=all-stack else echo "Using redis CE for module tests" echo "REDIS_MOD_URL=redis://127.0.0.1:6379" >> $GITHUB_ENV invoke devenv --endpoints all fi sleep 10 # time to settle echo "::endgroup::" shell: bash - name: Run tests run: | set -e run_tests() { local protocol=$1 local eventloop="" if [ "${{inputs.event-loop}}" == "uvloop" ]; then eventloop="--uvloop" fi echo "::group::RESP${protocol} standalone tests" echo "REDIS_MOD_URL=${REDIS_MOD_URL}" if (( $REDIS_MAJOR_VERSION < 7 )) && [ "$protocol" == "3" ]; then echo "Skipping module tests: Modules doesn't support RESP3 for Redis versions < 7" invoke standalone-tests --redis-mod-url=${REDIS_MOD_URL} $eventloop --protocol="${protocol}" --extra-markers="not redismod and not cp_integration" else invoke standalone-tests --redis-mod-url=${REDIS_MOD_URL} $eventloop --protocol="${protocol}" fi echo "::endgroup::" echo "::group::RESP${protocol} cluster tests" invoke cluster-tests $eventloop --protocol=${protocol} echo "::endgroup::" } run_tests 2 "${{inputs.event-loop}}" run_tests 3 "${{inputs.event-loop}}" shell: bash - name: Debug if: failure() run: | sudo apt-get install -y redis-tools echo "Docker Containers:" docker ps redis-cli -p 16379 CLUSTER NODES shell: bash - name: Upload test results and profiling data uses: actions/upload-artifact@v4 with: name: pytest-results-redis_${{inputs.redis-version}}-python_${{inputs.python-version}}-parser_${{env.PARSER_BACKEND}}-el_${{inputs.event-loop}} path: | *-results.xml prof/** profile_output* if-no-files-found: error retention-days: 10 - name: Upload codecov coverage uses: codecov/codecov-action@v4 with: fail_ci_if_error: false redis-py-6.1.0/.github/dependabot.yml000066400000000000000000000002311501061645500174410ustar00rootroot00000000000000version: 2 updates: - 
package-ecosystem: "github-actions" directory: "/" labels: - "maintenance" schedule: interval: "monthly" redis-py-6.1.0/.github/release-drafter-config.yml000066400000000000000000000020571501061645500216540ustar00rootroot00000000000000name-template: '$NEXT_MINOR_VERSION' tag-template: 'v$NEXT_MINOR_VERSION' filter-by-commitish: true commitish: master autolabeler: - label: 'maintenance' files: - '*.md' - '.github/*' - label: 'bug' branch: - '/bug-.+' - label: 'maintenance' branch: - '/maintenance-.+' - label: 'feature' branch: - '/feature-.+' categories: - title: '🔥 Breaking Changes' labels: - 'breakingchange' - title: '🧪 Experimental Features' labels: - 'experimental' - title: '🚀 New Features' labels: - 'feature' - 'enhancement' - title: '🐛 Bug Fixes' labels: - 'fix' - 'bugfix' - 'bug' - 'BUG' - title: '🧰 Maintenance' labels: - 'maintenance' - 'dependencies' - 'documentation' - 'docs' - 'testing' change-template: '- $TITLE (#$NUMBER)' exclude-labels: - 'skip-changelog' template: | # Changes $CHANGES ## Contributors We'd like to thank all the contributors who worked on this release! 
$CONTRIBUTORS redis-py-6.1.0/.github/spellcheck-settings.yml000066400000000000000000000010251501061645500213110ustar00rootroot00000000000000matrix: - name: Markdown expect_match: false apsell: lang: en d: en_US ignore-case: true dictionary: wordlists: - .github/wordlist.txt output: wordlist.dic pipeline: - pyspelling.filters.markdown: markdown_extensions: - markdown.extensions.extra: - pyspelling.filters.html: comments: false attributes: - alt ignores: - ':matches(code, pre)' - code - pre - blockquote - img sources: - '*.md' - 'docs/*.rst' - 'docs/*.ipynb' redis-py-6.1.0/.github/wordlist.txt000066400000000000000000000024341501061645500172300ustar00rootroot00000000000000APM ARGV BFCommands CacheImpl CAS CFCommands CMSCommands ClusterNode ClusterNodes ClusterPipeline ClusterPubSub ConnectionPool CoreCommands EVAL EVALSHA Grokzen's INCR IOError Instrumentations JSONCommands Jaeger Ludovico Magnocavallo McCurdy NOSCRIPT NUMPAT NUMPT NUMSUB OSS OpenCensus OpenTelemetry OpenTracing Otel PubSub READONLY RediSearch RedisBloom RedisCluster RedisClusterCommands RedisClusterException RedisClusters RedisInstrumentor RedisJSON RedisTimeSeries SHA SearchCommands SentinelCommands SentinelConnectionPool Sharded Solovyov SpanKind Specfiying StatusCode TCP TOPKCommands TimeSeriesCommands Uptrace ValueError WATCHed WatchError api args async asyncio autoclass automodule backoff bdb behaviour bool boolean booleans bysource charset del dev docstring docstrings eg exc firsttimersonly fo genindex gmail hiredis http idx iff ini json keyslot keyspace kwarg linters localhost lua makeapullrequest maxdepth mget microservice microservices mset multikey mykey nonatomic observability opentelemetry oss performant pmessage png pre psubscribe pubsub punsubscribe py pypi quickstart readonly readwrite redis redismodules reinitialization replicaof repo runtime sedrik sharded ssl str stunnel subcommands thevalueofmykey timeseries toctree topk triaging txt un unicode url virtualenv www yaml 
redis-py-6.1.0/.github/workflows/000077500000000000000000000000001501061645500166525ustar00rootroot00000000000000redis-py-6.1.0/.github/workflows/codeql-analysis.yml000066400000000000000000000043741501061645500224750ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ master ] pull_request: # The branches below must be a subset of the branches above branches: [ master ] jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: [ 'python' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://git.io/codeql-language-support steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 redis-py-6.1.0/.github/workflows/docs.yaml000066400000000000000000000020261501061645500204660ustar00rootroot00000000000000name: Docs CI on: push: branches: - master - '[0-9].[0-9]' pull_request: branches: - master - '[0-9].[0-9]' schedule: - cron: '0 1 * * *' # nightly build concurrency: group: ${{ github.event.pull_request.number || github.ref }}-docs cancel-in-progress: true permissions: contents: read # to fetch code (actions/checkout) jobs: build-docs: name: Build docs runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: 3.9 cache: 'pip' - name: install deps run: | sudo apt-get update -yqq sudo apt-get install -yqq pandoc make - name: run code linters run: | pip install -r dev_requirements.txt -r docs/requirements.txt invoke build-docs - name: upload docs uses: actions/upload-artifact@v4 with: name: redis-py-docs path: | docs/_build/html redis-py-6.1.0/.github/workflows/hiredis-py-integration.yaml000066400000000000000000000041021501061645500241310ustar00rootroot00000000000000name: Hiredis-py integration tests on: workflow_dispatch: inputs: redis-py-branch: description: 'redis-py branch to run tests on' required: true default: 'master' hiredis-branch: description: 'hiredis-py branch to run tests on' required: true default: 'master' concurrency: group: ${{ github.event.pull_request.number || github.ref }}-hiredis-integration cancel-in-progress: true permissions: contents: read # to fetch code (actions/checkout) env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # this speeds up coverage with Python 3.12: https://github.com/nedbat/coveragepy/issues/1665 COVERAGE_CORE: sysmon 
CURRENT_CLIENT_LIBS_TEST_STACK_IMAGE_TAG: 'rs-7.4.0-v2' CURRENT_REDIS_VERSION: '7.4.2' jobs: redis_version: runs-on: ubuntu-latest outputs: CURRENT: ${{ env.CURRENT_REDIS_VERSION }} steps: - name: Compute outputs run: | echo "CURRENT=${{ env.CURRENT_REDIS_VERSION }}" >> $GITHUB_OUTPUT hiredis-tests: runs-on: ubuntu-latest needs: [redis_version] timeout-minutes: 60 strategy: max-parallel: 15 fail-fast: false matrix: redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] python-version: [ '3.8', '3.13'] parser-backend: [ 'hiredis' ] hiredis-version: [ 'unstable' ] event-loop: [ 'asyncio' ] env: ACTIONS_ALLOW_UNSECURE_COMMANDS: true name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}} (${{ matrix.hiredis-version }}); EL:${{matrix.event-loop}} steps: - uses: actions/checkout@v4 with: ref: ${{ inputs.redis-py-branch }} - name: Run tests uses: ./.github/actions/run-tests with: python-version: ${{ matrix.python-version }} parser-backend: ${{ matrix.parser-backend }} redis-version: ${{ matrix.redis-version }} hiredis-version: ${{ matrix.hiredis-version }} hiredis-branch: ${{ inputs.hiredis-branch }}redis-py-6.1.0/.github/workflows/install_and_test.sh000077500000000000000000000021521501061645500225400ustar00rootroot00000000000000#!/bin/bash set -e SUFFIX=$1 if [ -z ${SUFFIX} ]; then echo "Supply valid python package extension such as whl or tar.gz. Exiting." 
exit 3 fi script=`pwd`/${BASH_SOURCE[0]} HERE=`dirname ${script}` ROOT=`realpath ${HERE}/../..` cd ${ROOT} DESTENV=${ROOT}/.venvforinstall if [ -d ${DESTENV} ]; then rm -rf ${DESTENV} fi python -m venv ${DESTENV} source ${DESTENV}/bin/activate pip install --upgrade --quiet pip pip install --quiet -r dev_requirements.txt pip uninstall -y redis # uninstall Redis package installed via redis-entraid invoke devenv --endpoints=all-stack invoke package # find packages PKG=`ls ${ROOT}/dist/*.${SUFFIX}` ls -l ${PKG} TESTDIR=${ROOT}/STAGETESTS if [ -d ${TESTDIR} ]; then rm -rf ${TESTDIR} fi mkdir ${TESTDIR} cp -R ${ROOT}/tests ${TESTDIR}/tests cd ${TESTDIR} # install, run tests pip install ${PKG} # Redis tests pytest -m 'not onlycluster' # RedisCluster tests CLUSTER_URL="redis://localhost:16379/0" CLUSTER_SSL_URL="rediss://localhost:27379/0" pytest -m 'not onlynoncluster and not redismod and not ssl' \ --redis-url="${CLUSTER_URL}" --redis-ssl-url="${CLUSTER_SSL_URL}" redis-py-6.1.0/.github/workflows/integration.yaml000066400000000000000000000147031501061645500220660ustar00rootroot00000000000000name: CI on: push: paths-ignore: - 'docs/**' - '**/*.rst' - '**/*.md' branches: - master - '[0-9].[0-9]' pull_request: branches: - master - '[0-9].[0-9]' schedule: - cron: '0 1 * * *' # nightly build concurrency: group: ${{ github.event.pull_request.number || github.ref }}-integration cancel-in-progress: true permissions: contents: read # to fetch code (actions/checkout) env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # this speeds up coverage with Python 3.12: https://github.com/nedbat/coveragepy/issues/1665 COVERAGE_CORE: sysmon CURRENT_CLIENT_LIBS_TEST_STACK_IMAGE_TAG: 'rs-7.4.0-v2' CURRENT_REDIS_VERSION: '7.4.2' jobs: dependency-audit: name: Dependency audit runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: pypa/gh-action-pip-audit@v1.0.8 with: inputs: dev_requirements.txt ignore-vulns: | GHSA-w596-4wvx-j9j6 # subversion related git pull, dependency for pytest. 
There is no impact here. lint: name: Code linters runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: 3.9 cache: 'pip' - name: run code linters run: | pip install -r dev_requirements.txt pip uninstall -y redis # uninstall Redis package installed via redis-entraid invoke linters redis_version: runs-on: ubuntu-latest outputs: CURRENT: ${{ env.CURRENT_REDIS_VERSION }} steps: - name: Compute outputs run: | echo "CURRENT=${{ env.CURRENT_REDIS_VERSION }}" >> $GITHUB_OUTPUT tests: runs-on: ubuntu-latest timeout-minutes: 60 needs: redis_version strategy: max-parallel: 15 fail-fast: false matrix: redis-version: ['8.0.1-pre', '${{ needs.redis_version.outputs.CURRENT }}', '7.2.7', '6.2.17'] python-version: ['3.8', '3.13'] parser-backend: ['plain'] event-loop: ['asyncio'] env: ACTIONS_ALLOW_UNSECURE_COMMANDS: true name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}}; EL:${{matrix.event-loop}} steps: - uses: actions/checkout@v4 - name: Run tests uses: ./.github/actions/run-tests with: python-version: ${{ matrix.python-version }} parser-backend: ${{ matrix.parser-backend }} redis-version: ${{ matrix.redis-version }} python-compatibility-tests: runs-on: ubuntu-latest needs: [ redis_version, tests ] timeout-minutes: 60 strategy: max-parallel: 15 fail-fast: false matrix: redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] python-version: ['3.9', '3.10', '3.11', '3.12', 'pypy-3.9', 'pypy-3.10'] parser-backend: [ 'plain' ] event-loop: [ 'asyncio' ] env: ACTIONS_ALLOW_UNSECURE_COMMANDS: true name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}}; EL:${{matrix.event-loop}} steps: - uses: actions/checkout@v4 - name: Run tests uses: ./.github/actions/run-tests with: python-version: ${{ matrix.python-version }} parser-backend: ${{ matrix.parser-backend }} redis-version: ${{ matrix.redis-version 
}} hiredis-tests: runs-on: ubuntu-latest needs: [redis_version, tests] timeout-minutes: 60 strategy: max-parallel: 15 fail-fast: false matrix: redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] python-version: [ '3.8', '3.13'] parser-backend: [ 'hiredis' ] hiredis-version: [ '>=3.0.0', '<3.0.0' ] event-loop: [ 'asyncio' ] env: ACTIONS_ALLOW_UNSECURE_COMMANDS: true name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}} (${{ matrix.hiredis-version }}); EL:${{matrix.event-loop}} steps: - uses: actions/checkout@v4 - name: Run tests uses: ./.github/actions/run-tests with: python-version: ${{ matrix.python-version }} parser-backend: ${{ matrix.parser-backend }} redis-version: ${{ matrix.redis-version }} hiredis-version: ${{ matrix.hiredis-version }} uvloop-tests: runs-on: ubuntu-latest needs: [redis_version, tests] timeout-minutes: 60 strategy: max-parallel: 15 fail-fast: false matrix: redis-version: [ '${{ needs.redis_version.outputs.CURRENT }}' ] python-version: [ '3.8', '3.13' ] parser-backend: [ 'plain' ] event-loop: [ 'uvloop' ] env: ACTIONS_ALLOW_UNSECURE_COMMANDS: true name: Redis ${{ matrix.redis-version }}; Python ${{ matrix.python-version }}; RESP Parser:${{matrix.parser-backend}}; EL:${{matrix.event-loop}} steps: - uses: actions/checkout@v4 - name: Run tests uses: ./.github/actions/run-tests with: python-version: ${{ matrix.python-version }} parser-backend: ${{ matrix.parser-backend }} redis-version: ${{ matrix.redis-version }} event-loop: ${{ matrix.event-loop }} build-and-test-package: name: Validate building and installing the package runs-on: ubuntu-latest needs: [tests] strategy: fail-fast: false matrix: extension: ['tar.gz', 'whl'] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: 3.9 - name: Run installed unit tests env: CLIENT_LIBS_TEST_IMAGE_TAG: ${{ env.CURRENT_REDIS_VERSION }} CLIENT_LIBS_TEST_STACK_IMAGE_TAG: ${{ 
env.CURRENT_CLIENT_LIBS_TEST_STACK_IMAGE_TAG }} run: | bash .github/workflows/install_and_test.sh ${{ matrix.extension }} install-package-from-commit: name: Install package from commit hash runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13', 'pypy-3.9', 'pypy-3.10'] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' - name: install from pip run: | pip install --quiet git+${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git@${GITHUB_SHA} redis-py-6.1.0/.github/workflows/pypi-publish.yaml000066400000000000000000000013331501061645500221630ustar00rootroot00000000000000name: Publish tag to Pypi on: release: types: [published] workflow_dispatch: permissions: contents: read # to fetch code (actions/checkout) jobs: build_and_package: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: install python uses: actions/setup-python@v5 with: python-version: 3.9 - run: pip install build twine - name: Build package run: python -m build . - name: Basic package test prior to upload run: | twine check dist/* - name: Publish to Pypi uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} redis-py-6.1.0/.github/workflows/release-drafter.yml000066400000000000000000000013771501061645500224520ustar00rootroot00000000000000name: Release Drafter on: push: # branches to consider in the event; optional, defaults to all branches: - master permissions: {} jobs: update_release_draft: permissions: pull-requests: write # to add label to PR (release-drafter/release-drafter) contents: write # to create a github release (release-drafter/release-drafter) runs-on: ubuntu-latest steps: # Drafts your next Release notes as Pull Requests are merged into "master" - uses: release-drafter/release-drafter@v6 with: # (Optional) specify config name to use, relative to .github/. 
Default: release-drafter.yml config-name: release-drafter-config.yml env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} redis-py-6.1.0/.github/workflows/spellcheck.yml000066400000000000000000000005251501061645500215140ustar00rootroot00000000000000name: spellcheck on: pull_request: jobs: check-spelling: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Check Spelling uses: rojopolis/spellcheck-github-actions@0.48.0 with: config_path: .github/spellcheck-settings.yml task_name: Markdown redis-py-6.1.0/.github/workflows/stale-issues.yml000066400000000000000000000014311501061645500220150ustar00rootroot00000000000000name: "Close stale issues" on: schedule: - cron: "0 0 * * *" permissions: {} jobs: stale: permissions: issues: write # to close stale issues (actions/stale) pull-requests: write # to close stale PRs (actions/stale) runs-on: ubuntu-latest steps: - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue is marked stale. It will be closed in 30 days if it is not updated.' stale-pr-message: 'This pull request is marked stale. It will be closed in 30 days if it is not updated.' 
days-before-stale: 365 days-before-close: 30 stale-issue-label: "Stale" stale-pr-label: "Stale" operations-per-run: 20 remove-stale-when-updated: true redis-py-6.1.0/.gitignore000066400000000000000000000005261501061645500152500ustar00rootroot00000000000000*.pyc redis.egg-info build/ dist/ dump.rdb _build vagrant/.vagrant .python-version .cache .eggs .idea .vscode .coverage env venv coverage.xml .venv* *.xml .coverage* prof profile_output* docker/stunnel/keys /dockers/*/node-*/* /dockers/*/tls/* /dockers/standalone/ /dockers/cluster/ /dockers/replica/ /dockers/sentinel/ /dockers/redis-stack/ redis-py-6.1.0/.mypy.ini000066400000000000000000000011621501061645500150320ustar00rootroot00000000000000[mypy] #, docs/examples, tests files = redis check_untyped_defs = True follow_imports_for_stubs asyncio.= True #disallow_any_decorated = True disallow_subclassing_any = True #disallow_untyped_calls = True disallow_untyped_decorators = True #disallow_untyped_defs = True implicit_reexport = False no_implicit_optional = True show_error_codes = True strict_equality = True warn_incomplete_stub = True warn_redundant_casts = True warn_unreachable = True warn_unused_ignores = True disallow_any_unimported = True #warn_return_any = True [mypy-redis.asyncio.lock] # TODO: Remove once locks has been rewritten ignore_errors = True redis-py-6.1.0/.readthedocs.yml000066400000000000000000000003061501061645500163420ustar00rootroot00000000000000version: 2 python: install: - requirements: docs/requirements.txt - method: pip path: . build: os: ubuntu-20.04 tools: python: "3.9" sphinx: configuration: docs/conf.py redis-py-6.1.0/CHANGES000066400000000000000000002006211501061645500142510ustar00rootroot00000000000000 * Support transactions in ClusterPipeline * Removing support for RedisGraph module. 
RedisGraph support is deprecated since Redis Stack 7.2 (https://redis.com/blog/redisgraph-eol/) * Fix lock.extend() typedef to accept float TTL extension * Update URL in the readme linking to Redis University * Move doctests (doc code examples) to main branch * Update `ResponseT` type hint * Allow to control the minimum SSL version * Add an optional lock_name attribute to LockError. * Fix return types for `get`, `set_path` and `strappend` in JSONCommands * Connection.register_connect_callback() is made public. * Fix async `read_response` to use `disable_decoding`. * Add 'aclose()' methods to async classes, deprecate async close(). * Fix #2831, add auto_close_connection_pool=True arg to asyncio.Redis.from_url() * Fix incorrect redis.asyncio.Cluster type hint for `retry_on_error` * Fix dead weakref in sentinel connection causing ReferenceError (#2767) * Fix #2768, Fix KeyError: 'first-entry' in parse_xinfo_stream. * Fix #2749, remove unnecessary __del__ logic to close connections. * Fix #2754, adding a missing argument to SentinelManagedConnection * Fix `xadd` command to accept non-negative `maxlen` including 0 * Revert #2104, #2673, add `disconnect_on_error` option to `read_response()` (issues #2506, #2624) * Add `address_remap` parameter to `RedisCluster` * Fix incorrect usage of once flag in async Sentinel * asyncio: Fix memory leak caused by hiredis (#2693) * Allow data to drain from async PythonParser when reading during a disconnect() * Use asyncio.timeout() instead of async_timeout.timeout() for python >= 3.11 (#2602) * Add a Dependabot configuration to auto-update GitHub action versions. * Add test and fix async HiredisParser when reading during a disconnect() (#2349) * Use hiredis-py pack_command if available. 
* Support `.unlink()` in ClusterPipeline * Simplify synchronous SocketBuffer state management * Fix string cleanse in Redis Graph * Make PythonParser resumable in case of error (#2510) * Add `timeout=None` in `SentinelConnectionManager.read_response` * Documentation fix: password protected socket connection (#2374) * Allow `timeout=None` in `PubSub.get_message()` to wait forever * add `nowait` flag to `asyncio.Connection.disconnect()` * Update README.md links * Fix timezone handling for datetime to unixtime conversions * Fix start_id type for XAUTOCLAIM * Remove verbose logging from cluster.py * Add retry mechanism to async version of Connection * Compare commands case-insensitively in the asyncio command parser * Allow negative `retries` for `Retry` class to retry forever * Add `items` parameter to `hset` signature * Create codeql-analysis.yml (#1988). Thanks @chayim * Add limited support for Lua scripting with RedisCluster * Implement `.lock()` method on RedisCluster * Fix cursor returned by SCAN for RedisCluster & change default target to PRIMARIES * Fix scan_iter for RedisCluster * Remove verbose logging when initializing ClusterPubSub, ClusterPipeline or RedisCluster * Fix broken connection writer lock-up for asyncio (#2065) * Fix auth bug when provided with no username (#2086) * Fix missing ClusterPipeline._lock (#2189) * Added dynaminc_startup_nodes configuration to RedisCluster * Fix reusing the old nodes' connections when cluster topology refresh is being done * Fix RedisCluster to immediately raise AuthenticationError without a retry * ClusterPipeline Doesn't Handle ConnectionError for Dead Hosts (#2225) * Remove compatibility code for old versions of Hiredis, drop Packaging dependency * The `deprecated` library is no longer a dependency * Failover handling improvements for RedisCluster and Async RedisCluster (#2377) * Fixed "cannot pickle '_thread.lock' object" bug (#2354, #2297) * Added CredentialsProvider class to support password rotation * Enable 
Lock for asyncio cluster mode * Fix Sentinel.execute_command doesn't execute across the entire sentinel cluster bug (#2458) * Added a replacement for the default cluster node in the event of failure (#2463) * Fix for Unhandled exception related to self.host with unix socket (#2496) * Improve error output for master discovery * Make `ClusterCommandsProtocol` an actual Protocol * Add `sum` to DUPLICATE_POLICY documentation of `TS.CREATE`, `TS.ADD` and `TS.ALTER` * Prevent async ClusterPipeline instances from becoming "false-y" in case of empty command stack (#3061) * Close Unix sockets if the connection attempt fails. This prevents `ResourceWarning`s. (#3314) * Close SSL sockets if the connection attempt fails, or if validations fail. (#3317) * Eliminate mutable default arguments in the `redis.commands.core.Script` class. (#3332) * Fix SSL verification with `ssl_cert_reqs="none"` and `ssl_check_hostname=True` by automatically setting `check_hostname=False` when `verify_mode=ssl.CERT_NONE` (#3635) * Allow newer versions of PyJWT as dependency. 
(#3630) * 4.1.3 (Feb 8, 2022) * Fix flushdb and flushall (#1926) * Add redis5 and redis4 dockers (#1871) * Change json.clear test multi to be up to date with redisjson (#1922) * Fixing volume for unstable_cluster docker (#1914) * Update changes file with changes since 4.0.0-beta2 (#1915) * 4.1.2 (Jan 27, 2022) * Invalid OCSP certificates should raise ConnectionError on failed validation (#1907) * Added retry mechanism on socket timeouts when connecting to the server (#1895) * LMOVE, BLMOVE return incorrect responses (#1906) * Fixing AttributeError in UnixDomainSocketConnection (#1903) * Fixing TypeError in GraphCommands.explain (#1901) * For tests, increasing wait time for the cluster (#1908) * Increased pubsub's wait_for_messages timeout to prevent flaky tests (#1893) * README code snippets formatted to highlight properly (#1888) * Fix link in the main page (#1897) * Documentation fixes: JSON Example, SSL Connection Examples, RTD version (#1887) * Direct link to readthedocs (#1885) * 4.1.1 (Jan 17, 2022) * Add retries to connections in Sentinel Pools (#1879) * OCSP Stapling Support (#1873) * Define incr/decr as aliases of incrby/decrby (#1874) * FT.CREATE - support MAXTEXTFIELDS, TEMPORARY, NOHL, NOFREQS, SKIPINITIALSCAN (#1847) * Timeseries docs fix (#1877) * get_connection: catch OSError too (#1832) * Set keys var otherwise variable not created (#1853) * Clusters should optionally require full slot coverage (#1845) * Triple quote docstrings in client.py PEP 257 (#1876) * syncing requirements (#1870) * Typo and typing in GraphCommands documentation (#1855) * Allowing poetry and redis-py to install together (#1854) * setup.py: Add project_urls for PyPI (#1867) * Support test with redis unstable docker (#1850) * Connection examples (#1835) * Documentation cleanup (#1841) * 4.1.0 (Dec 26, 2021) * OCSP stapling support (#1820) * Support for SELECT (#1825) * Support for specifying error types with retry (#1817) * Support for RESET command since Redis 6.2.0 (#1824) * 
Support CLIENT TRACKING (#1612) * Support WRITE in CLIENT PAUSE (#1549) * JSON set_file and set_path support (#1818) * Allow ssl_ca_path with rediss:// urls (#1814) * Support for password-encrypted SSL private keys (#1782) * Support SYNC and PSYNC (#1741) * Retry on error exception and timeout fixes (#1821) * Fixing read race condition during pubsub (#1737) * Fixing exception in listen (#1823) * Fixed MovedError, and stopped iterating through startup nodes when slots are fully covered (#1819) * Socket not closing after server disconnect (#1797) * Single sourcing the package version (#1791) * Ensure redis_connect_func is set on uds connection (#1794) * SRTALGO - Skip for redis versions greater than 7.0.0 (#1831) * Documentation updates (#1822) * Add CI action to install package from repository commit hash (#1781) (#1790) * Fix link in lmove docstring (#1793) * Disabling JSON.DEBUG tests (#1787) * Migrated targeted nodes to kwargs in Cluster Mode (#1762) * Added support for MONITOR in clusters (#1756) * Adding ROLE Command (#1610) * Integrate RedisBloom support (#1683) * Adding RedisGraph support (#1556) * Allow overriding connection class via keyword arguments (#1752) * Aggregation LOAD * support for RediSearch (#1735) * Adding cluster, bloom, and graph docs (#1779) * Add packaging to setup_requires, and use >= to play nice to setup.py (fixes #1625) (#1780) * Fixing the license link in the readme (#1778) * Removing distutils from tests (#1773) * Fix cluster ACL tests (#1774) * Improved RedisCluster's reinitialize_steps and documentation (#1765) * Added black and isort (#1734) * Link Documents for all module commands (#1711) * Pyupgrade + flynt + f-strings (#1759) * Remove unused aggregation subclasses in RediSearch (#1754) * Adding RedisCluster client to support Redis Cluster Mode (#1660) * Support RediSearch FT.PROFILE command (#1727) * Adding support for non-decodable commands (#1731) * COMMAND GETKEYS support (#1738) * RedisJSON 2.0.4 behaviour support (#1747) * 
Removing deprecating distutils (PEP 632) (#1730) * Updating PR template (#1745) * Removing duplication of Script class (#1751) * Splitting documentation for read the docs (#1743) * Improve code coverage for aggregation tests (#1713) * Fixing COMMAND GETKEYS tests (#1750) * GitHub release improvements (#1684) * 4.0.2 (Nov 22, 2021) * Restoring Sentinel commands to redis client (#1723) * Better removal of hiredis warning (#1726) * Adding links to redis documents in function calls (#1719) * 4.0.1 (Nov 17, 2021) * Removing command on initial connections (#1722) * Removing hiredis warning when not installed (#1721) * 4.0.0 (Nov 15, 2021) * FT.EXPLAINCLI intentionally raising NotImplementedError * Restoring ZRANGE desc for Redis < 6.2.0 (#1697) * Response parsing occasionally fails to parse floats (#1692) * Re-enabling read-the-docs (#1707) * Call HSET after FT.CREATE to avoid keyspace scan (#1706) * Unit tests fixes for compatibility (#1703) * Improve documentation about Locks (#1701) * Fixes to allow --redis-url to pass through all tests (#1700) * Fix unit tests running against Redis 4.0.0 (#1699) * Search alias test fix (#1695) * Adding RediSearch/RedisJSON tests (#1691) * Updating codecov rules (#1689) * Tests to validate custom JSON decoders (#1681) * Added breaking icon to release drafter (#1702) * Removing dependency on six (#1676) * Re-enable pipeline support for JSON and TimeSeries (#1674) * Export Sentinel, and SSL like other classes (#1671) * Restore zrange functionality for older versions of Redis (#1670) * Fixed garbage collection deadlock (#1578) * Tests to validate built python packages (#1678) * Sleep for flaky search test (#1680) * Test function renames, to match standards (#1679) * Docstring improvements for Redis class (#1675) * Fix georadius tests (#1672) * Improvements to JSON coverage (#1666) * Add python_requires setuptools check for python > 3.6 (#1656) * SMISMEMBER support (#1667) * Exposing the module version in loaded_modules (#1648) * 
RedisTimeSeries support (#1652) * Support for json multipath ($) (#1663) * Added boolean parsing to PEXPIRE and PEXPIREAT (#1665) * Add python_requires setuptools check for python > 3.6 (#1656) * Adding vulture for static analysis (#1655) * Starting to clean the docs (#1657) * Update README.md (#1654) * Adding description format for package (#1651) * Publish to pypi as releases are generated with the release drafter (#1647) * Restore actions to prs (#1653) * Fixing the package to include commands (#1649) * Re-enabling codecov as part of CI process (#1646) * Adding support for redisearch (#1640) Thanks @chayim * redisjson support (#1636) Thanks @chayim * Sentinel: Add SentinelManagedSSLConnection (#1419) Thanks @AbdealiJK * Enable floating parameters in SET (ex and px) (#1635) Thanks @AvitalFineRedis * Add warning when hiredis not installed. Recommend installation. (#1621) Thanks @adiamzn * Raising NotImplementedError for SCRIPT DEBUG and DEBUG SEGFAULT (#1624) Thanks @chayim * CLIENT REDIR command support (#1623) Thanks @chayim * REPLICAOF command implementation (#1622) Thanks @chayim * Add support to NX XX and CH to GEOADD (#1605) Thanks @AvitalFineRedis * Add support to ZRANGE and ZRANGESTORE parameters (#1603) Thanks @AvitalFineRedis * Pre 6.2 redis should default to None for script flush (#1641) Thanks @chayim * Add FULL option to XINFO SUMMARY (#1638) Thanks @agusdmb * Geosearch test should use any=True (#1594) Thanks @Andrew-Chen-Wang * Removing packaging dependency (#1626) Thanks @chayim * Fix client_kill_filter docs for skimpy (#1596) Thanks @Andrew-Chen-Wang * Normalize minid and maxlen docs (#1593) Thanks @Andrew-Chen-Wang * Update docs for multiple usernames for ACL DELUSER (#1595) Thanks @Andrew-Chen-Wang * Fix grammar of get param in set command (#1588) Thanks @Andrew-Chen-Wang * Fix docs for client_kill_filter (#1584) Thanks @Andrew-Chen-Wang * Convert README & CONTRIBUTING from rst to md (#1633) Thanks @davidylee * Test BYLEX param in zrangestore 
(#1634) Thanks @AvitalFineRedis * Tox integrations with invoke and docker (#1632) Thanks @chayim * Adding the release drafter to help simplify release notes (#1618). Thanks @chayim * BACKWARDS INCOMPATIBLE: Removed support for end of life Python 2.7. #1318 * BACKWARDS INCOMPATIBLE: All values within Redis URLs are unquoted via urllib.parse.unquote. Prior versions of redis-py supported this by specifying the ``decode_components`` flag to the ``from_url`` functions. This is now done by default and cannot be disabled. #589 * POTENTIALLY INCOMPATIBLE: Redis commands were moved into a mixin (see commands.py). Anyone importing ``redis.client`` to access commands directly should import ``redis.commands``. #1534, #1550 * Removed technical debt on REDIS_6_VERSION placeholder. Thanks @chayim #1582. * Various docs fixes. Thanks @Andrew-Chen-Wang #1585, #1586. * Support for LOLWUT command, available since Redis 5.0.0. Thanks @brainix #1568. * Added support for CLIENT REPLY, available in Redis 3.2.0. Thanks @chayim #1581. * Support for Auto-reconnect PubSub on get_message. Thanks @luhn #1574. * Fix RST syntax error in README. Thanks @JanCBrammer #1451. * IDLETIME and FREQ support for RESTORE. Thanks @chayim #1580. * Supporting args with MODULE LOAD. Thanks @chayim #1579. * Updating RedisLabs with Redis. Thanks @gkorland #1575. * Added support for ASYNC to SCRIPT FLUSH available in Redis 6.2.0. Thanks @chayim. #1567 * Added CLIENT LIST fix to support multiple client ids available in Redis 2.8.12. Thanks @chayim #1563. * Added DISCARD support for pipelines available in Redis 2.0.0. Thanks @chayim #1565. * Added ACL DELUSER support for deleting lists of users available in Redis 6.2.0. Thanks @chayim. #1562 * Added CLIENT TRACKINFO support available in Redis 6.2.0. Thanks @chayim. #1560 * Added GEOSEARCH and GEOSEARCHSTORE support available in Redis 6.2.0. Thanks @AvitalFineRedis. #1526 * Added LPUSHX support for lists available in Redis 4.0.0. Thanks @chayim. 
#1559 * Added support for QUIT available in Redis 1.0.0. Thanks @chayim. #1558 * Added support for COMMAND COUNT available in Redis 2.8.13. Thanks @chayim. #1554. * Added CREATECONSUMER support for XGROUP available in Redis 6.2.0. Thanks @AvitalFineRedis. #1553 * Including slowlog complexity in INFO if available. Thanks @ian28223 #1489. * Added support for STRALGO available in Redis 6.0.0. Thanks @AvitalFineRedis. #1528 * Added support for ZMSCORE available in Redis 6.2.0. Thanks @2014BDuck and @jiekun.zhu. #1437 * Support MINID and LIMIT on XADD available in Redis 6.2.0. Thanks @AvitalFineRedis. #1548 * Added sentinel commands FLUSHCONFIG, CKQUORUM, FAILOVER, and RESET available in Redis 2.8.12. Thanks @otherpirate. #834 * Migrated Version instead of StrictVersion for Python 3.10. Thanks @tirkarthi. #1552 * Added retry mechanism with backoff. Thanks @nbraun-amazon. #1494 * Migrated commands to a mixin. Thanks @chayim. #1534 * Added support for ZUNION, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1522 * Added support for CLIENT LIST with ID, available in Redis 6.2.0. Thanks @chayim. #1505 * Added support for MINID and LIMIT with xtrim, available in Redis 6.2.0. Thanks @chayim. #1508 * Implemented LMOVE and BLMOVE commands, available in Redis 6.2.0. Thanks @chayim. #1504 * Added GET argument to SET command, available in Redis 6.2.0. Thanks @2014BDuck. #1412 * Documentation fixes. Thanks @enjoy-binbin @jonher937. #1496 #1532 * Added support for XAUTOCLAIM, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1529 * Added IDLE support for XPENDING, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1523 * Add a count parameter to lpop/rpop, available in Redis 6.2.0. Thanks @wavenator. #1487 * Added a (pypy) trove classifier for Python 3.9. Thanks @D3X. #1535 * Added ZINTER support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1520 * Added ZINTER support, available in Redis 6.2.0. Thanks @AvitalFineRedis. 
#1520 * Added ZDIFF and ZDIFFSTORE support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1518 * Added ZRANGESTORE support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1521 * Added LT and GT support for ZADD, available in Redis 6.2.0. Thanks @chayim. #1509 * Added ZRANDMEMBER support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1519 * Added GETDEL support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1514 * Added CLIENT KILL laddr filter, available in Redis 6.2.0. Thanks @chayim. #1506 * Added CLIENT UNPAUSE, available in Redis 6.2.0. Thanks @chayim. #1512 * Added NOMKSTREAM support for XADD, available in Redis 6.2.0. Thanks @chayim. #1507 * Added HRANDFIELD support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1513 * Added CLIENT INFO support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1517 * Added GETEX support, available in Redis 6.2.0. Thanks @AvitalFineRedis. #1515 * Added support for COPY command, available in Redis 6.2.0. Thanks @malinaa96. #1492 * Provide a development and testing environment via docker. Thanks @abrookins. #1365 * Added support for the LPOS command available in Redis 6.0.6. Thanks @aparcar #1353/#1354 * Added support for the ACL LOG command available in Redis 6. Thanks @2014BDuck. #1307 * Added support for ABSTTL option of the RESTORE command available in Redis 5.0. Thanks @charettes. #1423 * 3.5.3 (June 1, 2020) * Restore try/except clauses to __del__ methods. These will be removed in 4.0 when more explicit resource management is enforced. #1339 * Update the master_address when Sentinels promote a new master. #847 * Update SentinelConnectionPool to not forcefully disconnect other in-use connections which can negatively affect threaded applications. #1345 * 3.5.2 (May 14, 2020) * Tune the locking in ConnectionPool.get_connection so that the lock is not held while waiting for the socket to establish and validate the TCP connection. 
* 3.5.1 (May 9, 2020) * Fix for HSET argument validation to allow any non-None key. Thanks @AleksMat, #1337, #1341 * 3.5.0 (April 29, 2020) * Removed exception trapping from __del__ methods. redis-py objects that hold various resources implement __del__ cleanup methods to release those resources when the object goes out of scope. This provides a fallback for when these objects aren't explicitly closed by user code. Prior to this change any errors encountered in closing these resources would be hidden from the user. Thanks @jdufresne. #1281 * Expanded support for connection strings specifying a username connecting to pre-v6 servers. #1274 * Optimized Lock's blocking_timeout and sleep. If the lock cannot be acquired and the sleep value would cause the loop to sleep beyond blocking_timeout, fail immediately. Thanks @clslgrnc. #1263 * Added support for passing Python memoryviews to Redis command args that expect strings or bytes. The memoryview instance is sent directly to the socket such that there are zero copies made of the underlying data during command packing. Thanks @Cody-G. #1265, #1285 * HSET command now can accept multiple pairs. HMSET has been marked as deprecated now. Thanks to @laixintao #1271 * Don't manually DISCARD when encountering an ExecAbortError. Thanks @nickgaya, #1300/#1301 * Reset the watched state of pipelines after calling exec. This saves a roundtrip to the server by not having to call UNWATCH within Pipeline.reset(). Thanks @nickgaya, #1299/#1302 * Added the KEEPTTL option for the SET command. Thanks @laixintao #1304/#1280 * Added the MEMORY STATS command. #1268 * Lock.extend() now has a new option, `replace_ttl`. When False (the default), Lock.extend() adds the `additional_time` to the lock's existing TTL. When replace_ttl=True, the lock's existing TTL is replaced with the value of `additional_time`. * Add testing and support for PyPy. * 3.4.1 * Move the username argument in the Redis and Connection classes to the end of the argument list. 
This helps those poor souls that specify all their connection options as non-keyword arguments. #1276 * Prior to ACL support, redis-py ignored the username component of Connection URLs. With ACL support, usernames are no longer ignored and are used to authenticate against an ACL rule. Some cloud vendors with managed Redis instances (like Heroku) provide connection URLs with a username component pre-ACL that is not intended to be used. Sending that username to Redis servers < 6.0.0 results in an error. Attempt to detect this condition and retry the AUTH command with only the password such that authentication continues to work for these users. #1274 * Removed the __eq__ hooks to Redis and ConnectionPool that were added in 3.4.0. This ended up being a bad idea as two separate connection pools could be considered equal yet manage a completely separate set of connections. * 3.4.0 * Allow empty pipelines to be executed if there are WATCHed keys. This is a convenient way to test if any of the watched keys changed without actually running any other commands. Thanks @brianmaissy. #1233, #1234 * Removed support for end of life Python 3.4. * Added support for all ACL commands in Redis 6. Thanks @IAmATeaPot418 for helping. * Pipeline instances now always evaluate to True. Prior to this change, pipeline instances relied on __len__ for boolean evaluation which meant that pipelines with no commands on the stack would be considered False. #994 * Client instances and Connection pools now support a 'client_name' argument. If supplied, all connections created will call CLIENT SETNAME as soon as the connection is opened. Thanks to @Habbie for supplying the basis of this change. #802 * Added the 'ssl_check_hostname' argument to specify whether SSL connections should require the server hostname to match the hostname specified in the SSL cert. By default 'ssl_check_hostname' is False for backwards compatibility. #1196 * Slightly optimized command packing. Thanks @Deneby67. 
#1255 * Added support for the TYPE argument to SCAN. Thanks @netocp. #1220 * Better thread and fork safety in ConnectionPool and BlockingConnectionPool. Added better locking to synchronize critical sections rather than relying on CPython-specific implementation details relating to atomic operations. Adjusted how the pools identify and deal with a fork. Added a ChildDeadlockedError exception that is raised by child processes in the very unlikely chance that a deadlock is encountered. Thanks @gmbnomis, @mdellweg, @yht804421715. #1270, #1138, #1178, #906, #1262 * Added __eq__ hooks to the Redis and ConnectionPool classes. Thanks @brainix. #1240 * 3.3.11 * Further fix for the SSLError -> TimeoutError mapping to work on obscure releases of Python 2.7. * 3.3.10 * Fixed a potential error handling bug for the SSLError -> TimeoutError mapping introduced in 3.3.9. Thanks @zbristow. #1224 * 3.3.9 * Mapped Python 2.7 SSLError to TimeoutError where appropriate. Timeouts should now consistently raise TimeoutErrors on Python 2.7 for both unsecured and secured connections. Thanks @zbristow. #1222 * 3.3.8 * Fixed MONITOR parsing to properly parse IPv6 client addresses, unix socket connections and commands issued from Lua. Thanks @kukey. #1201 * 3.3.7 * Fixed a regression introduced in 3.3.0 where socket.error exceptions (or subclasses) could potentially be raised instead of redis.exceptions.ConnectionError. #1202 * 3.3.6 * Fixed a regression in 3.3.5 that caused PubSub.get_message() to raise a socket.timeout exception when passing a timeout value. #1200 * 3.3.5 * Fix an issue where socket.timeout errors could be handled by the wrong exception handler in Python 2.7. * 3.3.4 * More specifically identify nonblocking read errors for both SSL and non-SSL connections. 3.3.1, 3.3.2 and 3.3.3 on Python 2.7 could potentially mask a ConnectionError. #1197 * 3.3.3 * The SSL module in Python < 2.7.9 handles non-blocking sockets differently than 2.7.9+. This patch accommodates older versions. 
#1197 * 3.3.2 * Further fixed a regression introduced in 3.3.0 involving SSL and non-blocking sockets. #1197 * 3.3.1 * Fixed a regression introduced in 3.3.0 involving SSL and non-blocking sockets. #1197 * 3.3.0 * Resolve a race condition with the PubSubWorkerThread. #1150 * Cleanup socket read error messages. Thanks Vic Yu. #1159 * Cleanup the Connection's selector correctly. Thanks Bruce Merry. #1153 * Added a Monitor object to make working with MONITOR output easy. Thanks Roey Prat #1033 * Internal cleanup: Removed the legacy Token class which was necessary with older version of Python that are no longer supported. #1066 * Response callbacks are now case insensitive. This allows users that call Redis.execute_command() directly to pass lower-case command names and still get reasonable responses. #1168 * Added support for hiredis-py 1.0.0 encoding error support. This should make the PythonParser and the HiredisParser behave identically when encountering encoding errors. Thanks Brian Candler. #1161/#1162 * All authentication errors now properly raise AuthenticationError. AuthenticationError is now a subclass of ConnectionError, which will cause the connection to be disconnected and cleaned up appropriately. #923 * Add READONLY and READWRITE commands. Thanks @theodesp. #1114 * Remove selectors in favor of nonblocking sockets. Selectors had issues in some environments including eventlet and gevent. This should resolve those issues with no other side effects. * Fixed an issue with XCLAIM and previously claimed but not removed messages. Thanks @thomdask. #1192/#1191 * Allow for single connection client instances. These instances are not thread safe but offer other benefits including a subtle performance increase. * Added extensive health checks that keep the connections lively. 
Passing the "health_check_interval=N" option to the Redis client class or to a ConnectionPool ensures that a round trip PING/PONG is successful before any command if the underlying connection has been idle for more than N seconds. ConnectionErrors and TimeoutErrors are automatically retried once for health checks. * Changed the PubSubWorkerThread to use a threading.Event object rather than a boolean to control the thread's life cycle. Thanks Timothy Rule. #1194/#1195. * Fixed a bug in Pipeline error handling that would incorrectly retry ConnectionErrors. * 3.2.1 * Fix SentinelConnectionPool to work in multiprocess/forked environments. * 3.2.0 * Added support for `select.poll` to test whether data can be read on a socket. This should allow for significantly more connections to be used with pubsub. Fixes #486/#1115 * Attempt to guarantee that the ConnectionPool hands out healthy connections. Healthy connections are those that have an established socket connection to the Redis server, are ready to accept a command and have no data available to read. Fixes #1127/#886 * Use the socket.IPPROTO_TCP constant instead of socket.SOL_TCP. IPPROTO_TCP is available on more interpreters (Jython for instance). Thanks @Junnplus. #1130 * Fixed a regression introduced in 3.0 that mishandles exceptions not derived from the base Exception class. KeyboardInterrupt and gevent.timeout notable. Thanks Christian Fersch. #1128/#1129 * Significant improvements to handing connections with forked processes. Parent and child processes no longer trample on each others' connections. Thanks to Jay Rolette for the patch and highlighting this issue. #504/#732/#784/#863 * PythonParser no longer closes the associated connection's socket. The connection itself will close the socket. #1108/#1085 * 3.1.0 * Connection URLs must have one of the following schemes: redis://, rediss://, unix://. Thanks @jdupl123. 
#961/#969 * Fixed an issue with retry_on_timeout logic that caused some TimeoutErrors to be retried. Thanks Aaron Yang. #1022/#1023 * Added support for SNI for SSL. Thanks @oridistor and Roey Prat. #1087 * Fixed ConnectionPool repr for pools with no connections. Thanks Cody Scott. #1043/#995 * Fixed GEOHASH to return a None value when specifying a place that doesn't exist on the server. Thanks @guybe7. #1126 * Fixed XREADGROUP to return an empty dictionary for messages that have been deleted but still exist in the unacknowledged queue. Thanks @xeizmendi. #1116 * Added an owned method to Lock objects. owned returns a boolean indicating whether the current lock instance still owns the lock. Thanks Dave Johansen. #1112 * Allow lock.acquire() to accept an optional token argument. If provided, the token argument is used as the unique value used to claim the lock. Thanks Dave Johansen. #1112 * Added a reacquire method to Lock objects. reacquire attempts to renew the lock such that the timeout is extended to the same value that the lock was initially acquired with. Thanks Ihor Kalnytskyi. #1014 * Stream names found within XREAD and XREADGROUP responses now properly respect the decode_responses flag. * XPENDING_RANGE now requires the user to specify the min, max and count arguments. Newer versions of Redis prevent count from being infinite so it's left to the user to specify these values explicitly. * ZADD now returns None when xx=True and incr=True and an element is specified that doesn't exist in the sorted set. This matches what the server returns in this case. #1084 * Added client_kill_filter that accepts various filters to identify and kill clients. Thanks Theofanis Despoudis. #1098 * Fixed a race condition that occurred when unsubscribing and resubscribing to the same channel or pattern in rapid succession. Thanks Marcin Raczyński. #764 * Added a LockNotOwnedError that is raised when trying to extend or release a lock that is no longer owned. 
This is a subclass of LockError so previous code should continue to work as expected. Thanks Joshua Harlow. #1095 * Fixed a bug in GEORADIUS that forced decoding of places without respecting the decode_responses option. Thanks Bo Bayles. #1082 * 3.0.1 * Fixed regression with UnixDomainSocketConnection caused by 3.0.0. Thanks Jyrki Muukkonen * Fixed an issue with the new asynchronous flag on flushdb and flushall. Thanks rogeryen * Updated Lock.locked() method to indicate whether *any* process has acquired the lock, not just the current one. This is in line with the behavior of threading.Lock. Thanks Alan Justino da Silva * 3.0.0 BACKWARDS INCOMPATIBLE CHANGES * When using a Lock as a context manager and the lock fails to be acquired a LockError is now raised. This prevents the code block inside the context manager from being executed if the lock could not be acquired. * Renamed LuaLock to Lock. * Removed the pipeline based Lock implementation in favor of the LuaLock implementation. * Only bytes, strings and numbers (ints, longs and floats) are acceptable for keys and values. Previously redis-py attempted to cast other types to str() and store the result. This caused much confusion and frustration when passing boolean values (cast to 'True' and 'False') or None values (cast to 'None'). It is now the user's responsibility to cast all key names and values to bytes, strings or numbers before passing the value to redis-py. * The StrictRedis class has been renamed to Redis. StrictRedis will continue to exist as an alias of Redis for the foreseeable future. * The legacy Redis client class has been removed. It caused much confusion to users. * ZINCRBY arguments 'value' and 'amount' have swapped order to match the Redis server. The new argument order is: keyname, amount, value. * MGET no longer raises an error if zero keys are passed in. Instead an empty list is returned. 
* MSET and MSETNX now require all keys/values to be specified in a single dictionary argument named mapping. This was changed to allow for future options to these commands. * ZADD now requires all element names/scores be specified in a single dictionary argument named mapping. This was required to allow the NX, XX, CH and INCR options to be specified. * ssl_cert_reqs now has a default value of 'required'. This should make connecting to a remote Redis server over SSL more secure. Thanks u2mejc * Removed support for EOL Python 2.6 and 3.3. Thanks jdufresne OTHER CHANGES * Added missing DECRBY command. Thanks derek-dchu * CLUSTER INFO and CLUSTER NODES responses are now properly decoded to strings. * Added a 'locked()' method to Lock objects. This method returns True if the lock has been acquired and owned by the current process, otherwise False. * EXISTS now supports multiple keys. Its return value is now the number of keys in the list that exist. * Ensure all commands can accept key names as bytes. This fixes issues with BLPOP, BRPOP and SORT. * All errors resulting from bad user input are raised as DataError exceptions. DataError is a subclass of RedisError so this should be transparent to anyone previously catching these. * Added support for NX, XX, CH and INCR options to ZADD * Added support for the MIGRATE command * Added support for the MEMORY USAGE and MEMORY PURGE commands. Thanks Itamar Haber * Added support for the 'asynchronous' argument to FLUSHDB and FLUSHALL commands. Thanks Itamar Haber * Added support for the BITFIELD command. Thanks Charles Leifer and Itamar Haber * Improved performance on pipeline requests with large chunks of data. Thanks tzickel * Fixed test suite to not fail if another client is connected to the server the tests are running against. * Added support for SWAPDB. Thanks Itamar Haber * Added support for all STREAM commands. 
Thanks Roey Prat and Itamar Haber * SHUTDOWN now accepts the 'save' and 'nosave' arguments. Thanks dwilliams-kenzan * Added support for ZPOPMAX, ZPOPMIN, BZPOPMAX, BZPOPMIN. Thanks Itamar Haber * Added support for the 'type' argument in CLIENT LIST. Thanks Roey Prat * Added support for CLIENT PAUSE. Thanks Roey Prat * Added support for CLIENT ID and CLIENT UNBLOCK. Thanks Itamar Haber * GEODIST now returns a None value when referencing a place that does not exist. Thanks qingping209 * Added a ping() method to pubsub objects. Thanks krishan-carbon * Fixed a bug with keys in the INFO dict that contained ':' symbols. Thanks mzalimeni * Fixed the select system call retry compatibility with Python 2.x. Thanks lddubeau * max_connections is now a valid querystring argument for creating connection pools from URLs. Thanks mmaslowskicc * Added the UNLINK command. Thanks yozel * Added socket_type option to Connection for configurability. Thanks garlicnation * Lock.do_acquire now atomically acquires the lock and sets the expire value via set(nx=True, px=timeout). Thanks 23doors * Added 'count' argument to SPOP. Thanks AlirezaSadeghi * Fixed an issue parsing client_list responses that contained an '='. Thanks swilly22 * 2.10.6 * Various performance improvements. Thanks cjsimpson * Fixed a bug with SRANDMEMBER where the behavior for `number=0` did not match the spec. Thanks Alex Wang * Added HSTRLEN command. Thanks Alexander Putilin * Added the TOUCH command. Thanks Anis Jonischkeit * Remove unnecessary calls to the server when registering Lua scripts. Thanks Ben Greenberg * SET's EX and PX arguments now allow values of zero. Thanks huangqiyin * Added PUBSUB {CHANNELS, NUMPAT, NUMSUB} commands. Thanks Angus Pearson * PubSub connections that encounter `InterruptedError`s now retry automatically. Thanks Carlton Gibson and Seth M. Larson * LPUSH and RPUSH commands run on PyPy now correctly return the number of items of the list. 
Thanks Jeong YunWon * Added support to automatically retry socket EINTR errors. Thanks Thomas Steinacher * PubSubWorker threads started with `run_in_thread` are now daemonized so the thread shuts down when the running process goes away. Thanks Keith Ainsworth * Added support for GEO commands. Thanks Pau Freixes, Alex DeBrie and Abraham Toriz * Made client construction from URLs smarter. Thanks Tim Savage * Added support for CLUSTER * commands. Thanks Andy Huang * The RESTORE command now accepts an optional `replace` boolean. Thanks Yoshinari Takaoka * Attempt to connect to a new Sentinel if a TimeoutError occurs. Thanks Bo Lopker * Fixed a bug in the client's `__getitem__` where a KeyError would be raised if the value returned by the server is an empty string. Thanks Javier Candeira. * Socket timeouts when connecting to a server are now properly raised as TimeoutErrors. * 2.10.5 * Allow URL encoded parameters in Redis URLs. Characters like a "/" can now be URL encoded and redis-py will correctly decode them. Thanks Paul Keene. * Added support for the WAIT command. Thanks * Better shutdown support for the PubSub Worker Thread. It now properly cleans up the connection, unsubscribes from any channels and patterns previously subscribed to and consumes any waiting messages on the socket. * Added the ability to sleep for a brief period in the event of a WatchError occurring. Thanks Joshua Harlow. * Fixed a bug with pipeline error reporting when dealing with characters in error messages that could not be encoded to the connection's character set. Thanks Hendrik Muhs. * Fixed a bug in Sentinel connections that would inadvertently connect to the master when the connection pool resets. Thanks * Better timeout support in Pubsub get_message. Thanks Andy Isaacson. * Fixed a bug with the HiredisParser that would cause the parser to get stuck in an endless loop if a specific number of bytes were delivered from the socket. 
This fix also increases performance of parsing large responses from the Redis server. * Added support for ZREVRANGEBYLEX. * ConnectionErrors are now raised if Redis refuses a connection due to the maxclients limit being exceeded. Thanks Roman Karpovich. * max_connections can now be set when instantiating client instances. Thanks Ohad Perry. * 2.10.4 (skipped due to a PyPI snafu) * 2.10.3 * Fixed a bug with the bytearray support introduced in 2.10.2. Thanks Josh Owen. * 2.10.2 * Added support for Hiredis's new bytearray support. Thanks * POSSIBLE BACKWARDS INCOMPATIBLE CHANGE: Fixed a possible race condition when multiple threads share the same Lock instance with a timeout. Lock tokens are now stored in thread local storage by default. If you have code that acquires a lock in one thread and passes that lock instance to another thread to release it, you need to disable thread local storage. Refer to the doc strings on the Lock class about the thread_local argument information. * Fixed a regression in from_url where "charset" and "errors" weren't valid options. "encoding" and "encoding_errors" are still accepted and preferred. * The "charset" and "errors" options have been deprecated. Passing either to StrictRedis.__init__ or from_url will still work but will also emit a DeprecationWarning. Instead use the "encoding" and "encoding_errors" options. * Fixed a compatibility bug with Python 3 when the server closes a connection. * Added BITPOS command. Thanks . * Fixed a bug when attempting to send large values to Redis in a Pipeline. * 2.10.1 * Fixed a bug where Sentinel connections to a server that's no longer a master and receives a READONLY error will disconnect and reconnect to the master. * 2.10.0 * Discontinued support for Python 2.5. Upgrade. You'll be happier. * The HiRedis parser will now properly raise ConnectionErrors. * Completely refactored PubSub support. Fixes all known PubSub bugs and adds a bunch of new features. 
Docs can be found in the README under the new "Publish / Subscribe" section. * Added the new HyperLogLog commands (PFADD, PFCOUNT, PFMERGE). Thanks Pepijn de Vos and Vincent Ohprecio. * Updated TTL and PTTL commands with Redis 2.8+ semantics. Thanks Markus Kaiserswerth. * *SCAN commands now return a long (int on Python3) cursor value rather than the string representation. This might be slightly backwards incompatible in code using*SCAN commands loops such as "while cursor != '0':". * Added extra *SCAN commands that return iterators instead of the normal [cursor, data] type. Use scan_iter, hscan_iter, sscan_iter, and zscan_iter for iterators. Thanks Mathieu Longtin. * Added support for SLOWLOG commands. Thanks Rick van Hattem. * Added lexicographical commands ZRANGEBYLEX, ZREMRANGEBYLEX, and ZLEXCOUNT for sorted sets. * Connection objects now support an optional argument, socket_read_size, indicating how much data to read during each socket.recv() call. After benchmarking, increased the default size to 64k, which dramatically improves performance when fetching large values, such as many results in a pipeline or a large (>1MB) string value. * Improved the pack_command and send_packed_command functions to increase performance when sending large (>1MB) values. * Sentinel Connections to master servers now detect when a READONLY error is encountered and disconnect themselves and all other active connections to the same master so that the new master can be discovered. * Fixed Sentinel state parsing on Python 3. * Added support for SENTINEL MONITOR, SENTINEL REMOVE, and SENTINEL SET commands. Thanks Greg Murphy. * INFO output that doesn't follow the "key:value" format will now be appended to a key named "__raw__" in the INFO dictionary. Thanks Pedro Larroy. * The "vagrant" directory contains a complete vagrant environment for redis-py developers. The environment runs a Redis master, a Redis slave, and 3 Sentinels. 
Future iterations of the test suite will incorporate more integration style tests, ensuring things like failover happen correctly. * It's now possible to create connection pool instances from a URL. StrictRedis.from_url() now uses this feature to create a connection pool instance and use that when creating a new client instance. Thanks * When creating client instances or connection pool instances from an URL, it's now possible to pass additional options to the connection pool with querystring arguments. * Fixed a bug where some encodings (like utf-16) were unusable on Python 3 as command names and literals would get encoded. * Added an SSLConnection class that allows for secure connections through stunnel or other means. Construct an SSL connection with the ssl=True option on client classes, using the rediss:// scheme from an URL, or by passing the SSLConnection class to a connection pool's connection_class argument. Thanks . * Added a socket_connect_timeout option to control how long to wait while establishing a TCP connection before timing out. This lets the client fail fast when attempting to connect to a downed server while keeping a more lenient timeout for all other socket operations. * Added TCP Keep-alive support by passing use the socket_keepalive=True option. Finer grain control can be achieved using the socket_keepalive_options option which expects a dictionary with any of the keys (socket.TCP_KEEPIDLE, socket.TCP_KEEPCNT, socket.TCP_KEEPINTVL) and integers for values. Thanks Yossi Gottlieb. * Added a `retry_on_timeout` option that controls how socket.timeout errors are handled. By default it is set to False and will cause the client to raise a TimeoutError anytime a socket.timeout is encountered. If `retry_on_timeout` is set to True, the client will retry a command that timed out once like other `socket.error`s. * Completely refactored the Lock system. 
There is now a LuaLock class that's used when the Redis server is capable of running Lua scripts along with a fallback class for Redis servers < 2.6. The new locks fix several subtle race conditions that the old lock could face. In addition, a new method, "extend" is available on lock instances that allows a lock owner to extend the amount of time they have the lock for. Thanks to Eli Finkelshteyn and for contributions. * 2.9.1 * IPv6 support. Thanks * 2.9.0 * Performance improvement for packing commands when using the PythonParser. Thanks Guillaume Viot. * Executing an empty pipeline transaction no longer sends MULTI/EXEC to the server. Thanks EliFinkelshteyn. * Errors when authenticating (incorrect password) and selecting a database now close the socket. * Full Sentinel support thanks to Vitja Makarov. Thanks! * Better repr support for client and connection pool instances. Thanks Mark Roberts. * Error messages that the server sends to the client are now included in the client error message. Thanks Sangjin Lim. * Added the SCAN, SSCAN, HSCAN, and ZSCAN commands. Thanks Jingchao Hu. * ResponseErrors generated by pipeline execution provide additional context including the position of the command in the pipeline and the actual command text that generated the error. * ConnectionPools now play nicer in threaded environments that fork. Thanks Christian Joergensen. * 2.8.0 * redis-py should play better with gevent when a gevent Timeout is raised. Thanks leifkb. * Added SENTINEL command. Thanks Anna Janackova. * Fixed a bug where pipelines could potentially corrupt a connection if the MULTI command generated a ResponseError. Thanks EliFinkelshteyn for the report. * Connections now call socket.shutdown() prior to socket.close() to ensure communication ends immediately per the note at Thanks to David Martin for pointing this out. * Lock checks are now based on floats rather than ints. Thanks Vitja Makarov. * 2.7.6 * Added CONFIG RESETSTAT command. Thanks Yossi Gottlieb. 
* Fixed a bug introduced in 2.7.3 that caused issues with script objects and pipelines. Thanks Carpentier Pierre-Francois. * Converted redis-py's test suite to use the awesome py.test library. * Fixed a bug introduced in 2.7.5 that prevented a ConnectionError from being raised when the Redis server is LOADING data. * Added a BusyLoadingError exception that's raised when the Redis server is starting up and not accepting commands yet. BusyLoadingError subclasses ConnectionError, which this state previously returned. Thanks Yossi Gottlieb. * 2.7.5 * DEL, HDEL and ZREM commands now return the numbers of keys deleted instead of just True/False. * from_url now supports URIs with a port number. Thanks Aaron Westendorf. * 2.7.4 * Added missing INCRBY method. Thanks Krzysztof Dorosz. * SET now accepts the EX, PX, NX and XX options from Redis 2.6.12. These options will generate errors if these options are used when connected to a Redis server < 2.6.12. Thanks George Yoshida. * 2.7.3 * Fixed a bug with BRPOPLPUSH and lists with empty strings. * All empty except: clauses have been replaced to only catch Exception subclasses. This prevents a KeyboardInterrupt from triggering exception handlers. Thanks Lucian Branescu Mihaila. * All exceptions that are the result of redis server errors now share a command Exception subclass, ServerError. Thanks Matt Robenolt. * Prevent DISCARD from being called if MULTI wasn't also called. Thanks Pete Aykroyd. * SREM now returns an integer indicating the number of items removed from the set. Thanks . * Fixed a bug with BGSAVE and BGREWRITEAOF response callbacks with Python3. Thanks Nathan Wan. * Added CLIENT GETNAME and CLIENT SETNAME commands. Thanks . * It's now possible to use len() on a pipeline instance to determine the number of commands that will be executed. Thanks Jon Parise. * Fixed a bug in INFO's parse routine with floating point numbers. Thanks Ali Onur Uyar. * Fixed a bug with BITCOUNT to allow `start` and `end` to both be zero. 
Thanks Tim Bart. * The transaction() method now accepts a boolean keyword argument, value_from_callable. By default, or if False is passed, the transaction() method will return the value of the pipeline's execution. Otherwise, it will return whatever func() returns. * Python3 compatibility fix ensuring we're not already bytes(). Thanks Salimane Adjao Moustapha. * Added PSETEX. Thanks YAMAMOTO Takashi. * Added a BlockingConnectionPool to limit the number of connections that can be created. Thanks James Arthur. * SORT now accepts a `groups` option that if specified, will return tuples of n-length, where n is the number of keys specified in the GET argument. This allows for convenient row-based iteration. Thanks Ionuț Arțăriși. * 2.7.2 * Parse errors are now *always* raised on multi/exec pipelines, regardless of the `raise_on_error` flag. See for more info. * 2.7.1 * Packaged tests with source code * 2.7.0 * Added BITOP and BITCOUNT commands. Thanks Mark Tozzi. * Added the TIME command. Thanks Jason Knight. * Added support for LUA scripting. Thanks to Angus Peart, Drew Smathers, Issac Kelly, Louis-Philippe Perron, Sean Bleier, Jeffrey Kaditz, and Dvir Volk for various patches and contributions to this feature. * Changed the default error handling in pipelines. By default, the first error in a pipeline will now be raised. A new parameter to the pipeline's execute, `raise_on_error`, can be set to False to keep the old behavior of embedding the exception instances in the result. * Fixed a bug with pipelines where parse errors won't corrupt the socket. * Added the optional `number` argument to SRANDMEMBER for use with Redis 2.6+ servers. * Added PEXPIRE/PEXPIREAT/PTTL commands. Thanks Luper Rouch. * Added INCRBYFLOAT/HINCRBYFLOAT commands. Thanks Nikita Uvarov. * High precision floating point values won't lose their precision when being sent to the Redis server. Thanks Jason Oster and Oleg Pudeyev. 
* Added CLIENT LIST/CLIENT KILL commands * 2.6.2 * `from_url` is now available as a classmethod on client classes. Thanks Jon Parise for the patch. * Fixed several encoding errors resulting from the Python 3.x support. * 2.6.1 * Python 3.x support! Big thanks to Alex Grönholm. * Fixed a bug in the PythonParser's read_response that could hide an error from the client (#251). * 2.6.0 * Changed (p)subscribe and (p)unsubscribe to no longer return messages indicating the channel was subscribed/unsubscribed to. These messages are available in the listen() loop instead. This is to prevent the following scenario: * Client A is subscribed to "foo" * Client B publishes message to "foo" * Client A subscribes to channel "bar" at the same time. Prior to this change, the subscribe() call would return the published messages on "foo" rather than the subscription confirmation to "bar". * Added support for GETRANGE, thanks Jean-Philippe Caruana * A new setting "decode_responses" specifies whether return values from Redis commands get decoded automatically using the client's charset value. Thanks to Frankie Dintino for the patch. * 2.4.13 * redis.from_url() can take an URL representing a Redis connection string and return a client object. Thanks Kenneth Reitz for the patch. * 2.4.12 * ConnectionPool is now fork-safe. Thanks Josiah Carson for the patch. * 2.4.11 * AuthenticationError will now be correctly raised if an invalid password is supplied. * If Hiredis is unavailable, the HiredisParser will raise a RedisError if selected manually. * Made the INFO command more tolerant of Redis changes formatting. Fix for #217. * 2.4.10 * Buffer reads from socket in the PythonParser. Fix for a Windows-specific bug (#205). * Added the OBJECT and DEBUG OBJECT commands. * Added __del__ methods for classes that hold on to resources that need to be cleaned up. This should prevent resource leakage when these objects leave scope due to misuse or unhandled exceptions. 
Thanks David Wolever for the suggestion. * Added the ECHO command for completeness. * Fixed a bug where attempting to subscribe to a PubSub channel of a Redis server that's down would blow out the stack. Fixes #179 and #195. Thanks Ovidiu Predescu for the test case. * StrictRedis's TTL command now returns a -1 when querying a key with no expiration. The Redis class continues to return None. * ZADD and SADD now return integer values indicating the number of items added. Thanks Homer Strong. * Renamed the base client class to StrictRedis, replacing ZADD and LREM in favor of their official argument order. The Redis class is now a subclass of StrictRedis, implementing the legacy redis-py implementations of ZADD and LREM. Docs have been updated to suggesting the use of StrictRedis. * SETEX in StrictRedis is now compliant with official Redis SETEX command. the name, value, time implementation moved to "Redis" for backwards compatibility. * 2.4.9 * Removed socket retry logic in Connection. This is the responsibility of the caller to determine if the command is safe and can be retried. Thanks David Wolver. * Added some extra guards around various types of exceptions being raised when sending or parsing data. Thanks David Wolver and Denis Bilenko. * 2.4.8 * Imported with_statement from __future__ for Python 2.5 compatibility. * 2.4.7 * Fixed a bug where some connections were not getting released back to the connection pool after pipeline execution. * Pipelines can now be used as context managers. This is the preferred way of use to ensure that connections get cleaned up properly. Thanks David Wolever. * Added a convenience method called transaction() on the base Redis class. This method eliminates much of the boilerplate used when using pipelines to watch Redis keys. See the documentation for details on usage. * 2.4.6 * Variadic arguments for SADD, SREM, ZREN, HDEL, LPUSH, and RPUSH. Thanks Raphaël Vinot. 
* (CRITICAL) Fixed an error in the Hiredis parser that occasionally caused the socket connection to become corrupted and unusable. This became noticeable once connection pools started to be used. * ZRANGE, ZREVRANGE, ZRANGEBYSCORE, and ZREVRANGEBYSCORE now take an additional optional argument, score_cast_func, which is a callable used to cast the score value in the return type. The default is float. * Removed the PUBLISH method from the PubSub class. Connections that are [P]SUBSCRIBEd cannot issue PUBLISH commands, so it doesn't make sense to have it here. * Pipelines now contain WATCH and UNWATCH. Calling WATCH or UNWATCH from the base client class will result in a deprecation warning. After WATCHing one or more keys, the pipeline will be placed in immediate execution mode until UNWATCH or MULTI are called. Refer to the new pipeline docs in the README for more information. Thanks to David Wolever and Randall Leeds for greatly helping with this. * 2.4.5 * The PythonParser now works better when reading zero length strings. * 2.4.4 * Fixed a typo introduced in 2.4.3 * 2.4.3 * Fixed a bug in the UnixDomainSocketConnection caused when trying to form an error message after a socket error. * 2.4.2 * Fixed a bug in pipeline that caused an exception while trying to reconnect after a connection timeout. * 2.4.1 * Fixed a bug in the PythonParser if disconnect is called before connect. * 2.4.0 * WARNING: 2.4 contains several backwards incompatible changes. * Completely refactored Connection objects. Moved much of the Redis protocol packing for requests here, and eliminated the nasty dependencies it had on the client to do AUTH and SELECT commands on connect. * Connection objects now have a parser attribute. Parsers are responsible for reading data Redis sends. Two parsers ship with redis-py: a PythonParser and the HiRedis parser. redis-py will automatically use the HiRedis parser if you have the Python hiredis module installed, otherwise it will fall back to the PythonParser. 
You can force or the other, or even an external one by passing the `parser_class` argument to ConnectionPool. * Added a UnixDomainSocketConnection for users wanting to talk to the Redis instance running on a local machine only. You can use this connection by passing it to the `connection_class` argument of the ConnectionPool. * Connections no longer derive from threading.local. See threading.local note below. * ConnectionPool has been completely refactored. The ConnectionPool now maintains a list of connections. The redis-py client only hangs on to a ConnectionPool instance, calling get_connection() anytime it needs to send a command. When get_connection() is called, the command name and any keys involved in the command are passed as arguments. Subclasses of ConnectionPool could use this information to identify the shard the keys belong to and return a connection to it. ConnectionPool also implements disconnect() to force all connections in the pool to disconnect from the Redis server. * redis-py no longer support the SELECT command. You can still connect to a specific database by specifying it when instantiating a client instance or by creating a connection pool. If you need to talk to multiple databases within your application, you should use a separate client instance for each database you want to talk to. * Completely refactored Publish/Subscribe support. The subscribe and listen commands are no longer available on the redis-py Client class. Instead, the `pubsub` method returns an instance of the PubSub class which contains all publish/subscribe support. Note, you can still PUBLISH from the redis-py client class if you desire. * Removed support for all previously deprecated commands or options. * redis-py no longer uses threading.local in any way. Since the Client class no longer holds on to a connection, it's no longer needed. 
You can now pass client instances between threads, and commands run on those threads will retrieve an available connection from the pool, use it and release it. It should now be trivial to use redis-py with eventlet or greenlet. * ZADD now accepts pairs of value=score keyword arguments. This should help resolve the long standing #72. The older value and score arguments have been deprecated in favor of the keyword argument style. * Client instances now get their own copy of RESPONSE_CALLBACKS. The new set_response_callback method adds a user defined callback to the instance. * Support Jython, fixing #97. Thanks to Adam Vandenberg for the patch. * Using __getitem__ now properly raises a KeyError when the key is not found. Thanks Ionuț Arțăriși for the patch. * Newer Redis versions return a LOADING message for some commands while the database is loading from disk during server start. This could cause problems with SELECT. We now force a socket disconnection prior to raising a ResponseError so subsequent connections have to reconnect and re-select the appropriate database. Thanks to Benjamin Anderson for finding this and fixing. * 2.2.4 * WARNING: Potential backwards incompatible change - Changed order of parameters of ZREVRANGEBYSCORE to match those of the actual Redis command. This is only backwards-incompatible if you were passing max and min via keyword args. If passing by normal args, nothing in user code should have to change. Thanks Stéphane Angel for the fix. * Fixed INFO to properly parse the Redis data correctly for both 2.2.x and 2.3+. Thanks Stéphane Angel for the fix. * Lock objects now store their timeout value as a float. This allows floats to be used as timeout values. No changes to existing code required. * WATCH now supports multiple keys. Thanks Rich Schumacher. * Broke out some code that was Python 2.4 incompatible. redis-py should now be usable on 2.4, but this hasn't actually been tested. Thanks Dan Colish for the patch. 
* Optimized some code using izip and islice. Should have a pretty good speed up on larger data sets. Thanks Dan Colish. * Better error handling when submitting an empty mapping to HMSET. Thanks Dan Colish. * Subscription status is now reset after every (re)connection. * 2.2.3 * Added support for Hiredis. To use, simply "pip install hiredis" or "easy_install hiredis". Thanks for Pieter Noordhuis for the hiredis-py bindings and the patch to redis-py. * The connection class is chosen based on whether hiredis is installed or not. To force the use of the PythonConnection, simply create your own ConnectionPool instance with the connection_class argument assigned to to PythonConnection class. * Added missing command ZREVRANGEBYSCORE. Thanks Jay Baird for the patch. * The INFO command should be parsed correctly on 2.2.x server versions and is backwards compatible with older versions. Thanks Brett Hoerner. * 2.2.2 * Fixed a bug in ZREVRANK where retrieving the rank of a value not in the zset would raise an error. * Fixed a bug in Connection.send where the errno import was getting overwritten by a local variable. * Fixed a bug in SLAVEOF when promoting an existing slave to a master. * Reverted change of download URL back to redis-VERSION.tar.gz. 2.2.1's change of this actually broke Pypi for Pip installs. Sorry! * 2.2.1 * Changed archive name to redis-py-VERSION.tar.gz to not conflict with the Redis server archive. * 2.2.0 * Implemented SLAVEOF * Implemented CONFIG as config_get and config_set * Implemented GETBIT/SETBIT * Implemented BRPOPLPUSH * Implemented STRLEN * Implemented PERSIST * Implemented SETRANGE * Changed type annotation of the `num` parameter in `zrange` from `int` to `Optional[int]redis-py-6.1.0/CONTRIBUTING.md000066400000000000000000000132351501061645500155120ustar00rootroot00000000000000# Contributing ## Introduction We appreciate your interest in considering contributing to redis-py. Community contributions mean a lot to us. 
## Contributions we need You may already know how you'd like to contribute, whether it's a fix for a bug you encountered, or a new feature your team wants to use. If you don't know where to start, consider improving documentation, bug triaging, and writing tutorials are all examples of helpful contributions that mean less work for you. ## Your First Contribution Unsure where to begin contributing? You can start by looking through [help-wanted issues](https://github.com/andymccurdy/redis-py/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted). Never contributed to open source before? Here are a couple of friendly tutorials: - - ## Getting Started Here's how to get started with your code contribution: 1. Create your own fork of redis-py 2. Do the changes in your fork 3. Create a virtualenv and install the development dependencies from the dev_requirements.txt file: ``` python -m venv .venv source .venv/bin/activate pip install -r dev_requirements.txt pip install -e .[jwt] ``` 4. If you need a development environment, run `invoke devenv`. Note: this relies on docker-compose to build environments, and assumes that you have a version supporting [docker profiles](https://docs.docker.com/compose/profiles/). 5. While developing, make sure the tests pass by running `invoke tests` 6. If you like the change and think the project could use it, send a pull request To see what else is part of the automation, run `invoke -l` ## The Development Environment Running `invoke devenv` installs the development dependencies specified in the dev_requirements.txt. It starts all of the dockers used by this project, and leaves them running. These can be easily cleaned up with `invoke clean`. NOTE: it is assumed that the user running these tests, can execute docker and its various commands. 
- A master Redis node - A Redis replica node - Three sentinel Redis nodes - A redis cluster - An stunnel docker, fronting the master Redis node The replica node, is a replica of the master node, using the [leader-follower replication](https://redis.io/topics/replication) feature. The sentinels monitor the master node in a [sentinel high-availability configuration](https://redis.io/topics/sentinel). ## Testing Call `invoke tests` to run all tests, or `invoke all-tests` to run linters tests as well. With the 'tests' and 'all-tests' targets, all Redis and RedisCluster tests will be run. It is possible to run only Redis client tests (with cluster mode disabled) by using `invoke standalone-tests`; similarly, RedisCluster tests can be run by using `invoke cluster-tests`. Each run of tests starts and stops the various dockers required. Sometimes things get stuck, an `invoke clean` can help. ## Documentation If relevant, update the code documentation, via docstrings, or in `/docs`. You can check how the documentation looks locally by running `invoke build-docs` and loading the generated HTML files in a browser. Historically there is a mix of styles in the docstrings, but the preferred way of documenting code is by applying the [Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Type hints should be added according to PEP484, and should not be repeated in the docstrings. ### Docker Tips Following are a few tips that can help you work with the Docker-based development environment. To get a bash shell inside of a container: `$ docker run -it /bin/bash` Containers run a minimal Debian image that probably lacks tools you want to use. 
To install packages, first get a bash session (see previous tip) and then run: `$ apt update && apt install ` You can see the logging output of a container like this: `$ docker logs -f ` ### Troubleshooting If you get any errors when running `make dev` or `make test`, make sure that you are using supported versions of Docker. Please use at least the following version of Docker: - Docker 19.03.12 ## How to Report a Bug ### Security Vulnerabilities **NOTE**: If you find a security vulnerability, do NOT open an issue. Email [Redis Open Source ()](mailto:oss@redis.com) instead. In order to determine whether you are dealing with a security issue, ask yourself these two questions: - Can I access something that's not mine, or something I shouldn't have access to? - Can I disable something for other people? If the answer to either of those two questions is *yes*, then you're probably dealing with a security issue. Note that even if you answer *no* to both questions, you may still be dealing with a security issue, so if you're unsure, just email [us](mailto:oss@redis.com). ### Everything Else When filing an issue, make sure to answer these five questions: 1. What version of redis-py are you using? 2. What version of redis are you using? 3. What did you do? 4. What did you expect to see? 5. What did you see instead? ## Suggest a feature or enhancement If you'd like to contribute a new feature, make sure you check our issue list to see if someone has already proposed it. Work may already be underway on the feature you want or we may have rejected a feature like it already. If you don't see anything, open a new issue that describes the feature you would like and how it should work. ## Code review process The core team regularly looks at pull requests. We will provide feedback as soon as possible. After receiving our feedback, please respond within two weeks. After that time, we may close your PR if it isn't showing any activity. 
redis-py-6.1.0/LICENSE000066400000000000000000000020621501061645500142620ustar00rootroot00000000000000MIT License Copyright (c) 2022-2023, Redis, inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. redis-py-6.1.0/README.md000066400000000000000000000211671501061645500145430ustar00rootroot00000000000000# redis-py The Python interface to the Redis key-value store. 
[![CI](https://github.com/redis/redis-py/workflows/CI/badge.svg?branch=master)](https://github.com/redis/redis-py/actions?query=workflow%3ACI+branch%3Amaster) [![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis-py.readthedocs.io/en/stable/) [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) [![pypi](https://badge.fury.io/py/redis.svg)](https://pypi.org/project/redis/) [![pre-release](https://img.shields.io/github/v/release/redis/redis-py?include_prereleases&label=latest-prerelease)](https://github.com/redis/redis-py/releases) [![codecov](https://codecov.io/gh/redis/redis-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/redis/redis-py) [Installation](#installation) | [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md) --------------------------------------------- **Note:** redis-py 5.0 will be the last version of redis-py to support Python 3.7, as it has reached [end of life](https://devguide.python.org/versions/). redis-py 5.1 will support Python 3.8+. --------------------------------------------- ## How do I Redis? [Learn for free at Redis University](https://redis.io/learn/university) [Try the Redis Cloud](https://redis.io/try-free/) [Dive in developer tutorials](https://redis.io/learn) [Join the Redis community](https://redis.io/community/) [Work at Redis](https://redis.io/careers/) ## Installation Start a redis via docker (for Redis versions >= 8.0): ``` bash $ docker run -p 6379:6379 -it redis:latest ``` Start a redis via docker (for Redis versions < 8.0): ``` bash $ docker run -p 6379:6379 -it redis/redis-stack:latest ``` To install redis-py, simply: ``` bash $ pip install redis ``` For faster performance, install redis with hiredis support, this provides a compiled response parser, and *for most cases* requires zero code changes. 
By default, if hiredis >= 1.0 is available, redis-py will attempt to use it for response parsing. ``` bash $ pip install "redis[hiredis]" ``` Looking for a high-level library to handle object mapping? See [redis-om-python](https://github.com/redis/redis-om-python)! ## Supported Redis Versions The most recent version of this library supports Redis version [7.2](https://github.com/redis/redis/blob/7.2/00-RELEASENOTES), [7.4](https://github.com/redis/redis/blob/7.4/00-RELEASENOTES) and [8.0](https://github.com/redis/redis/blob/8.0/00-RELEASENOTES). The table below highlights version compatibility of the most-recent library versions and redis versions. | Library version | Supported redis versions | |-----------------|-------------------| | 3.5.3 | <= 6.2 Family of releases | | >= 4.5.0 | Version 5.0 to 7.0 | | >= 5.0.0 | Version 5.0 to 7.4 | | >= 6.0.0 | Version 7.2 to current | ## Usage ### Basic Example ``` python >>> import redis >>> r = redis.Redis(host='localhost', port=6379, db=0) >>> r.set('foo', 'bar') True >>> r.get('foo') b'bar' ``` The above code connects to localhost on port 6379, sets a value in Redis, and retrieves it. All responses are returned as bytes in Python, to receive decoded strings, set *decode_responses=True*. For this, and more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html). #### RESP3 Support To enable support for RESP3, ensure you have at least version 5.0 of the client, and change your connection object to include *protocol=3* ``` python >>> import redis >>> r = redis.Redis(host='localhost', port=6379, db=0, protocol=3) ``` ### Connection Pools By default, redis-py uses a connection pool to manage connections. Each instance of a Redis class receives its own connection pool. You can however define your own [redis.ConnectionPool](https://redis.readthedocs.io/en/stable/connections.html#connection-pools). 
``` python >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) >>> r = redis.Redis(connection_pool=pool) ``` Alternatively, you might want to look at [Async connections](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#async-cluster-client). ### Redis Commands There is built-in support for all of the [out-of-the-box Redis commands](https://redis.io/commands). They are exposed using the raw Redis command names (`HSET`, `HGETALL`, etc.) except where a word (i.e. del) is reserved by the language. The complete set of commands can be found [here](https://github.com/redis/redis-py/tree/master/redis/commands), or [the documentation](https://redis.readthedocs.io/en/stable/commands.html). ## Advanced Topics The [official Redis command documentation](https://redis.io/commands) does a great job of explaining each command in detail. redis-py attempts to adhere to the official command syntax. There are a few exceptions: - **MULTI/EXEC**: These are implemented as part of the Pipeline class. The pipeline is wrapped with the MULTI and EXEC statements by default when it is executed, which can be disabled by specifying transaction=False. See more about Pipelines below. - **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate class as it places the underlying connection in a state where it can\'t execute non-pubsub commands. Calling the pubsub method from the Redis client will return a PubSub instance where you can subscribe to channels and listen for messages. You can only call PUBLISH from the Redis client (see [this comment on issue #151](https://github.com/redis/redis-py/issues/151#issuecomment-1545015) for details). 
For more details, please see the documentation on [advanced topics page](https://redis.readthedocs.io/en/stable/advanced_features.html). ### Pipelines The following is a basic example of a [Redis pipeline](https://redis.io/docs/manual/pipelining/), a method to optimize round-trip calls, by batching Redis commands, and receiving their results as a list. ``` python >>> pipe = r.pipeline() >>> pipe.set('foo', 5) >>> pipe.set('bar', 18.5) >>> pipe.set('blee', "hello world!") >>> pipe.execute() [True, True, True] ``` ### PubSub The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs/manual/pubsub/) to subscribe to specific channels. ``` python >>> r = redis.Redis(...) >>> p = r.pubsub() >>> p.subscribe('my-first-channel', 'my-second-channel', ...) >>> p.get_message() {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1} ``` ### Redis’ search and query capabilities default dialect Release 6.0.0 introduces a client-side default dialect for Redis’ search and query capabilities. By default, the client now overrides the server-side dialect with version 2, automatically appending *DIALECT 2* to commands like *FT.AGGREGATE* and *FT.SEARCH*. **Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by configuring the client accordingly. 
``` python >>> from redis.commands.search.field import TextField >>> from redis.commands.search.query import Query >>> from redis.commands.search.index_definition import IndexDefinition >>> import redis >>> r = redis.Redis(host='localhost', port=6379, db=0) >>> r.ft().create_index( >>> (TextField("name"), TextField("lastname")), >>> definition=IndexDefinition(prefix=["test:"]), >>> ) >>> r.hset("test:1", "name", "James") >>> r.hset("test:1", "lastname", "Brown") >>> # Query with default DIALECT 2 >>> query = "@name: James Brown" >>> q = Query(query) >>> res = r.ft().search(q) >>> # Query with explicit DIALECT 1 >>> query = "@name: James Brown" >>> q = Query(query).dialect(1) >>> res = r.ft().search(q) ``` You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/). --------------------------------------------- ### Author redis-py is developed and maintained by [Redis Inc](https://redis.io). It can be found [here]( https://github.com/redis/redis-py), or downloaded from [pypi](https://pypi.org/project/redis/). Special thanks to: - Andy McCurdy () the original author of redis-py. - Ludovico Magnocavallo, author of the original Python Redis client, from which some of the socket code is still used. - Alexander Solovyov for ideas on the generic response callback system. - Paul Hubbard for initial packaging support. 
[![Redis](./docs/_static/logo-redis.svg)](https://redis.io)redis-py-6.1.0/benchmarks/000077500000000000000000000000001501061645500153725ustar00rootroot00000000000000redis-py-6.1.0/benchmarks/__init__.py000066400000000000000000000000001501061645500174710ustar00rootroot00000000000000redis-py-6.1.0/benchmarks/base.py000066400000000000000000000025131501061645500166570ustar00rootroot00000000000000import functools import itertools import sys import timeit import redis class Benchmark: ARGUMENTS = () def __init__(self): self._client = None def get_client(self, **kwargs): # eventually make this more robust and take optional args from # argparse if self._client is None or kwargs: defaults = {"db": 9} defaults.update(kwargs) pool = redis.ConnectionPool(**kwargs) self._client = redis.Redis(connection_pool=pool) return self._client def setup(self, **kwargs): pass def run(self, **kwargs): pass def run_benchmark(self): group_names = [group["name"] for group in self.ARGUMENTS] group_values = [group["values"] for group in self.ARGUMENTS] for value_set in itertools.product(*group_values): pairs = list(zip(group_names, value_set)) arg_string = ", ".join(f"{p[0]}={p[1]}" for p in pairs) sys.stdout.write(f"Benchmark: {arg_string}... ") sys.stdout.flush() kwargs = dict(pairs) setup = functools.partial(self.setup, **kwargs) run = functools.partial(self.run, **kwargs) t = timeit.timeit(stmt=run, setup=setup, number=1000) sys.stdout.write(f"{t:f}\n") sys.stdout.flush() redis-py-6.1.0/benchmarks/basic_operations.py000066400000000000000000000114021501061645500212660ustar00rootroot00000000000000import time from argparse import ArgumentParser from functools import wraps import redis def parse_args(): parser = ArgumentParser() parser.add_argument( "-n", type=int, help="Total number of requests (default 100000)", default=100000 ) parser.add_argument( "-P", type=int, help=("Pipeline requests. 
Default 1 (no pipeline)."), default=1, ) parser.add_argument( "-s", type=int, help="Data size of SET/GET value in bytes (default 2)", default=2, ) args = parser.parse_args() return args def run(): args = parse_args() r = redis.Redis() r.flushall() set_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) set_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) get_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) get_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) incr(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) lpush(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) lrange_300(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) lpop(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) hmset(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s) def timer(func): @wraps(func) def wrapper(*args, **kwargs): start = time.monotonic() ret = func(*args, **kwargs) duration = time.monotonic() - start if "num" in kwargs: count = kwargs["num"] else: count = args[1] print(f"{func.__name__} - {count} Requests") print(f"Duration = {duration}") print(f"Rate = {count / duration}") print() return ret return wrapper @timer def set_str(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() set_data = "a".ljust(data_size, "0") for i in range(num): conn.set(f"set_str:{i}", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def set_int(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() set_data = 10 ** (data_size - 1) for i in range(num): conn.set(f"set_int:{i}", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def get_str(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() for i in range(num): conn.get(f"set_str:{i}") if pipeline_size > 1 and i % pipeline_size == 0: 
conn.execute() if pipeline_size > 1: conn.execute() @timer def get_int(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() for i in range(num): conn.get(f"set_int:{i}") if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def incr(conn, num, pipeline_size, *args, **kwargs): if pipeline_size > 1: conn = conn.pipeline() for i in range(num): conn.incr("incr_key") if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def lpush(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() set_data = 10 ** (data_size - 1) for i in range(num): conn.lpush("lpush_key", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def lrange_300(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() for i in range(num): conn.lrange("lpush_key", i, i + 300) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def lpop(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() for i in range(num): conn.lpop("lpush_key") if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() @timer def hmset(conn, num, pipeline_size, data_size): if pipeline_size > 1: conn = conn.pipeline() set_data = {"str_value": "string", "int_value": 123456, "float_value": 123456.0} for i in range(num): conn.hmset("hmset_key", set_data) if pipeline_size > 1 and i % pipeline_size == 0: conn.execute() if pipeline_size > 1: conn.execute() if __name__ == "__main__": run() redis-py-6.1.0/benchmarks/cluster_async.py000066400000000000000000000161021501061645500206220ustar00rootroot00000000000000import asyncio import functools import time import aioredis_cluster import aredis import uvloop import redis.asyncio as redispy def timer(func): 
@functools.wraps(func) async def wrapper(*args, **kwargs): tic = time.perf_counter() await func(*args, **kwargs) toc = time.perf_counter() return f"{toc - tic:.4f}" return wrapper @timer async def set_str(client, gather, data): if gather: for _ in range(count // 100): await asyncio.gather( *( asyncio.create_task(client.set(f"bench:str_{i}", data)) for i in range(100) ) ) else: for i in range(count): await client.set(f"bench:str_{i}", data) @timer async def set_int(client, gather, data): if gather: for _ in range(count // 100): await asyncio.gather( *( asyncio.create_task(client.set(f"bench:int_{i}", data)) for i in range(100) ) ) else: for i in range(count): await client.set(f"bench:int_{i}", data) @timer async def get_str(client, gather): if gather: for _ in range(count // 100): await asyncio.gather( *(asyncio.create_task(client.get(f"bench:str_{i}")) for i in range(100)) ) else: for i in range(count): await client.get(f"bench:str_{i}") @timer async def get_int(client, gather): if gather: for _ in range(count // 100): await asyncio.gather( *(asyncio.create_task(client.get(f"bench:int_{i}")) for i in range(100)) ) else: for i in range(count): await client.get(f"bench:int_{i}") @timer async def hset(client, gather, data): if gather: for _ in range(count // 100): await asyncio.gather( *( asyncio.create_task(client.hset("bench:hset", str(i), data)) for i in range(100) ) ) else: for i in range(count): await client.hset("bench:hset", str(i), data) @timer async def hget(client, gather): if gather: for _ in range(count // 100): await asyncio.gather( *( asyncio.create_task(client.hget("bench:hset", str(i))) for i in range(100) ) ) else: for i in range(count): await client.hget("bench:hset", str(i)) @timer async def incr(client, gather): if gather: for _ in range(count // 100): await asyncio.gather( *(asyncio.create_task(client.incr("bench:incr")) for i in range(100)) ) else: for i in range(count): await client.incr("bench:incr") @timer async def lpush(client, gather, 
data): if gather: for _ in range(count // 100): await asyncio.gather( *( asyncio.create_task(client.lpush("bench:lpush", data)) for i in range(100) ) ) else: for i in range(count): await client.lpush("bench:lpush", data) @timer async def lrange_300(client, gather): if gather: for _ in range(count // 100): await asyncio.gather( *( asyncio.create_task(client.lrange("bench:lpush", i, i + 300)) for i in range(100) ) ) else: for i in range(count): await client.lrange("bench:lpush", i, i + 300) @timer async def lpop(client, gather): if gather: for _ in range(count // 100): await asyncio.gather( *(asyncio.create_task(client.lpop("bench:lpush")) for i in range(100)) ) else: for i in range(count): await client.lpop("bench:lpush") @timer async def warmup(client): await asyncio.gather( *(asyncio.create_task(client.exists(f"bench:warmup_{i}")) for i in range(100)) ) @timer async def run(client, gather): data_str = "a" * size data_int = int("1" * size) if gather is False: for ret in await asyncio.gather( asyncio.create_task(set_str(client, gather, data_str)), asyncio.create_task(set_int(client, gather, data_int)), asyncio.create_task(hset(client, gather, data_str)), asyncio.create_task(incr(client, gather)), asyncio.create_task(lpush(client, gather, data_int)), ): print(ret) for ret in await asyncio.gather( asyncio.create_task(get_str(client, gather)), asyncio.create_task(get_int(client, gather)), asyncio.create_task(hget(client, gather)), asyncio.create_task(lrange_300(client, gather)), asyncio.create_task(lpop(client, gather)), ): print(ret) else: print(await set_str(client, gather, data_str)) print(await set_int(client, gather, data_int)) print(await hset(client, gather, data_str)) print(await incr(client, gather)) print(await lpush(client, gather, data_int)) print(await get_str(client, gather)) print(await get_int(client, gather)) print(await hget(client, gather)) print(await lrange_300(client, gather)) print(await lpop(client, gather)) async def main(loop, gather=None): 
arc = aredis.StrictRedisCluster( host=host, port=port, password=password, max_connections=2**31, max_connections_per_node=2**31, readonly=False, reinitialize_steps=count, skip_full_coverage_check=True, decode_responses=False, max_idle_time=count, idle_check_interval=count, ) print(f"{loop} {gather} {await warmup(arc)} aredis") print(await run(arc, gather=gather)) arc.connection_pool.disconnect() aiorc = await aioredis_cluster.create_redis_cluster( [(host, port)], password=password, state_reload_interval=count, idle_connection_timeout=count, pool_maxsize=2**31, ) print(f"{loop} {gather} {await warmup(aiorc)} aioredis-cluster") print(await run(aiorc, gather=gather)) aiorc.close() await aiorc.wait_closed() async with redispy.RedisCluster( host=host, port=port, password=password, reinitialize_steps=count, read_from_replicas=False, decode_responses=False, max_connections=2**31, ) as rca: print(f"{loop} {gather} {await warmup(rca)} redispy") print(await run(rca, gather=gather)) if __name__ == "__main__": host = "localhost" port = 16379 password = None count = 10000 size = 256 asyncio.run(main("asyncio")) asyncio.run(main("asyncio", gather=False)) asyncio.run(main("asyncio", gather=True)) uvloop.install() asyncio.run(main("uvloop")) asyncio.run(main("uvloop", gather=False)) asyncio.run(main("uvloop", gather=True)) redis-py-6.1.0/benchmarks/cluster_async_pipeline.py000066400000000000000000000050451501061645500225130ustar00rootroot00000000000000import asyncio import functools import time import aioredis_cluster import aredis import uvloop import redis.asyncio as redispy def timer(func): @functools.wraps(func) async def wrapper(*args, **kwargs): tic = time.perf_counter() await func(*args, **kwargs) toc = time.perf_counter() return f"{toc - tic:.4f}" return wrapper @timer async def warmup(client): await asyncio.gather( *(asyncio.create_task(client.exists(f"bench:warmup_{i}")) for i in range(100)) ) @timer async def run(client): data_str = "a" * size data_int = int("1" * size) 
for i in range(count): with client.pipeline() as pipe: await ( pipe.set(f"bench:str_{i}", data_str) .set(f"bench:int_{i}", data_int) .get(f"bench:str_{i}") .get(f"bench:int_{i}") .hset("bench:hset", str(i), data_str) .hget("bench:hset", str(i)) .incr("bench:incr") .lpush("bench:lpush", data_int) .lrange("bench:lpush", 0, 300) .lpop("bench:lpush") .execute() ) async def main(loop): arc = aredis.StrictRedisCluster( host=host, port=port, password=password, max_connections=2**31, max_connections_per_node=2**31, readonly=False, reinitialize_steps=count, skip_full_coverage_check=True, decode_responses=False, max_idle_time=count, idle_check_interval=count, ) print(f"{loop} {await warmup(arc)} aredis") print(await run(arc)) arc.connection_pool.disconnect() aiorc = await aioredis_cluster.create_redis_cluster( [(host, port)], password=password, state_reload_interval=count, idle_connection_timeout=count, pool_maxsize=2**31, ) print(f"{loop} {await warmup(aiorc)} aioredis-cluster") print(await run(aiorc)) aiorc.close() await aiorc.wait_closed() async with redispy.RedisCluster( host=host, port=port, password=password, reinitialize_steps=count, read_from_replicas=False, decode_responses=False, max_connections=2**31, ) as rca: print(f"{loop} {await warmup(rca)} redispy") print(await run(rca)) if __name__ == "__main__": host = "localhost" port = 16379 password = None count = 10000 size = 256 asyncio.run(main("asyncio")) uvloop.install() asyncio.run(main("uvloop")) redis-py-6.1.0/benchmarks/command_packer_benchmark.py000066400000000000000000000063111501061645500227220ustar00rootroot00000000000000from base import Benchmark from redis.connection import SYM_CRLF, SYM_DOLLAR, SYM_EMPTY, SYM_STAR, Connection class StringJoiningConnection(Connection): def send_packed_command(self, command, check_health=True): "Send an already packed command to the Redis server" if not self._sock: self.connect() try: self._sock.sendall(command) except OSError as e: self.disconnect() if len(e.args) == 1: 
_errno, errmsg = "UNKNOWN", e.args[0] else: _errno, errmsg = e.args raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.") except Exception: self.disconnect() raise def pack_command(self, *args): "Pack a series of arguments into a value Redis command" args_output = SYM_EMPTY.join( [ SYM_EMPTY.join( (SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF) ) for k in map(self.encoder.encode, args) ] ) output = SYM_EMPTY.join( (SYM_STAR, str(len(args)).encode(), SYM_CRLF, args_output) ) return output class ListJoiningConnection(Connection): def send_packed_command(self, command, check_health=True): if not self._sock: self.connect() try: if isinstance(command, str): command = [command] for item in command: self._sock.sendall(item) except OSError as e: self.disconnect() if len(e.args) == 1: _errno, errmsg = "UNKNOWN", e.args[0] else: _errno, errmsg = e.args raise ConnectionError(f"Error {_errno} while writing to socket. {errmsg}.") except Exception: self.disconnect() raise def pack_command(self, *args): output = [] buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF)) for k in map(self.encoder.encode, args): if len(buff) > 6000 or len(k) > 6000: buff = SYM_EMPTY.join( (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF) ) output.append(buff) output.append(k) buff = SYM_CRLF else: buff = SYM_EMPTY.join( (buff, SYM_DOLLAR, str(len(k)).encode(), SYM_CRLF, k, SYM_CRLF) ) output.append(buff) return output class CommandPackerBenchmark(Benchmark): ARGUMENTS = ( { "name": "connection_class", "values": [StringJoiningConnection, ListJoiningConnection], }, { "name": "value_size", "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000], }, ) def setup(self, connection_class, value_size): self.get_client(connection_class=connection_class) def run(self, connection_class, value_size): r = self.get_client() x = "a" * value_size r.set("benchmark", x) if __name__ == "__main__": CommandPackerBenchmark().run_benchmark() 
redis-py-6.1.0/benchmarks/socket_read_size.py000066400000000000000000000014351501061645500212640ustar00rootroot00000000000000from base import Benchmark from redis.connection import PythonParser, _HiredisParser class SocketReadBenchmark(Benchmark): ARGUMENTS = ( {"name": "parser", "values": [PythonParser, _HiredisParser]}, { "name": "value_size", "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000], }, {"name": "read_size", "values": [4096, 8192, 16384, 32768, 65536, 131072]}, ) def setup(self, value_size, read_size, parser): r = self.get_client(parser_class=parser, socket_read_size=read_size) r.set("benchmark", "a" * value_size) def run(self, value_size, read_size, parser): r = self.get_client() r.get("benchmark") if __name__ == "__main__": SocketReadBenchmark().run_benchmark() redis-py-6.1.0/codecov.yml000066400000000000000000000003201501061645500154150ustar00rootroot00000000000000ignore: - "benchmarks/**" - "tasks.py" codecov: require_ci_to_pass: yes coverage: precision: 2 round: down range: "80...100" status: patch: off # off for now as it yells about everything redis-py-6.1.0/dev_requirements.txt000066400000000000000000000003321501061645500173750ustar00rootroot00000000000000build click==8.0.4 invoke==2.2.0 mock packaging>=20.4 pytest pytest-asyncio>=0.23.0 pytest-cov pytest-profiling==1.8.1 pytest-timeout ruff==0.9.6 ujson>=4.2.0 uvloop vulture>=2.3.0 numpy>=1.24.0 redis-entraid==0.4.0b2 redis-py-6.1.0/docker-compose.yml000066400000000000000000000052171501061645500167170ustar00rootroot00000000000000--- # image tag 8.0-RC2-pre is the one matching the 8.0 GA release x-client-libs-stack-image: &client-libs-stack-image image: "redislabs/client-libs-test:${CLIENT_LIBS_TEST_STACK_IMAGE_TAG:-8.0-RC2-pre}" x-client-libs-image: &client-libs-image image: "redislabs/client-libs-test:${CLIENT_LIBS_TEST_IMAGE_TAG:-8.0-RC2-pre}" services: redis: <<: *client-libs-image container_name: redis-standalone environment: - TLS_ENABLED=yes - REDIS_CLUSTER=no - 
PORT=6379 - TLS_PORT=6666 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} ports: - 6379:6379 - 6666:6666 # TLS port volumes: - "./dockers/standalone:/redis/work" profiles: - standalone - sentinel - replica - all-stack - all replica: <<: *client-libs-image container_name: redis-replica depends_on: - redis environment: - TLS_ENABLED=no - REDIS_CLUSTER=no - PORT=6380 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --replicaof redis 6379 --protected-mode no --save ""} ports: - 6380:6380 volumes: - "./dockers/replica:/redis/work" profiles: - replica - all-stack - all cluster: <<: *client-libs-image container_name: redis-cluster environment: - REDIS_CLUSTER=yes - NODES=6 - REPLICAS=1 - TLS_ENABLED=yes - PORT=16379 - TLS_PORT=27379 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} ports: - "16379-16384:16379-16384" - "27379-27384:27379-27384" volumes: - "./dockers/cluster:/redis/work" profiles: - cluster - all-stack - all sentinel: <<: *client-libs-image container_name: redis-sentinel depends_on: - redis environment: - REDIS_CLUSTER=no - NODES=3 - PORT=26379 command: ${REDIS_EXTRA_ARGS:---sentinel} ports: - 26379:26379 - 26380:26380 - 26381:26381 volumes: - "./dockers/sentinel.conf:/redis/config-default/redis.conf" - "./dockers/sentinel:/redis/work" profiles: - sentinel - all-stack - all redis-stack: <<: *client-libs-stack-image container_name: redis-stack environment: - REDIS_CLUSTER=no - PORT=6379 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --save ""} ports: - 6479:6379 volumes: - "./dockers/redis-stack:/redis/work" profiles: - standalone - all-stack - all 
redis-py-6.1.0/dockers/000077500000000000000000000000001501061645500147075ustar00rootroot00000000000000redis-py-6.1.0/dockers/sentinel.conf000066400000000000000000000003241501061645500173760ustar00rootroot00000000000000sentinel resolve-hostnames yes sentinel monitor redis-py-test redis 6379 2 sentinel down-after-milliseconds redis-py-test 5000 sentinel failover-timeout redis-py-test 60000 sentinel parallel-syncs redis-py-test 1redis-py-6.1.0/docs/000077500000000000000000000000001501061645500142055ustar00rootroot00000000000000redis-py-6.1.0/docs/Makefile000066400000000000000000000127041501061645500156510ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/redis-py.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/redis-py.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/redis-py" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/redis-py" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." 
info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." redis-py-6.1.0/docs/_static/000077500000000000000000000000001501061645500156335ustar00rootroot00000000000000redis-py-6.1.0/docs/_static/.keep000066400000000000000000000000001501061645500165460ustar00rootroot00000000000000redis-py-6.1.0/docs/_static/logo-redis.svg000066400000000000000000000075451501061645500204330ustar00rootroot00000000000000 redis-py-6.1.0/docs/_templates/000077500000000000000000000000001501061645500163425ustar00rootroot00000000000000redis-py-6.1.0/docs/_templates/.keep000066400000000000000000000000001501061645500172550ustar00rootroot00000000000000redis-py-6.1.0/docs/advanced_features.rst000066400000000000000000000536301501061645500204110ustar00rootroot00000000000000Advanced Features ================= A note about threading ---------------------- Redis client instances can safely be shared between threads. Internally, connection instances are only retrieved from the connection pool during command execution, and returned to the pool directly after. Command execution never modifies state on the client instance. However, there is one caveat: the Redis SELECT command. 
The SELECT command allows you to switch the database currently in use by the connection. That database remains selected until another is selected or until the connection is closed. This creates an issue in that connections could be returned to the pool that are connected to a different database. As a result, redis-py does not implement the SELECT command on client instances. If you use multiple Redis databases within the same application, you should create a separate client instance (and possibly a separate connection pool) for each database. It is not safe to pass PubSub or Pipeline objects between threads. Pipelines --------- Default pipelines ~~~~~~~~~~~~~~~~~ Pipelines are a subclass of the base Redis class that provide support for buffering multiple commands to the server in a single request. They can be used to dramatically increase the performance of groups of commands by reducing the number of back-and-forth TCP packets between the client and server. Pipelines are quite simple to use: .. code:: python >>> r = redis.Redis(...) >>> r.set('bing', 'baz') >>> # Use the pipeline() method to create a pipeline instance >>> pipe = r.pipeline() >>> # The following SET commands are buffered >>> pipe.set('foo', 'bar') >>> pipe.get('bing') >>> # the EXECUTE call sends all buffered commands to the server, returning >>> # a list of responses, one for each command. >>> pipe.execute() [True, b'baz'] For ease of use, all commands being buffered into the pipeline return the pipeline object itself. Therefore calls can be chained like: .. code:: python >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() [True, True, 6] In addition, pipelines can also ensure the buffered commands are executed atomically as a group. This happens by default. If you want to disable the atomic nature of a pipeline but still want to buffer commands, you can turn off transactions. .. 
code:: python >>> pipe = r.pipeline(transaction=False) A common issue occurs when requiring atomic transactions but needing to retrieve values in Redis prior for use within the transaction. For instance, let's assume that the INCR command didn't exist and we need to build an atomic version of INCR in Python. The completely naive implementation could GET the value, increment it in Python, and SET the new value back. However, this is not atomic because multiple clients could be doing this at the same time, each getting the same value from GET. Enter the WATCH command. WATCH provides the ability to monitor one or more keys prior to starting a transaction. If any of those keys change prior the execution of that transaction, the entire transaction will be canceled and a WatchError will be raised. To implement our own client-side INCR command, we could do something like this: .. code:: python >>> with r.pipeline() as pipe: ... while True: ... try: ... # put a WATCH on the key that holds our sequence value ... pipe.watch('OUR-SEQUENCE-KEY') ... # after WATCHing, the pipeline is put into immediate execution ... # mode until we tell it to start buffering commands again. ... # this allows us to get the current value of our sequence ... current_value = pipe.get('OUR-SEQUENCE-KEY') ... next_value = int(current_value) + 1 ... # now we can put the pipeline back into buffered mode with MULTI ... pipe.multi() ... pipe.set('OUR-SEQUENCE-KEY', next_value) ... # and finally, execute the pipeline (the set command) ... pipe.execute() ... # if a WatchError wasn't raised during execution, everything ... # we just did happened atomically. ... break ... except WatchError: ... # another client must have changed 'OUR-SEQUENCE-KEY' between ... # the time we started WATCHing it and the pipeline's execution. ... # our best bet is to just retry. ... 
continue Note that, because the Pipeline must bind to a single connection for the duration of a WATCH, care must be taken to ensure that the connection is returned to the connection pool by calling the reset() method. If the Pipeline is used as a context manager (as in the example above) reset() will be called automatically. Of course you can do this the manual way by explicitly calling reset(): .. code:: python >>> pipe = r.pipeline() >>> while True: ... try: ... pipe.watch('OUR-SEQUENCE-KEY') ... ... ... pipe.execute() ... break ... except WatchError: ... continue ... finally: ... pipe.reset() A convenience method named "transaction" exists for handling all the boilerplate of handling and retrying watch errors. It takes a callable that should expect a single parameter, a pipeline object, and any number of keys to be WATCHed. Our client-side INCR command above can be written like this, which is much easier to read: .. code:: python >>> def client_side_incr(pipe): ... current_value = pipe.get('OUR-SEQUENCE-KEY') ... next_value = int(current_value) + 1 ... pipe.multi() ... pipe.set('OUR-SEQUENCE-KEY', next_value) >>> >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') [True] Be sure to call pipe.multi() in the callable passed to Redis.transaction prior to any write commands. Pipelines in clusters ~~~~~~~~~~~~~~~~~~~~~ ClusterPipeline is a subclass of RedisCluster that provides support for Redis pipelines in cluster mode. When calling the execute() command, all the commands are grouped by the node on which they will be executed, and are then executed by the respective nodes in parallel. The pipeline instance will wait for all the nodes to respond before returning the result to the caller. Command responses are returned as a list sorted in the same order in which they were sent. Pipelines can be used to dramatically increase the throughput of Redis Cluster by significantly reducing the number of network round trips between the client and the server. .. 
code:: python >>> rc = RedisCluster() >>> with rc.pipeline() as pipe: ... pipe.set('foo', 'value1') ... pipe.set('bar', 'value2') ... pipe.get('foo') ... pipe.get('bar') ... print(pipe.execute()) [True, True, b'value1', b'value2'] ... pipe.set('foo1', 'bar1').get('foo1').execute() [True, b'bar1'] Please note: - RedisCluster pipelines currently only support key-based commands. - The pipeline gets its ‘load_balancing_strategy’ value from the cluster’s parameter. Thus, if read from replications is enabled in the cluster instance, the pipeline will also direct read commands to replicas. Transactions in clusters ~~~~~~~~~~~~~~~~~~~~~~~~ Transactions are supported in cluster-mode with one caveat: all keys of all commands issued on a transaction pipeline must reside on the same slot. This is similar to the limitation of multikey commands in cluster. The reason behind this is that the Redis engine does not offer a mechanism to block or exchange key data across nodes on the fly. A client may add some logic to abstract engine limitations when running on a cluster, such as the pipeline behavior explained on the previous block, but there is no simple way that a client can enforce atomicity across nodes on a distributed system. The compromise of limiting the transaction pipeline to same-slot keys is exactly that: a compromise. While this behavior is different from non-transactional cluster pipelines, it simplifies migration of clients from standalone to cluster under some circumstances. Note that application code that issues multi/exec commands on a standalone client without embedding them within a pipeline would eventually get ‘AttributeError’. With this approach, if the application uses ‘client.pipeline(transaction=True)’, then switching the client with a cluster-aware instance would simplify code changes (to some extent). This may be true for application code that makes use of hash keys, since its transactions may already be mapping all commands to the same slot. 
An alternative is some kind of two-step commit solution, where a slot validation is run before the actual commands are run. This could work with controlled node maintenance but does not cover single node failures. Given the cluster limitations for transactions, by default pipeline isn't in transactional mode. To enable transactional context set: .. code:: python >>> p = rc.pipeline(transaction=True) After entering the transactional context you can add commands to a transactional context, by one of the following ways: .. code:: python >>> p = rc.pipeline(transaction=True) # Chaining commands >>> p.set("key", "value") >>> p.get("key") >>> response = p.execute() Or .. code:: python >>> with rc.pipeline(transaction=True) as pipe: # Using context manager ... pipe.set("key", "value") ... pipe.get("key") ... response = pipe.execute() As you see there's no need to explicitly send `MULTI/EXEC` commands to control context start/end `ClusterPipeline` will take care of it. To ensure that different keys will be mapped to a same hash slot on the server side prepend your keys with the same hash tag, the technique that allows you to control keys distribution. More information `here `_ .. code:: python >>> with rc.pipeline(transaction=True) as pipe: ... pipe.set("{tag}foo", "bar") ... pipe.set("{tag}bar", "foo") ... pipe.get("{tag}foo") ... pipe.get("{tag}bar") ... response = pipe.execute() CAS Transactions ~~~~~~~~~~~~~~~~~~~~~~~~ If you want to apply optimistic locking for certain keys, you have to execute `WATCH` command in transactional context. `WATCH` command follows the same limitations as any other multi key command - all keys should be mapped to the same hash slot. 
However, the difference between CAS transaction and normal one is that you have to explicitly call MULTI command to indicate the start of transactional context, WATCH command itself and any subsequent commands before MULTI will be immediately executed on the server side so you can apply optimistic locking and get necessary data before transaction execution. .. code:: python >>> with rc.pipeline(transaction=True) as pipe: ... pipe.watch("mykey") # Apply locking by immediately executing command ... val = pipe.get("mykey") # Immediately retrieves value ... val = val + 1 # Increment value ... pipe.multi() # Starting transaction context ... pipe.set("mykey", val) # Command will be pipelined ... response = pipe.execute() # Returns OK or None if key was modified in the meantime Publish / Subscribe ------------------- redis-py includes a PubSub object that subscribes to channels and listens for new messages. Creating a PubSub object is easy. .. code:: python >>> r = redis.Redis(...) >>> p = r.pubsub() Once a PubSub instance is created, channels and patterns can be subscribed to. .. code:: python >>> p.subscribe('my-first-channel', 'my-second-channel', ...) >>> p.psubscribe('my-*', ...) The PubSub instance is now subscribed to those channels/patterns. The subscription confirmations can be seen by reading messages from the PubSub instance. .. code:: python >>> p.get_message() {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1} >>> p.get_message() {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2} >>> p.get_message() {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3} Every message read from a PubSub instance will be a dictionary with the following keys. 
- **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', 'punsubscribe', 'message', 'pmessage' - **channel**: The channel [un]subscribed to or the channel a message was published to - **pattern**: The pattern that matched a published message's channel. Will be None in all cases except for 'pmessage' types. - **data**: The message data. With [un]subscribe messages, this value will be the number of channels and patterns the connection is currently subscribed to. With [p]message messages, this value will be the actual published message. Let's send a message now. .. code:: python # the publish method returns the number matching channel and pattern # subscriptions. 'my-first-channel' matches both the 'my-first-channel' # subscription and the 'my-*' pattern subscription, so this message will # be delivered to 2 channels/patterns >>> r.publish('my-first-channel', 'some data') 2 >>> p.get_message() {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'} >>> p.get_message() {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'} Unsubscribing works just like subscribing. If no arguments are passed to [p]unsubscribe, all channels or patterns will be unsubscribed from. .. code:: python >>> p.unsubscribe() >>> p.punsubscribe('my-*') >>> p.get_message() {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'} >>> p.get_message() {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'} >>> p.get_message() {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'} redis-py also allows you to register callback functions to handle published messages. Message handlers take a single argument, the message, which is a dictionary just like the examples above. To subscribe to a channel or pattern with a message handler, pass the channel or pattern name as a keyword argument with its value being the callback function. 
When a message is read on a channel or pattern with a message handler, the message dictionary is created and passed to the message handler. In this case, a None value is returned from get_message() since the message was already handled. .. code:: python >>> def my_handler(message): ... print('MY HANDLER: ', message['data']) >>> p.subscribe(**{'my-channel': my_handler}) # read the subscribe confirmation message >>> p.get_message() {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1} >>> r.publish('my-channel', 'awesome data') 1 # for the message handler to work, we need tell the instance to read data. # this can be done in several ways (read more below). we'll just use # the familiar get_message() function for now >>> message = p.get_message() MY HANDLER: awesome data # note here that the my_handler callback printed the string above. # `message` is None because the message was handled by our handler. >>> print(message) None If your application is not interested in the (sometimes noisy) subscribe/unsubscribe confirmation messages, you can ignore them by passing ignore_subscribe_messages=True to r.pubsub(). This will cause all subscribe/unsubscribe messages to be read, but they won't bubble up to your application. .. code:: python >>> p = r.pubsub(ignore_subscribe_messages=True) >>> p.subscribe('my-channel') >>> p.get_message() # hides the subscribe message and returns None >>> r.publish('my-channel', 'my data') 1 >>> p.get_message() {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'} There are three different strategies for reading messages. The examples above have been using pubsub.get_message(). Behind the scenes, get_message() uses the system's 'select' module to quickly poll the connection's socket. If there's data available to be read, get_message() will read it, format the message and return it or pass it to a message handler. If there's no data to be read, get_message() will immediately return None. 
This makes it trivial to integrate into an existing event loop inside your application. .. code:: python >>> while True: >>> message = p.get_message() >>> if message: >>> # do something with the message >>> time.sleep(0.001) # be nice to the system :) Older versions of redis-py only read messages with pubsub.listen(). listen() is a generator that blocks until a message is available. If your application doesn't need to do anything else but receive and act on messages received from redis, listen() is an easy way to get up an running. .. code:: python >>> for message in p.listen(): ... # do something with the message The third option runs an event loop in a separate thread. pubsub.run_in_thread() creates a new thread and starts the event loop. The thread object is returned to the caller of run_in_thread(). The caller can use the thread.stop() method to shut down the event loop and thread. Behind the scenes, this is simply a wrapper around get_message() that runs in a separate thread, essentially creating a tiny non-blocking event loop for you. run_in_thread() takes an optional sleep_time argument. If specified, the event loop will call time.sleep() with the value in each iteration of the loop. Note: Since we're running in a separate thread, there's no way to handle messages that aren't automatically handled with registered message handlers. Therefore, redis-py prevents you from calling run_in_thread() if you're subscribed to patterns or channels that don't have message handlers attached. .. code:: python >>> p.subscribe(**{'my-channel': my_handler}) >>> thread = p.run_in_thread(sleep_time=0.001) # the event loop is now running in the background processing messages # when it's time to shut it down... >>> thread.stop() run_in_thread also supports an optional exception handler, which lets you catch exceptions that occur within the worker thread and handle them appropriately. 
The exception handler will take as arguments the exception itself, the pubsub object, and the worker thread returned by run_in_thread. .. code:: python >>> p.subscribe(**{'my-channel': my_handler}) >>> def exception_handler(ex, pubsub, thread): >>> print(ex) >>> thread.stop() >>> thread = p.run_in_thread(exception_handler=exception_handler) A PubSub object adheres to the same encoding semantics as the client instance it was created from. Any channel or pattern that's unicode will be encoded using the encoding specified on the client before being sent to Redis. If the client's decode_responses flag is set the False (the default), the 'channel', 'pattern' and 'data' values in message dictionaries will be byte strings (str on Python 2, bytes on Python 3). If the client's decode_responses is True, then the 'channel', 'pattern' and 'data' values will be automatically decoded to unicode strings using the client's encoding. PubSub objects remember what channels and patterns they are subscribed to. In the event of a disconnection such as a network error or timeout, the PubSub object will re-subscribe to all prior channels and patterns when reconnecting. Messages that were published while the client was disconnected cannot be delivered. When you're finished with a PubSub object, call its .close() method to shutdown the connection. .. code:: python >>> p = r.pubsub() >>> ... >>> p.close() The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also supported: .. code:: python >>> r.pubsub_channels() [b'foo', b'bar'] >>> r.pubsub_numsub('foo', 'bar') [(b'foo', 9001), (b'bar', 42)] >>> r.pubsub_numsub('baz') [(b'baz', 0)] >>> r.pubsub_numpat() 1204 Sharded pubsub ~~~~~~~~~~~~~~ `Sharded pubsub `_ is a feature introduced with Redis 7.0, and fully supported by redis-py as of 5.0. It helps scale the usage of pub/sub in cluster mode, by having the cluster shard messages to nodes that own a slot for a shard channel. 
Here, the cluster ensures the published shard messages are forwarded to the appropriate nodes. Clients subscribe to a channel by connecting to either the master responsible for the slot, or any of its replicas. This makes use of the `SSUBSCRIBE `_ and `SPUBLISH `_ commands within Redis. The following, is a simplified example: .. code:: python >>> from redis.cluster import RedisCluster, ClusterNode >>> r = RedisCluster(startup_nodes=[ClusterNode('localhost', 6379), ClusterNode('localhost', 6380)]) >>> p = r.pubsub() >>> p.ssubscribe('foo') >>> # assume someone sends a message along the channel via a publish >>> message = p.get_sharded_message() Similarly, the same process can be used to acquire sharded pubsub messages, that have already been sent to a specific node, by passing the node to get_sharded_message: .. code:: python >>> from redis.cluster import RedisCluster, ClusterNode >>> first_node = ClusterNode['localhost', 6379] >>> second_node = ClusterNode['localhost', 6380] >>> r = RedisCluster(startup_nodes=[first_node, second_node]) >>> p = r.pubsub() >>> p.ssubscribe('foo') >>> # assume someone sends a message along the channel via a publish >>> message = p.get_sharded_message(target_node=second_node) Monitor ~~~~~~~ redis-py includes a Monitor object that streams every command processed by the Redis server. Use listen() on the Monitor object to block until a command is received. .. code:: python >>> r = redis.Redis(...) >>> with r.monitor() as m: >>> for command in m.listen(): >>> print(command) redis-py-6.1.0/docs/backoff.rst000066400000000000000000000001271501061645500163320ustar00rootroot00000000000000.. _backoff-label: Backoff ############# .. automodule:: redis.backoff :members: redis-py-6.1.0/docs/clustering.rst000066400000000000000000000254641501061645500171310ustar00rootroot00000000000000Clustering ========== redis-py now supports cluster mode and provides a client for `Redis Cluster `__. 
The cluster client is based on Grokzen’s `redis-py-cluster `__, has added bug fixes, and now supersedes that library. Support for these changes is thanks to his contributions. To learn more about Redis Cluster, see `Redis Cluster specifications `__. `Creating clusters <#creating-clusters>`__ \| `Specifying Target Nodes <#specifying-target-nodes>`__ \| `Multi-key Commands <#multi-key-commands>`__ \| `Known PubSub Limitations <#known-pubsub-limitations>`__ Connecting to cluster --------------------- Connecting redis-py to a Redis Cluster instance(s) requires at a minimum a single node for cluster discovery. There are multiple ways in which a cluster instance can be created: - Using ‘host’ and ‘port’ arguments: .. code:: python >>> from redis.cluster import RedisCluster as Redis >>> rc = Redis(host='localhost', port=6379) >>> print(rc.get_nodes()) [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis>>]] - Using the Redis URL specification: .. code:: python >>> from redis.cluster import RedisCluster as Redis >>> rc = Redis.from_url("redis://localhost:6379/0") - Directly, via the ClusterNode class: .. code:: python >>> from redis.cluster import RedisCluster as Redis >>> from redis.cluster import ClusterNode >>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)] >>> rc = Redis(startup_nodes=nodes) When a RedisCluster instance is being created it first attempts to establish a connection to one of the provided startup nodes. If none of the startup nodes are reachable, a ‘RedisClusterException’ will be thrown. 
After a connection to the one of the cluster’s nodes is established, the RedisCluster instance will be initialized with 3 caches: a slots cache which maps each of the 16384 slots to the node/s handling them, a nodes cache that contains ClusterNode objects (name, host, port, redis connection) for all of the cluster’s nodes, and a commands cache contains all the server supported commands that were retrieved using the Redis ‘COMMAND’ output. See *RedisCluster specific options* below for more. RedisCluster instance can be directly used to execute Redis commands. When a command is being executed through the cluster instance, the target node(s) will be internally determined. When using a key-based command, the target node will be the node that holds the key’s slot. Cluster management commands and other commands that are not key-based have a parameter called ‘target_nodes’ where you can specify which nodes to execute the command on. In the absence of target_nodes, the command will be executed on the default cluster node. As part of cluster instance initialization, the cluster’s default node is randomly selected from the cluster’s primaries, and will be updated upon reinitialization. Using r.get_default_node(), you can get the cluster’s default node, or you can change it using the ‘set_default_node’ method. The ‘target_nodes’ parameter is explained in the following section, ‘Specifying Target Nodes’. .. 
code:: python >>> # target-nodes: the node that holds 'foo1's key slot >>> rc.set('foo1', 'bar1') >>> # target-nodes: the node that holds 'foo2's key slot >>> rc.set('foo2', 'bar2') >>> # target-nodes: the node that holds 'foo1's key slot >>> print(rc.get('foo1')) b'bar' >>> # target-node: default-node >>> print(rc.keys()) [b'foo1'] >>> # target-node: default-node >>> rc.ping() Specifying Target Nodes ----------------------- As mentioned above, all non key-based RedisCluster commands accept the kwarg parameter ‘target_nodes’ that specifies the node/nodes that the command should be executed on. The best practice is to specify target nodes using RedisCluster class’s node flags: PRIMARIES, REPLICAS, ALL_NODES, RANDOM. When a nodes flag is passed along with a command, it will be internally resolved to the relevant node/s. If the nodes topology of the cluster changes during the execution of a command, the client will be able to resolve the nodes flag again with the new topology and attempt to retry executing the command. .. code:: python >>> from redis.cluster import RedisCluster as Redis >>> # run cluster-meet command on all of the cluster's nodes >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES) >>> # ping all replicas >>> rc.ping(target_nodes=Redis.REPLICAS) >>> # ping a random node >>> rc.ping(target_nodes=Redis.RANDOM) >>> # get the keys from all cluster nodes >>> rc.keys(target_nodes=Redis.ALL_NODES) [b'foo1', b'foo2'] >>> # execute bgsave in all primaries >>> rc.bgsave(Redis.PRIMARIES) You could also pass ClusterNodes directly if you want to execute a command on a specific node / node group that isn’t addressed by the nodes flag. However, if the command execution fails due to cluster topology changes, a retry attempt will not be made, since the passed target node/s may no longer be valid, and the relevant cluster or connection error will be returned. .. 
code:: python >>> node = rc.get_node('localhost', 6379) >>> # Get the keys only for that specific node >>> rc.keys(target_nodes=node) >>> # get Redis info from a subset of primaries >>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378] >>> rc.info(target_nodes=subset_primaries) In addition, the RedisCluster instance can query the Redis instance of a specific node and execute commands on that node directly. The Redis client, however, does not handle cluster failures and retries. .. code:: python >>> cluster_node = rc.get_node(host='localhost', port=6379) >>> print(cluster_node) [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis>>] >>> r = cluster_node.redis_connection >>> r.client_list() [{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}] >>> # Get the keys only for that specific node >>> r.keys() [b'foo1'] Multi-key Commands ------------------ Redis supports multi-key commands in Cluster Mode, such as Set type unions or intersections, mset and mget, as long as the keys all hash to the same slot. By using RedisCluster client, you can use the known functions (e.g. mget, mset) to perform an atomic multi-key operation. However, you must ensure all keys are mapped to the same slot, otherwise a RedisClusterException will be thrown. Redis Cluster implements a concept called hash tags that can be used in order to force certain keys to be stored in the same hash slot, see `Keys hash tag `__. You can also use nonatomic for some of the multikey operations, and pass keys that aren’t mapped to the same slot. The client will then map the keys to the relevant slots, sending the commands to the slots’ node owners. 
Non-atomic operations batch the keys according to their hash value, and then each batch is sent separately to the slot’s owner. .. code:: python # Atomic operations can be used when all keys are mapped to the same slot >>> rc.mset({'{foo}1': 'bar1', '{foo}2': 'bar2'}) >>> rc.mget('{foo}1', '{foo}2') [b'bar1', b'bar2'] # Non-atomic multi-key operations splits the keys into different slots >>> rc.mset_nonatomic({'foo': 'value1', 'bar': 'value2', 'zzz': 'value3') >>> rc.mget_nonatomic('foo', 'bar', 'zzz') [b'value1', b'value2', b'value3'] **Cluster PubSub:** When a ClusterPubSub instance is created without specifying a node, a single node will be transparently chosen for the pubsub connection on the first command execution. The node will be determined by: 1. Hashing the channel name in the request to find its keyslot 2. Selecting a node that handles the keyslot: If read_from_replicas is set to true or load_balancing_strategy is provided, a replica can be selected. Known PubSub Limitations ------------------------ Pattern subscribe and publish do not currently work properly due to key slots. If we hash a pattern like fo\* we will receive a keyslot for that string but there are endless possibilities for channel names based on this pattern - unknowable in advance. This feature is not disabled but the commands are not currently recommended for use. See `redis-py-cluster documentation `__ for more. .. code:: python >>> p1 = rc.pubsub() # p1 connection will be set to the node that holds 'foo' keyslot >>> p1.subscribe('foo') # p2 connection will be set to node 'localhost:6379' >>> p2 = rc.pubsub(rc.get_node('localhost', 6379)) **Read Only Mode** By default, Redis Cluster always returns MOVE redirection response on accessing a replica node. You can overcome this limitation and scale read commands by triggering READONLY mode. To enable READONLY mode pass read_from_replicas=True or define a load_balancing_strategy to RedisCluster constructor. 
When read_from_replicas is set to true read commands will be assigned between the primary and its replications in a Round-Robin manner. With load_balancing_strategy you can define a custom strategy for assigning read commands to the replicas and primary nodes. READONLY mode can be set at runtime by calling the readonly() method with target_nodes=‘replicas’, and read-write access can be restored by calling the readwrite() method. .. code:: python >>> from cluster import RedisCluster as Redis # Use 'debug' log level to print the node that the command is executed on >>> rc_readonly = Redis(startup_nodes=startup_nodes, ... read_from_replicas=True) >>> rc_readonly.set('{foo}1', 'bar1') >>> for i in range(0, 4): ... # Assigns read command to the slot's hosts in a Round-Robin manner ... rc_readonly.get('{foo}1') # set command would be directed only to the slot's primary node >>> rc_readonly.set('{foo}2', 'bar2') # reset READONLY flag >>> rc_readonly.readwrite(target_nodes='replicas') # now the get command would be directed only to the slot's primary node >>> rc_readonly.get('{foo}1') redis-py-6.1.0/docs/commands.rst000066400000000000000000000016771501061645500165530ustar00rootroot00000000000000Redis Commands ############## Core Commands ************* The following functions can be used to replicate their equivalent `Redis command `_. Generally they can be used as functions on your redis connection. For the simplest example, see below: Getting and settings data in redis:: import redis r = redis.Redis(decode_responses=True) r.set('mykey', 'thevalueofmykey') r.get('mykey') .. autoclass:: redis.commands.core.CoreCommands :inherited-members: Sentinel Commands ***************** .. autoclass:: redis.commands.sentinel.SentinelCommands :inherited-members: Redis Cluster Commands ********************** The following `Redis commands `_ are available within a `Redis Cluster `_. Generally they can be used as functions on your redis connection. .. 
autoclass:: redis.commands.cluster.RedisClusterCommands :inherited-members: redis-py-6.1.0/docs/conf.py000066400000000000000000000231651501061645500155130ustar00rootroot00000000000000# redis-py documentation build configuration file, created by # sphinx-quickstart on Fri Feb 8 00:47:08 2013. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import datetime import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) sys.path.append(os.path.abspath(os.path.pardir)) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "nbsphinx", "sphinx_gallery.load_style", "sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.autosectionlabel", "sphinx.ext.napoleon", ] # Napoleon settings. We only accept Google-style docstrings. napoleon_google_docstring = True napoleon_numpy_docstring = False # AutosectionLabel settings. # Uses a :